author     Brice Rising <60147418+bricerisingalgorand@users.noreply.github.com>  2022-01-13 14:14:45 -0500
committer  GitHub <noreply@github.com>  2022-01-13 14:14:45 -0500
commit     29269c3504976ac57901eec5d402c4c37cbef607
tree       d66754194a8735621ae960c76a89757d63d6f109
parent     62469a2a60cdde3c3fd42f2b49d26b931d5d6dee
Feature/contract to contract update (#3412)
* testing: Fix unit test TestAsyncTelemetryHook_QueueDepth (#2685)
  Fix the unit test TestAsyncTelemetryHook_QueueDepth.

* Deprecate `FastPartitionRecovery` from `ConsensusParams` (#3386)
  This PR removes the `FastPartitionRecovery` option from the consensus parameters; the code now acts as if this value is set to true.
  Closes https://github.com/algorand/go-algorand-internal/issues/1830.
  Test Plan: None.

* Remaking a PR for CI (#3398)
  * Allow setting manager, reserve, freeze, and clawback at goal asset create
  * Add e2e tests
  * Add more tests for goal asset create flags
  Co-authored-by: Fionna <fionnacst@gmail.com>

* [Other] CircleCI pipeline change for binary uploads (#3381)
  For nightly builds ("rel/nightly"), we want to have deadlock enabled. For rel/beta and rel/stable, we want to make sure we can build and upload a binary with deadlock disabled so that it can be used for release testing and validation purposes.

* signer.KeyDilution need not depend on config package (#3265)
  The crypto package need not depend on config. signer.KeyDilution took `config.ConsensusParams` as an argument only to pick DefaultKeyDilution from it, which introduced an unnecessary dependency from the crypto package on the config package. Instead, only the DefaultKeyDilution value is now passed to signer.KeyDilution.

* algodump is a tcpdump-like tool for algod's network protocol (#3166)
  This PR introduces algodump, a tcpdump-like tool for monitoring algod network messages.

* Removing C/crypto dependencies from `data/abi` package (#3375)

* Feature Networks pipeline related changes (#3393)
  Added support for not having certain files in the signing script.

Co-authored-by: Tsachi Herman <tsachi.herman@algorand.com>
Co-authored-by: Tolik Zinovyev <tolik@algorand.com>
Co-authored-by: Jack <87339414+algojack@users.noreply.github.com>
Co-authored-by: Fionna <fionnacst@gmail.com>
Co-authored-by: algobarb <78746954+algobarb@users.noreply.github.com>
Co-authored-by: Shant Karakashian <55754073+algonautshant@users.noreply.github.com>
Co-authored-by: Nickolai Zeldovich <nickolai@csail.mit.edu>
Co-authored-by: Hang Su <87964331+ahangsu@users.noreply.github.com>
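A minimal sketch (not part of this commit) of how a caller uses the changed signer.KeyDilution API described above: the signer now receives only the uint64 DefaultKeyDilution value instead of the whole config.ConsensusParams, so the crypto package no longer needs to import config. The imports are real go-algorand packages; the main function itself is illustrative only.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/protocol"
)

func main() {
	// A zero OptionalKeyDilution means "fall back to the protocol default".
	var signer crypto.OneTimeSigner
	proto := config.Consensus[protocol.ConsensusCurrentVersion]

	// Before this change: signer.KeyDilution(proto)
	// After this change: only the default dilution value crosses the package boundary.
	dilution := signer.KeyDilution(proto.DefaultKeyDilution)
	ephID := basics.OneTimeIDForRound(basics.Round(1000), dilution)
	fmt.Printf("dilution=%d ephemeral key id=%+v\n", dilution, ephID)
}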
-rw-r--r--   .circleci/config.yml                                      8
-rw-r--r--   agreement/cryptoVerifier_test.go                          2
-rw-r--r--   agreement/demux.go                                        18
-rw-r--r--   agreement/proposal.go                                     3
-rw-r--r--   agreement/vote.go                                         28
-rw-r--r--   agreement/voteAggregator.go                               3
-rw-r--r--   cmd/goal/asset.go                                         71
-rw-r--r--   config/consensus.go                                       4
-rw-r--r--   crypto/onetimesig.go                                      8
-rw-r--r--   data/abi/abi_encode.go                                    16
-rw-r--r--   data/abi/abi_encode_test.go                               159
-rw-r--r--   data/abi/abi_json.go                                      57
-rw-r--r--   data/abi/abi_json_test.go                                 27
-rw-r--r--   data/abi/abi_type.go                                      16
-rw-r--r--   data/basics/address.go                                    17
-rw-r--r--   logging/telemetryhook_test.go                             7
-rw-r--r--   node/netprio.go                                           2
-rwxr-xr-x   scripts/build_package.sh                                  6
-rwxr-xr-x   scripts/release/mule/sign/sign.sh                         7
-rwxr-xr-x   test/scripts/e2e_subs/asset-misc.sh                       102
-rw-r--r--   test/testdata/consensus/catchpointtestingprotocol.json   1
-rw-r--r--   tools/debug/algodump/README.md                            33
-rw-r--r--   tools/debug/algodump/main.go                              188
23 files changed, 621 insertions, 162 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 9c58e88c3..2f2efbb77 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -166,7 +166,6 @@ workflows:
context:
- slack-secrets
- aws-secrets
-
#- windows_x64_build
commands:
@@ -478,11 +477,14 @@ commands:
- attach_workspace:
at: << parameters.build_dir >>
- run:
- name: Upload binaries << parameters.platform >>
+ name: Upload Binaries << parameters.platform >>
command: |
+ if [ "${CIRCLE_BRANCH}" = "rel/nightly" ]
+ then
+ export NO_BUILD="true"
+ fi
export PATH=$(echo "$PATH" | sed -e "s|:${HOME}/\.go_workspace/bin||g" | sed -e 's|:/usr/local/go/bin||g')
export GOPATH="<< parameters.build_dir >>/go"
- export NO_BUILD=true
export TRAVIS_BRANCH=${CIRCLE_BRANCH}
scripts/travis/deploy_packages.sh
- when:
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go
index 939f8aa71..a42ffd9b0 100644
--- a/agreement/cryptoVerifier_test.go
+++ b/agreement/cryptoVerifier_test.go
@@ -72,7 +72,7 @@ func makeUnauthenticatedVote(l Ledger, sender basics.Address, selection *crypto.
m, _ := membership(l, rv.Sender, rv.Round, rv.Period, rv.Step)
cred := committee.MakeCredential(&selection.SK, m.Selector)
- ephID := basics.OneTimeIDForRound(rv.Round, voting.KeyDilution(config.Consensus[protocol.ConsensusCurrentVersion]))
+ ephID := basics.OneTimeIDForRound(rv.Round, voting.KeyDilution(config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution))
sig := voting.Sign(ephID, rv)
return unauthenticatedVote{
diff --git a/agreement/demux.go b/agreement/demux.go
index 258c54698..fe48f604b 100644
--- a/agreement/demux.go
+++ b/agreement/demux.go
@@ -21,7 +21,6 @@ import (
"fmt"
"time"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/logging/logspec"
"github.com/algorand/go-algorand/protocol"
@@ -236,22 +235,7 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat
ledgerNextRoundCh := s.Ledger.Wait(nextRound)
deadlineCh := s.Clock.TimeoutAt(deadline)
- var fastDeadlineCh <-chan time.Time
-
- fastPartitionRecoveryEnabled := false
- if proto, err := d.ledger.ConsensusVersion(ParamsRound(currentRound)); err != nil {
- logging.Base().Warnf("demux: could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err)
- // this might happen during catchup, since the Ledger.Wait fires as soon as a new block is received by the ledger, which could be
- // far before it's being committed. In these cases, it should be safe to default to the current consensus version. On subsequent
- // iterations, it will get "corrected" since the ledger would finish flushing the blocks to disk.
- fastPartitionRecoveryEnabled = config.Consensus[protocol.ConsensusCurrentVersion].FastPartitionRecovery
- } else {
- fastPartitionRecoveryEnabled = config.Consensus[proto].FastPartitionRecovery
- }
-
- if fastPartitionRecoveryEnabled {
- fastDeadlineCh = s.Clock.TimeoutAt(fastDeadline)
- }
+ fastDeadlineCh := s.Clock.TimeoutAt(fastDeadline)
d.UpdateEventsQueue(eventQueueDemux, 0)
d.monitor.dec(demuxCoserviceType)
diff --git a/agreement/proposal.go b/agreement/proposal.go
index d69a2d182..30eb2ef73 100644
--- a/agreement/proposal.go
+++ b/agreement/proposal.go
@@ -60,6 +60,9 @@ type unauthenticatedProposal struct {
OriginalProposer basics.Address `codec:"oprop"`
}
+// TransmittedPayload exported for dumping textual versions of messages
+type TransmittedPayload = transmittedPayload
+
// ToBeHashed implements the Hashable interface.
func (p unauthenticatedProposal) ToBeHashed() (protocol.HashID, []byte) {
return protocol.Payload, protocol.Encode(&p)
diff --git a/agreement/vote.go b/agreement/vote.go
index 72a805602..e39edd7b7 100644
--- a/agreement/vote.go
+++ b/agreement/vote.go
@@ -83,6 +83,9 @@ type (
Proposals [2]proposalValue `codec:"props"`
Sigs [2]crypto.OneTimeSignature `codec:"sigs"`
}
+
+ // UnauthenticatedVote exported for dumping textual versions of messages
+ UnauthenticatedVote = unauthenticatedVote
)
// verify verifies that a vote that was received from the network is valid.
@@ -152,27 +155,18 @@ func makeVote(rv rawVote, voting crypto.OneTimeSigner, selection *crypto.VRFSecr
return unauthenticatedVote{}, fmt.Errorf("makeVote: could not get consensus params for round %d: %v", ParamsRound(rv.Round), err)
}
- if proto.FastPartitionRecovery {
- switch rv.Step {
- case propose, soft, cert, late, redo:
- if rv.Proposal == bottom {
- logging.Base().Panicf("makeVote: votes from step %d cannot validate bottom", rv.Step)
- }
- case down:
- if rv.Proposal != bottom {
- logging.Base().Panicf("makeVote: votes from step %d must validate bottom", rv.Step)
- }
+ switch rv.Step {
+ case propose, soft, cert, late, redo:
+ if rv.Proposal == bottom {
+ logging.Base().Panicf("makeVote: votes from step %d cannot validate bottom", rv.Step)
}
- } else {
- switch rv.Step {
- case propose, soft, cert:
- if rv.Proposal == bottom {
- logging.Base().Panicf("makeVote: votes from step %d cannot validate bottom", rv.Step)
- }
+ case down:
+ if rv.Proposal != bottom {
+ logging.Base().Panicf("makeVote: votes from step %d must validate bottom", rv.Step)
}
}
- ephID := basics.OneTimeIDForRound(rv.Round, voting.KeyDilution(proto))
+ ephID := basics.OneTimeIDForRound(rv.Round, voting.KeyDilution(proto.DefaultKeyDilution))
sig := voting.Sign(ephID, rv)
if (sig == crypto.OneTimeSignature{}) {
return unauthenticatedVote{}, fmt.Errorf("makeVote: got back empty signature for vote")
diff --git a/agreement/voteAggregator.go b/agreement/voteAggregator.go
index b77cb437a..95b378c0d 100644
--- a/agreement/voteAggregator.go
+++ b/agreement/voteAggregator.go
@@ -19,7 +19,6 @@ package agreement
import (
"fmt"
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -227,7 +226,7 @@ func voteStepFresh(descr string, proto protocol.ConsensusVersion, mine, vote ste
// always propagate first recovery vote to ensure synchronous block of periods after partition
return nil
}
- if config.Consensus[proto].FastPartitionRecovery && vote >= late {
+ if vote >= late {
// always propagate fast partition recovery votes
return nil
}
diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go
index ad2891e82..b940a54e1 100644
--- a/cmd/goal/asset.go
+++ b/cmd/goal/asset.go
@@ -37,8 +37,13 @@ var (
assetURL string
assetName string
assetManager string
+ assetReserve string
assetClawback string
assetFreezer string
+ assetNoManager bool
+ assetNoReserve bool
+ assetNoFreezer bool
+ assetNoClawback bool
assetNewManager string
assetNewReserve string
@@ -64,6 +69,14 @@ func init() {
createAssetCmd.Flags().StringVar(&assetName, "name", "", "Name for the entire asset")
createAssetCmd.Flags().StringVar(&assetURL, "asseturl", "", "URL where user can access more information about the asset (max 32 bytes)")
createAssetCmd.Flags().StringVar(&assetMetadataHashBase64, "assetmetadatab64", "", "base-64 encoded 32-byte commitment to asset metadata")
+ createAssetCmd.Flags().StringVar(&assetManager, "manager", "", "Manager account that can issue transactions to re-configure or destroy the asset")
+ createAssetCmd.Flags().StringVar(&assetReserve, "reserve", "", "Reserve account that non-minted assets will reside in")
+ createAssetCmd.Flags().StringVar(&assetFreezer, "freezer", "", "Freezer account that can freeze or unfreeze the asset holdings for a specific account")
+ createAssetCmd.Flags().StringVar(&assetClawback, "clawback", "", "Clawback account that is allowed to transfer assets from and to any asset holder")
+ createAssetCmd.Flags().BoolVar(&assetNoManager, "no-manager", false, "Explicitly declare the lack of manager")
+ createAssetCmd.Flags().BoolVar(&assetNoReserve, "no-reserve", false, "Explicitly declare the lack of reserve")
+ createAssetCmd.Flags().BoolVar(&assetNoFreezer, "no-freezer", false, "Explicitly declare the lack of freezer")
+ createAssetCmd.Flags().BoolVar(&assetNoClawback, "no-clawback", false, "Explicitly declare the lack of clawback")
createAssetCmd.MarkFlagRequired("total")
createAssetCmd.MarkFlagRequired("creator")
@@ -185,10 +198,66 @@ var createAssetCmd = &cobra.Command{
Run: func(cmd *cobra.Command, _ []string) {
checkTxValidityPeriodCmdFlags(cmd)
+ if assetManager != "" && assetNoManager {
+ reportErrorf("The [--manager] flag and the [--no-manager] flag are mutually exclusive, do not provide both flags.")
+ }
+
+ if assetReserve != "" && assetNoReserve {
+ reportErrorf("The [--reserve] flag and the [--no-reserve] flag are mutually exclusive, do not provide both flags.")
+ }
+
+ if assetFreezer != "" && assetNoFreezer {
+ reportErrorf("The [--freezer] flag and the [--no-freezer] flag are mutually exclusive, do not provide both flags.")
+ }
+
+ if assetClawback != "" && assetNoClawback {
+ reportErrorf("The [--clawback] flag and the [--no-clawback] flag are mutually exclusive, do not provide both flags.")
+ }
+
dataDir := ensureSingleDataDir()
client := ensureFullClient(dataDir)
accountList := makeAccountsList(dataDir)
creator := accountList.getAddressByName(assetCreator)
+ manager := creator
+ reserve := creator
+ freezer := creator
+ clawback := creator
+
+ if cmd.Flags().Changed("manager") {
+ assetManager = accountList.getAddressByName(assetManager)
+ manager = assetManager
+ }
+
+ if assetNoManager {
+ manager = ""
+ }
+
+ if cmd.Flags().Changed("reserve") {
+ assetReserve = accountList.getAddressByName(assetReserve)
+ reserve = assetReserve
+ }
+
+ if assetNoReserve {
+ reserve = ""
+ }
+
+ if cmd.Flags().Changed("freezer") {
+ assetFreezer = accountList.getAddressByName(assetFreezer)
+ freezer = assetFreezer
+ }
+
+ if assetNoFreezer {
+ freezer = ""
+ }
+
+ if cmd.Flags().Changed("clawback") {
+ assetClawback = accountList.getAddressByName(assetClawback)
+ clawback = assetClawback
+ }
+
+ if assetNoClawback {
+ clawback = ""
+ }
var err error
var assetMetadataHash []byte
@@ -199,7 +268,7 @@ var createAssetCmd = &cobra.Command{
}
}
- tx, err := client.MakeUnsignedAssetCreateTx(assetTotal, assetFrozen, creator, creator, creator, creator, assetUnitName, assetName, assetURL, assetMetadataHash, assetDecimals)
+ tx, err := client.MakeUnsignedAssetCreateTx(assetTotal, assetFrozen, manager, reserve, freezer, clawback, assetUnitName, assetName, assetURL, assetMetadataHash, assetDecimals)
if err != nil {
reportErrorf("Cannot construct transaction: %s", err)
}
diff --git a/config/consensus.go b/config/consensus.go
index 75d12b7dc..2a29ed823 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -152,8 +152,7 @@ type ConsensusParams struct {
// critical path
AgreementFilterTimeoutPeriod0 time.Duration
- FastRecoveryLambda time.Duration // time between fast recovery attempts
- FastPartitionRecovery bool // set when fast partition recovery is enabled
+ FastRecoveryLambda time.Duration // time between fast recovery attempts
// how to commit to the payset: flat or merkle tree
PaysetCommit PaysetCommitType
@@ -711,7 +710,6 @@ func initConsensusProtocols() {
// v10 introduces fast partition recovery (and also raises NumProposers).
v10 := v9
- v10.FastPartitionRecovery = true
v10.NumProposers = 20
v10.LateCommitteeSize = 500
v10.LateCommitteeThreshold = 320
diff --git a/crypto/onetimesig.go b/crypto/onetimesig.go
index 6196a99ee..119119d37 100644
--- a/crypto/onetimesig.go
+++ b/crypto/onetimesig.go
@@ -20,11 +20,9 @@ import (
"encoding/binary"
"fmt"
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-deadlock"
)
// A OneTimeSignature is a cryptographic signature that is produced a limited
@@ -432,10 +430,10 @@ type OneTimeSigner struct {
}
// KeyDilution returns the appropriate key dilution value for a OneTimeSigner.
-func (ots OneTimeSigner) KeyDilution(params config.ConsensusParams) uint64 {
+func (ots OneTimeSigner) KeyDilution(defaultKeyDilution uint64) uint64 {
if ots.OptionalKeyDilution != 0 {
return ots.OptionalKeyDilution
}
- return params.DefaultKeyDilution
+ return defaultKeyDilution
}
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
index 1f14af71a..8e5a49086 100644
--- a/data/abi/abi_encode.go
+++ b/data/abi/abi_encode.go
@@ -170,14 +170,14 @@ func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
return nil, fmt.Errorf("passed in numeric value should be non negative")
}
- bytes := bigInt.Bytes()
- if len(bytes) > int(bitSize/8) {
- return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", len(bytes)*8, bitSize)
+ castedBytes := make([]byte, bitSize/8)
+
+ if bigInt.Cmp(new(big.Int).Lsh(big.NewInt(1), uint(bitSize))) >= 0 {
+ return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", bigInt.BitLen(), bitSize)
}
- zeroPadding := make([]byte, bitSize/8-uint16(len(bytes)))
- buffer := append(zeroPadding, bytes...)
- return buffer, nil
+ bigInt.FillBytes(castedBytes)
+ return castedBytes, nil
}
// inferToSlice infers an interface element to a slice of interface{}, returns error if it cannot infer successfully
@@ -201,7 +201,7 @@ func inferToSlice(value interface{}) ([]interface{}, error) {
// encodeTuple encodes slice-of-interface of golang values to bytes, following ABI encoding rules
func encodeTuple(value interface{}, childT []Type) ([]byte, error) {
- if len(childT) >= (1 << 16) {
+ if len(childT) >= abiEncodingLengthLimit {
return nil, fmt.Errorf("abi child type number exceeds uint16 maximum")
}
values, err := inferToSlice(value)
@@ -277,7 +277,7 @@ func encodeTuple(value interface{}, childT []Type) ([]byte, error) {
if isDynamicIndex[i] {
// calculate where the index of dynamic value encoding byte start
headValue := headLength + tailCurrLength
- if headValue >= (1 << 16) {
+ if headValue >= abiEncodingLengthLimit {
return nil, fmt.Errorf("cannot encode abi tuple: encode length exceeds uint16 maximum")
}
binary.BigEndian.PutUint16(heads[i], uint16(headValue))
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
index 64296680b..66060f5ce 100644
--- a/data/abi/abi_encode_test.go
+++ b/data/abi/abi_encode_test.go
@@ -29,21 +29,21 @@ import (
)
const (
- UintStepLength = 8
- UintBegin = 8
- UintEnd = 512
- UintRandomTestPoints = 1000
- UintTestCaseCount = 200
- UfixedPrecision = 160
- UfixedRandomTestPoints = 20
- TupleMaxLength = 10
- ByteTestCaseCount = 1 << 8
- BoolTestCaseCount = 2
- AddressTestCaseCount = 300
- StringTestCaseCount = 10
- StringTestCaseSpecLenCount = 5
- TakeNum = 10
- TupleTestCaseCount = 100
+ uintStepLength = 8
+ uintBegin = 8
+ uintEnd = 512
+ uintRandomTestPoints = 1000
+ uintTestCaseCount = 200
+ ufixedPrecision = 160
+ ufixedRandomTestPoints = 20
+ tupleMaxLength = 10
+ byteTestCaseCount = 1 << 8
+ boolTestCaseCount = 2
+ addressTestCaseCount = 300
+ stringTestCaseCount = 10
+ stringTestCaseSpecLenCount = 5
+ takeNum = 10
+ tupleTestCaseCount = 100
)
/*
@@ -74,18 +74,17 @@ func TestEncodeValid(t *testing.T) {
// encoding test for uint type, iterating through all uint sizes
// randomly pick 1000 valid uint values and check if encoded value match with expected
- for intSize := UintBegin; intSize <= UintEnd; intSize += UintStepLength {
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(intSize))
+ for intSize := uintBegin; intSize <= uintEnd; intSize += uintStepLength {
+ upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(intSize))
uintType, err := makeUintType(intSize)
require.NoError(t, err, "make uint type fail")
- for i := 0; i < UintRandomTestPoints; i++ {
+ for i := 0; i < uintRandomTestPoints; i++ {
randomInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- randomIntByte := randomInt.Bytes()
- expected := make([]byte, intSize/8-len(randomIntByte))
- expected = append(expected, randomIntByte...)
+ expected := make([]byte, intSize/8)
+ randomInt.FillBytes(expected)
uintEncode, err := uintType.Encode(randomInt)
require.NoError(t, err, "encoding from uint type fail")
@@ -94,9 +93,9 @@ func TestEncodeValid(t *testing.T) {
}
// 2^[bitSize] - 1 test
// check if uint<bitSize> can contain max uint value (2^bitSize - 1)
- largest := big.NewInt(0).Add(
+ largest := new(big.Int).Add(
upperLimit,
- big.NewInt(1).Neg(big.NewInt(1)),
+ new(big.Int).Neg(big.NewInt(1)),
)
encoded, err := uintType.Encode(largest)
require.NoError(t, err, "largest uint encode error")
@@ -106,27 +105,26 @@ func TestEncodeValid(t *testing.T) {
// encoding test for ufixed, iterating through all the valid ufixed bitSize and precision
// randomly generate 10 big int values for ufixed numerator and check if encoded value match with expected
// also check if ufixed can fit max numerator (2^bitSize - 1) under specific byte bitSize
- for size := UintBegin; size <= UintEnd; size += UintStepLength {
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
+ for size := uintBegin; size <= uintEnd; size += uintStepLength {
+ upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(size))
largest := big.NewInt(0).Add(
upperLimit,
- big.NewInt(1).Neg(big.NewInt(1)),
+ new(big.Int).Neg(big.NewInt(1)),
)
- for precision := 1; precision <= UfixedPrecision; precision++ {
+ for precision := 1; precision <= ufixedPrecision; precision++ {
typeUfixed, err := makeUfixedType(size, precision)
require.NoError(t, err, "make ufixed type fail")
- for i := 0; i < UfixedRandomTestPoints; i++ {
+ for i := 0; i < ufixedRandomTestPoints; i++ {
randomInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
encodedUfixed, err := typeUfixed.Encode(randomInt)
require.NoError(t, err, "ufixed encode fail")
- randomBytes := randomInt.Bytes()
- buffer := make([]byte, size/8-len(randomBytes))
- buffer = append(buffer, randomBytes...)
- require.Equal(t, buffer, encodedUfixed, "encode ufixed not match with expected")
+ expected := make([]byte, size/8)
+ randomInt.FillBytes(expected)
+ require.Equal(t, expected, encodedUfixed, "encode ufixed not match with expected")
}
// (2^[bitSize] - 1) / (10^[precision]) test
ufixedLargestEncode, err := typeUfixed.Encode(largest)
@@ -138,14 +136,13 @@ func TestEncodeValid(t *testing.T) {
// encoding test for address, since address is 32 byte, it can be considered as 256 bit uint
// randomly generate 1000 uint256 and make address values, check if encoded value match with expected
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), addressByteSize<<3)
- for i := 0; i < UintRandomTestPoints; i++ {
+ upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
+ for i := 0; i < uintRandomTestPoints; i++ {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- rand256Bytes := randomAddrInt.Bytes()
- addrBytesExpected := make([]byte, addressByteSize-len(rand256Bytes))
- addrBytesExpected = append(addrBytesExpected, rand256Bytes...)
+ addrBytesExpected := make([]byte, addressByteSize)
+ randomAddrInt.FillBytes(addrBytesExpected)
addrBytesActual, err := addressType.Encode(addrBytesExpected)
require.NoError(t, err, "address encode fail")
@@ -153,7 +150,7 @@ func TestEncodeValid(t *testing.T) {
}
// encoding test for bool values
- for i := 0; i < BoolTestCaseCount; i++ {
+ for i := 0; i < boolTestCaseCount; i++ {
boolEncode, err := boolType.Encode(i == 1)
require.NoError(t, err, "bool encode fail")
expected := []byte{0x00}
@@ -164,7 +161,7 @@ func TestEncodeValid(t *testing.T) {
}
// encoding test for byte values
- for i := 0; i < ByteTestCaseCount; i++ {
+ for i := 0; i < byteTestCaseCount; i++ {
byteEncode, err := byteType.Encode(byte(i))
require.NoError(t, err, "byte encode fail")
expected := []byte{byte(i)}
@@ -175,8 +172,8 @@ func TestEncodeValid(t *testing.T) {
// we use `gobberish` to generate random utf-8 symbols
// randomly generate utf-8 str from length 1 to 100, each length draw 10 random strs
// check if encoded ABI str match with expected value
- for length := 1; length <= StringTestCaseCount; length++ {
- for i := 0; i < StringTestCaseSpecLenCount; i++ {
+ for length := 1; length <= stringTestCaseCount; length++ {
+ for i := 0; i < stringTestCaseSpecLenCount; i++ {
// generate utf8 strings from `gobberish` at some length
utf8Str := gobberish.GenerateString(length)
// since string is just type alias of `byte[]`, we need to store number of bytes in encoding
@@ -350,11 +347,11 @@ func TestDecodeValid(t *testing.T) {
// decoding test for uint, iterating through all valid uint bitSize
// randomly take 1000 tests on each valid bitSize
// generate bytes from random uint values and decode bytes with additional type information
- for intSize := 8; intSize <= 512; intSize += 8 {
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(intSize))
+ for intSize := uintBegin; intSize <= uintEnd; intSize += uintStepLength {
+ upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(intSize))
uintType, err := makeUintType(intSize)
require.NoError(t, err, "make uint type failure")
- for i := 0; i < 1000; i++ {
+ for i := 0; i < uintRandomTestPoints; i++ {
randBig, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
@@ -383,12 +380,12 @@ func TestDecodeValid(t *testing.T) {
// decoding test for ufixed, iterating through all valid ufixed bitSize and precision
// randomly take 10 tests on each valid setting
// generate ufixed bytes and try to decode back with additional type information
- for size := 8; size <= 512; size += 8 {
+ for size := uintBegin; size <= uintEnd; size += uintStepLength {
upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
- for precision := 1; precision <= 160; precision++ {
+ for precision := 1; precision <= ufixedPrecision; precision++ {
ufixedType, err := makeUfixedType(size, precision)
require.NoError(t, err, "make ufixed type failure")
- for i := 0; i < 10; i++ {
+ for i := 0; i < ufixedRandomTestPoints; i++ {
randBig, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
@@ -416,17 +413,16 @@ func TestDecodeValid(t *testing.T) {
}
}
- // decoding test for address, randomly take 1000 tests
+ // decoding test for address, randomly take 300 tests
// address is type alias of byte[32], we generate address value with random 256 bit big int values
// we make the expected address value and decode the encoding of expected, check if they match
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), 256)
- for i := 0; i < 1000; i++ {
+ upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
+ for i := 0; i < addressTestCaseCount; i++ {
randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
require.NoError(t, err, "cryptographic random int init fail")
- addressBytes := randomAddrInt.Bytes()
- expected := make([]byte, 32-len(addressBytes))
- expected = append(expected, addressBytes...)
+ expected := make([]byte, addressByteSize)
+ randomAddrInt.FillBytes(expected)
actual, err := addressType.Decode(expected)
require.NoError(t, err, "decoding address should not return error")
@@ -443,7 +439,7 @@ func TestDecodeValid(t *testing.T) {
}
// byte value decoding test, iterating through 256 valid byte value
- for i := 0; i < (1 << 8); i++ {
+ for i := 0; i < byteTestCaseCount; i++ {
byteEncode, err := byteType.Encode(byte(i))
require.NoError(t, err, "byte encode fail")
actual, err := byteType.Decode(byteEncode)
@@ -451,11 +447,11 @@ func TestDecodeValid(t *testing.T) {
require.Equal(t, byte(i), actual, "decode byte not match with expected")
}
- // string value decoding test, test from utf string length 1 to 100
- // randomly take 10 utf-8 strings to make ABI string values
+ // string value decoding test, test from utf string length 1 to 10
+ // randomly take 5 utf-8 strings to make ABI string values
// decode the encoded expected value and check if they match
- for length := 1; length <= 100; length++ {
- for i := 0; i < 10; i++ {
+ for length := 1; length <= stringTestCaseCount; length++ {
+ for i := 0; i < stringTestCaseSpecLenCount; i++ {
expected := gobberish.GenerateString(length)
strEncode, err := stringType.Encode(expected)
require.NoError(t, err, "string encode fail")
@@ -898,20 +894,20 @@ func categorySelfRoundTripTest(t *testing.T, category []testUnit) {
}
func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
- (*pool)[Uint] = make([]testUnit, UintTestCaseCount*UintEnd/UintStepLength)
- (*pool)[Ufixed] = make([]testUnit, UfixedPrecision*UintEnd/UintStepLength)
+ (*pool)[Uint] = make([]testUnit, uintTestCaseCount*uintEnd/uintStepLength)
+ (*pool)[Ufixed] = make([]testUnit, ufixedPrecision*uintEnd/uintStepLength)
uintIndex := 0
ufixedIndex := 0
- for bitSize := UintBegin; bitSize <= UintEnd; bitSize += UintStepLength {
+ for bitSize := uintBegin; bitSize <= uintEnd; bitSize += uintStepLength {
max := new(big.Int).Lsh(big.NewInt(1), uint(bitSize))
uintT, err := makeUintType(bitSize)
require.NoError(t, err, "make uint type failure")
uintTstr := uintT.String()
- for j := 0; j < UintTestCaseCount; j++ {
+ for j := 0; j < uintTestCaseCount; j++ {
randVal, err := rand.Int(rand.Reader, max)
require.NoError(t, err, "generate random uint, should be no error")
@@ -922,7 +918,7 @@ func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
uintIndex++
}
- for precision := 1; precision <= UfixedPrecision; precision++ {
+ for precision := 1; precision <= ufixedPrecision; precision++ {
randVal, err := rand.Int(rand.Reader, max)
require.NoError(t, err, "generate random ufixed, should be no error")
@@ -939,33 +935,32 @@ func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
categorySelfRoundTripTest(t, (*pool)[Uint])
categorySelfRoundTripTest(t, (*pool)[Ufixed])
- (*pool)[Byte] = make([]testUnit, ByteTestCaseCount)
- for i := 0; i < ByteTestCaseCount; i++ {
+ (*pool)[Byte] = make([]testUnit, byteTestCaseCount)
+ for i := 0; i < byteTestCaseCount; i++ {
(*pool)[Byte][i] = testUnit{serializedType: byteType.String(), value: byte(i)}
}
categorySelfRoundTripTest(t, (*pool)[Byte])
- (*pool)[Bool] = make([]testUnit, BoolTestCaseCount)
+ (*pool)[Bool] = make([]testUnit, boolTestCaseCount)
(*pool)[Bool][0] = testUnit{serializedType: boolType.String(), value: false}
(*pool)[Bool][1] = testUnit{serializedType: boolType.String(), value: true}
categorySelfRoundTripTest(t, (*pool)[Bool])
maxAddress := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- (*pool)[Address] = make([]testUnit, AddressTestCaseCount)
- for i := 0; i < AddressTestCaseCount; i++ {
+ (*pool)[Address] = make([]testUnit, addressTestCaseCount)
+ for i := 0; i < addressTestCaseCount; i++ {
randAddrVal, err := rand.Int(rand.Reader, maxAddress)
require.NoError(t, err, "generate random value for address, should be no error")
- addrBytes := randAddrVal.Bytes()
- remainBytes := make([]byte, addressByteSize-len(addrBytes))
- addrBytes = append(remainBytes, addrBytes...)
+ addrBytes := make([]byte, addressByteSize)
+ randAddrVal.FillBytes(addrBytes)
(*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
}
categorySelfRoundTripTest(t, (*pool)[Address])
- (*pool)[String] = make([]testUnit, StringTestCaseCount*StringTestCaseSpecLenCount)
+ (*pool)[String] = make([]testUnit, stringTestCaseCount*stringTestCaseSpecLenCount)
stringIndex := 0
- for length := 1; length <= StringTestCaseCount; length++ {
- for i := 0; i < StringTestCaseSpecLenCount; i++ {
+ for length := 1; length <= stringTestCaseCount; length++ {
+ for i := 0; i < stringTestCaseSpecLenCount; i++ {
(*pool)[String][stringIndex] = testUnit{
serializedType: stringType.String(),
value: gobberish.GenerateString(length),
@@ -1000,21 +995,21 @@ func takeSomeFromCategoryAndGenerateArray(
}
func addArrayRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
- for intIndex := 0; intIndex < len((*pool)[Uint]); intIndex += UintTestCaseCount {
- takeSomeFromCategoryAndGenerateArray(t, Uint, intIndex, TakeNum, pool)
+ for intIndex := 0; intIndex < len((*pool)[Uint]); intIndex += uintTestCaseCount {
+ takeSomeFromCategoryAndGenerateArray(t, Uint, intIndex, takeNum, pool)
}
- takeSomeFromCategoryAndGenerateArray(t, Byte, 0, TakeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, Address, 0, TakeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, String, 0, TakeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, Bool, 0, TakeNum, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Byte, 0, takeNum, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Address, 0, takeNum, pool)
+ takeSomeFromCategoryAndGenerateArray(t, String, 0, takeNum, pool)
+ takeSomeFromCategoryAndGenerateArray(t, Bool, 0, takeNum, pool)
categorySelfRoundTripTest(t, (*pool)[ArrayStatic])
categorySelfRoundTripTest(t, (*pool)[ArrayDynamic])
}
func addTupleRandomValues(t *testing.T, slotRange BaseType, pool *map[BaseType][]testUnit) {
- for i := 0; i < TupleTestCaseCount; i++ {
- tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(TupleMaxLength))
+ for i := 0; i < tupleTestCaseCount; i++ {
+ tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(tupleMaxLength))
require.NoError(t, err, "generate random tuple length should not return error")
tupleLen := tupleLenBig.Int64() + 1
testUnits := make([]testUnit, tupleLen)
diff --git a/data/abi/abi_json.go b/data/abi/abi_json.go
index a3177c4b2..a71823f0c 100644
--- a/data/abi/abi_json.go
+++ b/data/abi/abi_json.go
@@ -18,12 +18,26 @@ package abi
import (
"bytes"
+ "crypto/sha512"
+ "encoding/base32"
"encoding/json"
"fmt"
- "github.com/algorand/go-algorand/data/basics"
"math/big"
)
+// NOTE: discussion about go-algorand-sdk
+// https://github.com/algorand/go-algorand/pull/3375#issuecomment-1007536841
+
+var base32Encoder = base32.StdEncoding.WithPadding(base32.NoPadding)
+
+func addressCheckSum(addressBytes []byte) ([]byte, error) {
+ if len(addressBytes) != addressByteSize {
+ return nil, fmt.Errorf("address bytes should be of length 32")
+ }
+ hashed := sha512.Sum512_256(addressBytes[:])
+ return hashed[addressByteSize-checksumByteSize:], nil
+}
+
func castBigIntToNearestPrimitive(num *big.Int, bitSize uint16) (interface{}, error) {
if num.BitLen() > int(bitSize) {
return nil, fmt.Errorf("cast big int to nearest primitive failure: %v >= 2^%d", num, bitSize)
@@ -74,17 +88,24 @@ func (t Type) MarshalToJSON(value interface{}) ([]byte, error) {
}
return json.Marshal(byteValue)
case Address:
- var addressInternal basics.Address
+ var addressValueInternal []byte
switch valueCasted := value.(type) {
case []byte:
- copy(addressInternal[:], valueCasted[:])
- return json.Marshal(addressInternal.String())
+ if len(valueCasted) != addressByteSize {
+ return nil, fmt.Errorf("address byte slice length not equal to 32 byte")
+ }
+ addressValueInternal = valueCasted
case [addressByteSize]byte:
- addressInternal = valueCasted
- return json.Marshal(addressInternal.String())
+ copy(addressValueInternal[:], valueCasted[:])
default:
return nil, fmt.Errorf("cannot infer to byte slice/array for marshal to JSON")
}
+ checksum, err := addressCheckSum(addressValueInternal)
+ if err != nil {
+ return nil, err
+ }
+ addressValueInternal = append(addressValueInternal, checksum...)
+ return json.Marshal(base32Encoder.EncodeToString(addressValueInternal))
case ArrayStatic, ArrayDynamic:
values, err := inferToSlice(value)
if err != nil {
@@ -175,13 +196,29 @@ func (t Type) UnmarshalFromJSON(jsonEncoded []byte) (interface{}, error) {
case Address:
var addrStr string
if err := json.Unmarshal(jsonEncoded, &addrStr); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded to string: %v", err)
+ return nil, fmt.Errorf("cannot cast JSON encoded to address string: %v", err)
}
- addr, err := basics.UnmarshalChecksumAddress(addrStr)
+ decoded, err := base32Encoder.DecodeString(addrStr)
if err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to address: %v", string(jsonEncoded), err)
+ return nil,
+ fmt.Errorf("cannot cast JSON encoded address string (%s) to address: %v", addrStr, err)
+ }
+ if len(decoded) != addressByteSize+checksumByteSize {
+ return nil,
+ fmt.Errorf(
+ "cannot cast JSON encoded address string (%s) to address: "+
+ "decoded byte length should equal to 36 with address and checksum",
+ string(jsonEncoded),
+ )
+ }
+ checksum, err := addressCheckSum(decoded[:addressByteSize])
+ if err != nil {
+ return nil, err
+ }
+ if !bytes.Equal(checksum, decoded[addressByteSize:]) {
+ return nil, fmt.Errorf("cannot cast JSON encoded address string (%s) to address: decoded checksum unmatch", addrStr)
}
- return addr[:], nil
+ return decoded[:addressByteSize], nil
case ArrayStatic, ArrayDynamic:
if t.childTypes[0].abiTypeID == Byte && bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
var byteArr []byte
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
index b5290c9d5..49083fdea 100644
--- a/data/abi/abi_json_test.go
+++ b/data/abi/abi_json_test.go
@@ -17,12 +17,39 @@
package abi
import (
+ "crypto/rand"
+ "math/big"
"testing"
+ "github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/test/partitiontest"
"github.com/stretchr/testify/require"
)
+func TestRandomAddressEquality(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
+ var addrBasics basics.Address
+ var addrABI []byte = make([]byte, addressByteSize)
+
+ for testCaseIndex := 0; testCaseIndex < addressTestCaseCount; testCaseIndex++ {
+ randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
+ require.NoError(t, err, "cryptographic random int init fail")
+
+ randomAddrInt.FillBytes(addrBasics[:])
+ randomAddrInt.FillBytes(addrABI)
+
+ checkSumBasics := addrBasics.GetChecksum()
+ checkSumABI, err := addressCheckSum(addrABI)
+ require.NoError(t, err, "ABI compute checksum for address slice failed")
+
+ require.Equal(t, checkSumBasics, checkSumABI,
+ "basics.Address computed checksum %v not equal to data.abi computed checksum %v",
+ )
+ }
+}
+
func TestJSONtoInterfaceValid(t *testing.T) {
partitiontest.PartitionTest(t)
var testCases = []struct {
diff --git a/data/abi/abi_type.go b/data/abi/abi_type.go
index 0ff240950..f403916b2 100644
--- a/data/abi/abi_type.go
+++ b/data/abi/abi_type.go
@@ -60,6 +60,15 @@ const (
Tuple
)
+const (
+ addressByteSize = 32
+ checksumByteSize = 4
+ singleByteSize = 1
+ singleBoolSize = 1
+ lengthEncodeByteSize = 2
+ abiEncodingLengthLimit = 1 << 16
+)
+
// Type is the struct that stores information about an ABI value's type.
type Type struct {
abiTypeID BaseType
@@ -405,13 +414,6 @@ func findBoolLR(typeList []Type, index int, delta int) int {
return until
}
-const (
- addressByteSize = 32
- singleByteSize = 1
- singleBoolSize = 1
- lengthEncodeByteSize = 2
-)
-
// ByteLen method calculates the byte length of a static ABI type.
func (t Type) ByteLen() (int, error) {
switch t.abiTypeID {
diff --git a/data/basics/address.go b/data/basics/address.go
index 412b7bf75..5eed1c512 100644
--- a/data/basics/address.go
+++ b/data/basics/address.go
@@ -24,6 +24,23 @@ import (
"github.com/algorand/go-algorand/crypto"
)
+// NOTE: Another (partial) implementation of `basics.Address` is in `data/abi`.
+// The reasons for not using this `Address` in `data/abi` are:
+// - `data/basics` has C dependencies (`go-algorand/crypto`)
+// - `go-algorand-sdk` depends on `go-algorand` for `ABI`
+// - if `go-algorand`'s ABI used `basics.Address`, it would be
+//   impossible to bump the `go-algorand` version in `go-algorand-sdk`
+
+// This is discussed in:
+// - ISSUE https://github.com/algorand/go-algorand/issues/3355
+// - PR https://github.com/algorand/go-algorand/pull/3375
+
+// There are two solutions:
+// - One is to refactor `crypto.Digest`, `crypto.Hash` and `basics.Address`
+//   into packages that do not need the `libsodium` crypto dependency
+// - The other is to wrap `libsodium` in a driver interface so that the crypto
+//   package is importable (even if `libsodium` does not exist)
+
type (
// Address is a unique identifier corresponding to ownership of money
Address crypto.Digest
diff --git a/logging/telemetryhook_test.go b/logging/telemetryhook_test.go
index 85e776867..1846ed6f4 100644
--- a/logging/telemetryhook_test.go
+++ b/logging/telemetryhook_test.go
@@ -225,5 +225,10 @@ func TestAsyncTelemetryHook_QueueDepth(t *testing.T) {
close(filling)
hook.Close()
- require.Equal(t, maxDepth, len(testHook.entries()))
+ hookEntries := len(testHook.entries())
+ require.GreaterOrEqual(t, hookEntries, maxDepth)
+ // the anonymous goroutine in createAsyncHookLevels might pull an entry off the pending list before
+	// writing it out to the underlying hook. When that happens, the total number of sent entries could
+	// be one higher than the maxDepth.
+ require.LessOrEqual(t, hookEntries, maxDepth+1)
}
diff --git a/node/netprio.go b/node/netprio.go
index c8d7031a6..c322155eb 100644
--- a/node/netprio.go
+++ b/node/netprio.go
@@ -97,7 +97,7 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte {
}
signer := maxPart.VotingSigner()
- ephID := basics.OneTimeIDForRound(voteRound, signer.KeyDilution(proto))
+ ephID := basics.OneTimeIDForRound(voteRound, signer.KeyDilution(proto.DefaultKeyDilution))
rs.Round = voteRound
rs.Sender = maxPart.Address()
diff --git a/scripts/build_package.sh b/scripts/build_package.sh
index 004aa6535..3065f81bd 100755
--- a/scripts/build_package.sh
+++ b/scripts/build_package.sh
@@ -91,7 +91,11 @@ mkdir ${PKG_ROOT}/genesis
genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p ${PKG_ROOT}/genesis/${dir}
- cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json ${PKG_ROOT}/genesis/${dir}/
+ if [ -f "${REPO_DIR}/gen/${dir}/genesis.json" ]; then
+ cp ${REPO_DIR}/gen/${dir}/genesis.json ${PKG_ROOT}/genesis/${dir}/
+ else
+ cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json ${PKG_ROOT}/genesis/${dir}/
+ fi
if [ $? -ne 0 ]; then exit 1; fi
done
# Copy the appropriate network genesis.json for our default (in root ./genesis folder)
diff --git a/scripts/release/mule/sign/sign.sh b/scripts/release/mule/sign/sign.sh
index 7c9b7e388..cb0cbf42d 100755
--- a/scripts/release/mule/sign/sign.sh
+++ b/scripts/release/mule/sign/sign.sh
@@ -4,6 +4,7 @@
# TODO: This needs to be reworked a bit to support Darwin.
set -exo pipefail
+shopt -s nullglob
echo
date "+build_release begin SIGN stage %Y%m%d_%H%M%S"
@@ -100,8 +101,10 @@ for os in "${OS_TYPES[@]}"; do
gpg -u "$SIGNING_KEY_ADDR" --clearsign "$HASHFILE"
STATUSFILE="build_status_${CHANNEL}_${os}-${arch}_${VERSION}"
- gpg -u "$SIGNING_KEY_ADDR" --clearsign "$STATUSFILE"
- gzip -c "$STATUSFILE.asc" > "$STATUSFILE.asc.gz"
+ if [[ -f "$STATUSFILE" ]]; then
+ gpg -u "$SIGNING_KEY_ADDR" --clearsign "$STATUSFILE"
+ gzip -c "$STATUSFILE.asc" > "$STATUSFILE.asc.gz"
+ fi
)
fi
fi
diff --git a/test/scripts/e2e_subs/asset-misc.sh b/test/scripts/e2e_subs/asset-misc.sh
index 849b86484..73c018eb8 100755
--- a/test/scripts/e2e_subs/asset-misc.sh
+++ b/test/scripts/e2e_subs/asset-misc.sh
@@ -15,6 +15,7 @@ ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTC=$(${gcmd} account new|awk '{ print $6 }')
ACCOUNTD=$(${gcmd} account new|awk '{ print $6 }')
+ACCOUNTE=$(${gcmd} account new|awk '{ print $6 }')
ASSET_NAME='Birlot : décollage vs. ࠶🦪'
@@ -47,4 +48,105 @@ else
exit 1
fi
+# Test Scenario - check addresses are set correctly
+# case 1: asset created without specifying manager, reserve, freezer, or clawback
+MANAGER_ADDRESS=$(${gcmd} asset info --assetid ${ASSET_ID} |grep 'Manager address'|awk '{ print $3 }')
+RESERVE_ADDRESS=$(${gcmd} asset info --assetid ${ASSET_ID} |grep 'Reserve address'|awk '{ print $3 }')
+FREEZE_ADDRESS=$(${gcmd} asset info --assetid ${ASSET_ID} |grep 'Freeze address'|awk '{ print $3 }')
+CLAWBACK_ADDRESS=$(${gcmd} asset info --assetid ${ASSET_ID} |grep 'Clawback address'|awk '{ print $3 }')
+
+# check manager, reserve, freeze, and clawback are by default the creator
+if [ "$MANAGER_ADDRESS" = "$ACCOUNT" ] \
+ && [ "$RESERVE_ADDRESS" = "$ACCOUNT" ] \
+ && [ "$FREEZE_ADDRESS" = "$ACCOUNT" ] \
+ && [ "$CLAWBACK_ADDRESS" = "$ACCOUNT" ]; then
+ echo ok
+else
+ date '+asset-misc asset manager, reserve, freezer, and clawback should be creator error %Y%m%d_%H%M%S'
+ exit 1
+fi
+
+# case 2: asset created with no manager, no reserve, no freezer, and no clawback
+${gcmd} asset create --creator "${ACCOUNT}" --no-manager --no-reserve --no-freezer --no-clawback --name "${ASSET_NAME}" --unitname iamisc --total 1000000000000 --asseturl "${ASSET_URL}"
+
+IMMUTABLE_ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname iamisc|grep 'Asset ID'|awk '{ print $3 }')
+
+IMMUTABLE_MANAGER_ADDRESS=$(${gcmd} asset info --assetid ${IMMUTABLE_ASSET_ID} |grep 'Manager address'|awk '{ print $3 }')
+IMMUTABLE_RESERVE_ADDRESS=$(${gcmd} asset info --assetid ${IMMUTABLE_ASSET_ID} |grep 'Reserve address'|awk -F "[()]" '{ print $2 }')
+IMMUTABLE_FREEZE_ADDRESS=$(${gcmd} asset info --assetid ${IMMUTABLE_ASSET_ID} |grep 'Freeze address'|awk '{ print $3 }')
+IMMUTABLE_CLAWBACK_ADDRESS=$(${gcmd} asset info --assetid ${IMMUTABLE_ASSET_ID} |grep 'Clawback address'|awk '{ print $3 }')
+
+# goal asset info returns the creator's address as the reserve address when reserve address is empty
+# check goal/asset.go
+if [ "$IMMUTABLE_MANAGER_ADDRESS" = "" ] \
+ && [ "$IMMUTABLE_RESERVE_ADDRESS" = "Empty. Defaulting to creator" ] \
+ && [ "$IMMUTABLE_FREEZE_ADDRESS" = "" ] \
+ && [ "$IMMUTABLE_CLAWBACK_ADDRESS" = "" ]; then
+ echo ok
+else
+ date '+asset-misc immutable asset manager/reserve/freezer/clawback addresses error %Y%m%d_%H%M%S'
+ exit 1
+fi
+
+# case 3: asset created with manager, reserve, freezer, and clawback different from the creator
+${gcmd} asset create --creator "${ACCOUNT}" --manager "${ACCOUNTB}" --reserve "${ACCOUNTC}" --freezer "${ACCOUNTD}" --clawback "${ACCOUNTE}" --name "${ASSET_NAME}" --unitname dma --total 1000000000000 --asseturl "${ASSET_URL}"
+
+DIFF_MANAGER_ASSET_ID=$(${gcmd} asset info --creator $ACCOUNT --unitname dma|grep 'Asset ID'|awk '{ print $3 }')
+
+DMA_MANAGER_ADDRESS=$(${gcmd} asset info --assetid ${DIFF_MANAGER_ASSET_ID} |grep 'Manager address'|awk '{ print $3 }')
+DMA_RESERVE_ADDRESS=$(${gcmd} asset info --assetid ${DIFF_MANAGER_ASSET_ID} |grep 'Reserve address'|awk '{ print $3 }')
+DMA_FREEZE_ADDRESS=$(${gcmd} asset info --assetid ${DIFF_MANAGER_ASSET_ID} |grep 'Freeze address'|awk '{ print $3 }')
+DMA_CLAWBACK_ADDRESS=$(${gcmd} asset info --assetid ${DIFF_MANAGER_ASSET_ID} |grep 'Clawback address'|awk '{ print $3 }')
+
+if [ "$DMA_MANAGER_ADDRESS" = "$ACCOUNTB" ] \
+ && [ "$DMA_RESERVE_ADDRESS" = "$ACCOUNTC" ] \
+ && [ "$DMA_FREEZE_ADDRESS" = "$ACCOUNTD" ] \
+ && [ "$DMA_CLAWBACK_ADDRESS" = "$ACCOUNTE" ]; then
+ echo ok
+else
+ date '+asset-misc asset addresses with diff manager/reserve/freeze/clawback error %Y%m%d_%H%M%S'
+ exit 1
+fi
+
+# Test Scenario - check if asset is created successfully when passed in different combination of flags for addresses
+# case 1: create asset with both manager flag and no-manager flag
+if ${gcmd} asset create --creator "${ACCOUNT}" --no-manager --manager "${ACCOUNTB}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
+ date '+asset-misc asset with --manager and --no-manager flags created successfully error %Y%m%d_%H%M%S'
+ exit 1
+else
+ echo "Expected. Cannot create asset with both manager flag and no-manager flag"
+fi
+
+# case 2: create asset with both reserve flag and no-reserve flag
+if ${gcmd} asset create --creator "${ACCOUNT}" --no-reserve --reserve "${ACCOUNTC}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
+ date '+asset-misc asset with --reserve and --no-reserve flags created successfully error %Y%m%d_%H%M%S'
+ exit 1
+else
+ echo "Expected. Cannot create asset with both reserve flag and no-reserve flag"
+fi
+
+# case 3: create asset with both freezer flag and no-freezer flag
+if ${gcmd} asset create --creator "${ACCOUNT}" --no-freezer --freezer "${ACCOUNTD}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
+ date '+asset-misc asset with --freezer and --no-freezer flags created successfully error %Y%m%d_%H%M%S'
+ exit 1
+else
+ echo "Expected. Cannot create asset with both freezer flag and no-freezer flag"
+fi
+
+# case 4: create asset with both clawback flag and no-clawback flag
+if ${gcmd} asset create --creator "${ACCOUNT}" --no-clawback --clawback "${ACCOUNTE}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
+ date '+asset-misc asset with --clawback and --no-clawback flags created successfully error %Y%m%d_%H%M%S'
+ exit 1
+else
+ echo "Expected. Cannot create asset with both clawback flag and no-clawback flag"
+fi
+
+# case 5: create asset with reserve flag, no-freezer flag and no-clawback flag
+if ${gcmd} asset create --creator "${ACCOUNT}" --no-freezer --no-clawback --reserve "${ACCOUNTE}" --name "${ASSET_NAME}" --unitname errmisc --total 1000000000000 --asseturl "${ASSET_URL}"; then
+ echo "ok"
+else
+ date '+asset-misc asset with independent flags created unsuccessfully error %Y%m%d_%H%M%S'
+ exit 1
+fi
+
date '+asset-misc finish %Y%m%d_%H%M%S'
diff --git a/test/testdata/consensus/catchpointtestingprotocol.json b/test/testdata/consensus/catchpointtestingprotocol.json
index 25ad217e2..a006e7d96 100644
--- a/test/testdata/consensus/catchpointtestingprotocol.json
+++ b/test/testdata/consensus/catchpointtestingprotocol.json
@@ -36,7 +36,6 @@
"AgreementFilterTimeout": 1000000000,
"AgreementFilterTimeoutPeriod0": 1000000000,
"FastRecoveryLambda": 300000000000,
- "FastPartitionRecovery": true,
"PaysetCommit": 1,
"MaxTimestampIncrement": 25,
"SupportSignedTxnInBlock": true,
diff --git a/tools/debug/algodump/README.md b/tools/debug/algodump/README.md
new file mode 100644
index 000000000..c3108b126
--- /dev/null
+++ b/tools/debug/algodump/README.md
@@ -0,0 +1,33 @@
+# Algodump
+
+This is a tool for monitoring the messages sent over Algorand's network
+protocol.
+
+By default, the tool connects to a network in the same way that `algod`
+does, using SRV records and connecting to 4 relays from that list.
+
+You can change the network by using the `-network` flag; you will likely
+also need to specify the correct `-genesis` flag for that network (e.g.,
+`-network testnet -genesis testnet-v1.0`).
+
+You can also instruct `algodump` to connect to just one server. This may
+be useful if you want to debug a specific server, or if you want to avoid
+seeing the same message received from multiple relays. To do this, use
+the `-server` flag (e.g., `-server r-po.algorand-mainnet.network:4160`).
+
+By default, `algodump` will print all messages received. If you want to
+print just some message types, use the `-tags` flag (e.g., `-tags TX`
+to only see transactions, or `-tags AV` to see votes). The full list
+of tag types is in `protocol/tags.go`.
+
+Although `algodump` will print all message types, it might not know how
+to meaningfully display the contents of some message types. If you
+are trying to monitor messages that `algodump` doesn't know how to
+pretty-print, you will see just where the message came from, the message
+type, and the length of its payload. You can add more formatting code
+to print the contents of other messages by adding more cases to the
+`switch` statement in `dumpHandler.Handle()` in `main.go`.
+
+Finally, `algodump` by default truncates the addresses it prints (e.g.,
+the sender of a transaction or the address of a voter); you can use the
+`-long` flag to print full-length addresses.
diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go
new file mode 100644
index 000000000..b5f95a1be
--- /dev/null
+++ b/tools/debug/algodump/main.go
@@ -0,0 +1,188 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/algorand/go-deadlock"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+var serverAddress = flag.String("server", "", "Server address (host:port)")
+var genesisID = flag.String("genesis", "mainnet-v1.0", "Genesis ID")
+var networkID = flag.String("network", "mainnet", "Network ID")
+var tags = flag.String("tags", "*", "Comma-separated list of tags to dump, or * for all")
+var longFlag = flag.Bool("long", false, "Print full-length addresses and digests")
+
+type dumpHandler struct {
+ tags map[protocol.Tag]bool
+}
+
+func shortaddr(addr basics.Address) string {
+ if *longFlag {
+ return addr.String()
+ }
+ return fmt.Sprintf("%s..", addr.String()[0:8])
+}
+
+func shortdigest(d crypto.Digest) string {
+ if *longFlag {
+ return d.String()
+ }
+ return fmt.Sprintf("%s..", d.String()[0:8])
+}
+
+func (dh *dumpHandler) Handle(msg network.IncomingMessage) network.OutgoingMessage {
+ var src string
+
+ hp, ok := msg.Sender.(network.HTTPPeer)
+ if ok {
+ a := hp.GetAddress()
+ if a != *serverAddress {
+ src = " " + hp.GetAddress()
+ }
+ }
+
+ if dh.tags != nil && !dh.tags[msg.Tag] {
+ return network.OutgoingMessage{Action: network.Ignore}
+ }
+
+ ts := time.Now().Format("15:04:05.000000")
+ var data string
+ switch msg.Tag {
+ case protocol.AgreementVoteTag:
+ var v agreement.UnauthenticatedVote
+ err := protocol.Decode(msg.Data, &v)
+ if err != nil {
+ data = fmt.Sprintf("[decode error: %v]", err)
+ goto print
+ }
+
+ data = fmt.Sprintf("%d/%d/%d from %s for %s", v.R.Round, v.R.Period, v.R.Step, shortaddr(v.R.Sender), shortdigest(v.R.Proposal.BlockDigest))
+
+ case protocol.ProposalPayloadTag:
+ var p agreement.TransmittedPayload
+ err := protocol.Decode(msg.Data, &p)
+ if err != nil {
+ data = fmt.Sprintf("[decode error: %v]", err)
+ goto print
+ }
+
+ data = fmt.Sprintf("proposal %s", shortdigest(crypto.Digest(p.Block.Hash())))
+
+ case protocol.TxnTag:
+ dec := protocol.NewDecoderBytes(msg.Data)
+ for {
+ var stx transactions.SignedTxn
+ err := dec.Decode(&stx)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ data = fmt.Sprintf("[decode error: %v]", err)
+ goto print
+ }
+ if len(data) > 0 {
+ data = data + ", "
+ }
+ data = data + fmt.Sprintf("%s from %s", stx.Txn.Type, shortaddr(stx.Txn.Sender))
+ }
+ }
+
+print:
+ fmt.Printf("%s%s %s [%d bytes] %s\n", ts, src, msg.Tag, len(msg.Data), data)
+ return network.OutgoingMessage{Action: network.Ignore}
+}
+
+func setDumpHandlers(n network.GossipNode) {
+ var dh dumpHandler
+
+ if *tags == "*" {
+ // Dump all tags: nil tags
+ } else if *tags == "" {
+ // Dump nothing: empty tags
+ dh.tags = make(map[protocol.Tag]bool)
+ } else {
+ dh.tags = make(map[protocol.Tag]bool)
+ for _, t := range strings.Split(*tags, ",") {
+ dh.tags[protocol.Tag(t)] = true
+ fmt.Printf("TAG <%s>\n", t)
+ }
+ }
+
+ h := []network.TaggedMessageHandler{
+ {Tag: protocol.AgreementVoteTag, MessageHandler: &dh},
+ {Tag: protocol.CompactCertSigTag, MessageHandler: &dh},
+ {Tag: protocol.MsgOfInterestTag, MessageHandler: &dh},
+ {Tag: protocol.MsgDigestSkipTag, MessageHandler: &dh},
+ {Tag: protocol.NetPrioResponseTag, MessageHandler: &dh},
+ // {Tag: protocol.PingTag, MessageHandler: &dh},
+ // {Tag: protocol.PingReplyTag, MessageHandler: &dh},
+ {Tag: protocol.ProposalPayloadTag, MessageHandler: &dh},
+ {Tag: protocol.TopicMsgRespTag, MessageHandler: &dh},
+ {Tag: protocol.TxnTag, MessageHandler: &dh},
+ {Tag: protocol.UniCatchupReqTag, MessageHandler: &dh},
+ {Tag: protocol.UniEnsBlockReqTag, MessageHandler: &dh},
+ {Tag: protocol.VoteBundleTag, MessageHandler: &dh},
+ }
+ n.RegisterHandlers(h)
+}
+
+func main() {
+ log := logging.Base()
+ log.SetLevel(logging.Debug)
+ log.SetOutput(os.Stderr)
+
+ if *serverAddress == "" {
+ log.Infof("No server address specified; defaulting to DNS bootstrapping")
+ }
+
+ deadlock.Opts.Disable = true
+
+ flag.Parse()
+
+ conf, _ := config.LoadConfigFromDisk("/dev/null")
+ if *serverAddress != "" {
+ conf.DNSBootstrapID = ""
+ }
+
+ n, _ := network.NewWebsocketGossipNode(log,
+ conf,
+ []string{*serverAddress},
+ *genesisID,
+ protocol.NetworkID(*networkID))
+ setDumpHandlers(n)
+ n.Start()
+
+ for {
+ time.Sleep(time.Second)
+ }
+}
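The README above notes that message types algodump cannot pretty-print only show the source, tag, and payload length, and that new cases can be added to the switch in dumpHandler.Handle(). A hypothetical helper (not part of this commit) that such a case, or a default branch, could call to show a short hex preview of an otherwise-unknown payload; only msg.Data and msg.Tag from network.IncomingMessage are assumed, as used elsewhere in main.go.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/network"
)

// rawPreview formats a short hex preview of a payload that algodump does not
// know how to pretty-print: the total length plus the first few bytes.
func rawPreview(msg network.IncomingMessage) string {
	n := len(msg.Data)
	if n > 16 {
		n = 16
	}
	return fmt.Sprintf("%d bytes, first %d: % x", len(msg.Data), n, msg.Data[:n])
}

A default case in the switch could then set data = rawPreview(msg) instead of leaving data empty.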