diff options
author | Eric Warehime <eric.warehime@gmail.com> | 2022-11-07 09:39:44 -0800 |
---|---|---|
committer | Eric Warehime <eric.warehime@gmail.com> | 2022-11-07 09:39:44 -0800 |
commit | d919d6816754156906ba92a80466de37edbac4f7 (patch) | |
tree | 561e66095200e0e9798fca2b39eeeeacead5bf80 | |
parent | 7189ff0bbefe65ebae68f72e08cce87d8ff66ec0 (diff) | |
parent | 0eae1c0cf2582220feb451741eca524f782d3c6f (diff) |
Merge remote-tracking branch 'upstream/master' into oapi-v1.11.0-backport-changes
-rw-r--r-- | .golangci.yml | 5 | ||||
-rw-r--r-- | cmd/goal/asset.go | 5 | ||||
-rw-r--r-- | daemon/algod/api/server/v2/handlers.go | 3 | ||||
-rw-r--r-- | data/transactions/logic/assembler_test.go | 4 | ||||
-rw-r--r-- | data/transactions/logic/evalAppTxn_test.go | 8 | ||||
-rw-r--r-- | ledger/acctupdates.go | 27 | ||||
-rw-r--r-- | ledger/evalindexer.go | 2 | ||||
-rw-r--r-- | ledger/internal/eval.go | 4 | ||||
-rw-r--r-- | ledger/internal/eval_test.go | 2 | ||||
-rw-r--r-- | ledger/internal/prefetcher/prefetcher_alignment_test.go | 1 | ||||
-rw-r--r-- | ledger/ledger.go | 5 | ||||
-rw-r--r-- | ledger/lruaccts.go | 40 | ||||
-rw-r--r-- | ledger/lruresources.go | 40 | ||||
-rw-r--r-- | shared/pingpong/config.go | 2 | ||||
-rw-r--r-- | shared/pingpong/pingpong.go | 39 | ||||
-rw-r--r-- | test/e2e-go/features/participation/participationExpiration_test.go | 2 | ||||
-rw-r--r-- | test/framework/fixtures/restClientFixture.go | 10 | ||||
-rwxr-xr-x | test/scripts/e2e_subs/app-assets.sh | 10 |
18 files changed, 173 insertions, 36 deletions
diff --git a/.golangci.yml b/.golangci.yml index 62490feca..0122edf82 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -16,6 +16,7 @@ linters: - revive - staticcheck - typecheck + - paralleltest severity: default-severity: error @@ -115,6 +116,10 @@ issues: # - revive - staticcheck - typecheck + # Ignore missing parallel tests in existing packages + - path: (agreement|catchup|cmd|config|crypto|daemon|data|gen|ledger|logging|netdeploy|network|node|protocol|rpcs|shared|stateproof|test|tools|util).*_test.go + linters: + - paralleltest # Add all linters here -- Comment this block out for testing linters - path: test/linttest/lintissues\.go linters: diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go index 8a0169685..89da4ef0d 100644 --- a/cmd/goal/asset.go +++ b/cmd/goal/asset.go @@ -809,8 +809,9 @@ var infoAssetCmd = &cobra.Command{ fmt.Printf("Asset ID: %d\n", assetID) fmt.Printf("Creator: %s\n", asset.Params.Creator) - reportInfof("Asset name: %s\n", derefString(asset.Params.Name)) - reportInfof("Unit name: %s\n", derefString(asset.Params.UnitName)) + reportInfof("Asset name: %s", derefString(asset.Params.Name)) + reportInfof("Unit name: %s", derefString(asset.Params.UnitName)) + reportInfof("URL: %s", derefString(asset.Params.Url)) fmt.Printf("Maximum issue: %s %s\n", assetDecimalsFmt(asset.Params.Total, asset.Params.Decimals), derefString(asset.Params.UnitName)) fmt.Printf("Reserve amount: %s %s\n", assetDecimalsFmt(res.Amount, asset.Params.Decimals), derefString(asset.Params.UnitName)) fmt.Printf("Issued: %s %s\n", assetDecimalsFmt(asset.Params.Total-res.Amount, asset.Params.Decimals), derefString(asset.Params.UnitName)) diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 34f814ffa..d528d4f44 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -1019,7 +1019,8 @@ func (v2 *Handlers) PendingTransactionInformation(ctx echo.Context, txid string, // Encoding wasn't working 
well without embedding "real" objects. response := PreEncodedTxInfo{ - Txn: txn.Txn, + Txn: txn.Txn, + PoolError: txn.PoolError, } if txn.ConfirmedRound != 0 { diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 5da1708b9..d53b6fc16 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -2800,10 +2800,8 @@ func TestGetSpec(t *testing.T) { require.Equal(t, "unknown opcode: nonsense", ops.Errors[1].Err.Error()) } -func TestAddPseudoDocTags(t *testing.T) { +func TestAddPseudoDocTags(t *testing.T) { //nolint:paralleltest // Not parallel because it modifies pseudoOps and opDocByName which are global maps partitiontest.PartitionTest(t) - // Not parallel because it modifies pseudoOps and opDocByName which are global maps - // t.Parallel() defer func() { delete(pseudoOps, "tests") delete(opDocByName, "multiple") diff --git a/data/transactions/logic/evalAppTxn_test.go b/data/transactions/logic/evalAppTxn_test.go index ea7bd6885..25a073ba3 100644 --- a/data/transactions/logic/evalAppTxn_test.go +++ b/data/transactions/logic/evalAppTxn_test.go @@ -1761,9 +1761,7 @@ int 1 ` for _, unified := range []bool{true, false} { - t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) { - // t.Parallel() NO! unified variable is actually shared - + t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) { //nolint:paralleltest // NO t.Parallel(). unified variable is actually shared ep, parentTx, ledger := MakeSampleEnv() ep.Proto.UnifyInnerTxIDs = unified @@ -2226,10 +2224,8 @@ func TestInnerTxIDCaching(t *testing.T) { parentAppID := basics.AppIndex(888) childAppID := basics.AppIndex(222) - for _, unified := range []bool{true, false} { + for _, unified := range []bool{true, false} { //nolint:paralleltest // NO t.Parallel(). unified variable is actually shared t.Run(fmt.Sprintf("unified=%t", unified), func(t *testing.T) { - // t.Parallel() NO! 
unified variable is actually shared - ep, parentTx, ledger := MakeSampleEnv() ep.Proto.UnifyInnerTxIDs = unified diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 8f1a572d1..836a7c670 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -328,6 +328,17 @@ func (au *accountUpdates) close() { au.baseKVs.prune(0) } +// flushCaches flushes any pending data in caches so that it is fully available during future lookups. +func (au *accountUpdates) flushCaches() { + au.accountsMu.Lock() + + au.baseAccounts.flushPendingWrites() + au.baseResources.flushPendingWrites() + au.baseKVs.flushPendingWrites() + + au.accountsMu.Unlock() +} + func (au *accountUpdates) LookupResource(rnd basics.Round, addr basics.Address, aidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.AccountResource, basics.Round, error) { return au.lookupResource(rnd, addr, aidx, ctype, true /* take lock */) } @@ -816,6 +827,8 @@ type accountUpdatesLedgerEvaluator struct { prevHeader bookkeeping.BlockHeader } +func (aul *accountUpdatesLedgerEvaluator) FlushCaches() {} + // GenesisHash returns the genesis hash func (aul *accountUpdatesLedgerEvaluator) GenesisHash() crypto.Digest { return aul.au.ledger.GenesisHash() @@ -1327,6 +1340,12 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address, return macct.AccountResource(), rnd, nil } + // check baseAccoiunts again to see if it does not exist + if au.baseResources.readNotFound(addr, aidx) { + // it seems the account doesnt exist + return ledgercore.AccountResource{}, rnd, nil + } + if synchronized { au.accountsMu.RUnlock() needUnlock = false @@ -1346,6 +1365,7 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address, au.baseResources.writePending(persistedData, addr) return persistedData.AccountResource(), rnd, nil } + au.baseResources.writeNotFoundPending(addr, aidx) // otherwise return empty return ledgercore.AccountResource{}, rnd, nil } @@ -1428,6 +1448,12 @@ func (au 
*accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add return macct.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil } + // check baseAccoiunts again to see if it does not exist + if au.baseAccounts.readNotFound(addr) { + // it seems the account doesnt exist + return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, nil + } + if synchronized { au.accountsMu.RUnlock() needUnlock = false @@ -1447,6 +1473,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add au.baseAccounts.writePending(persistedData) return persistedData.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil } + au.baseAccounts.writeNotFoundPending(addr) // otherwise return empty return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, nil } diff --git a/ledger/evalindexer.go b/ledger/evalindexer.go index daefd3035..3e2d8ca34 100644 --- a/ledger/evalindexer.go +++ b/ledger/evalindexer.go @@ -79,6 +79,8 @@ type indexerLedgerConnector struct { roundResources EvalForIndexerResources } +func (l indexerLedgerConnector) FlushCaches() {} + // BlockHdr is part of LedgerForEvaluator interface. 
func (l indexerLedgerConnector) BlockHdr(round basics.Round) (bookkeeping.BlockHeader, error) { if round != l.latestRound { diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go index a149c7d01..b42d24f6b 100644 --- a/ledger/internal/eval.go +++ b/ledger/internal/eval.go @@ -603,6 +603,7 @@ type LedgerForEvaluator interface { GenesisProto() config.ConsensusParams LatestTotals() (basics.Round, ledgercore.AccountTotals, error) VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error) + FlushCaches() } // EvaluatorOptions defines the evaluator creation options @@ -1500,6 +1501,9 @@ func (validator *evalTxValidator) run() { // AddBlock: Eval(context.Background(), l, blk, false, txcache, nil) // tracker: Eval(context.Background(), l, blk, false, txcache, nil) func Eval(ctx context.Context, l LedgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) { + // flush the pending writes in the cache to make everything read so far available during eval + l.FlushCaches() + eval, err := StartEvaluator(l, blk.BlockHeader, EvaluatorOptions{ PaysetHint: len(blk.Payset), diff --git a/ledger/internal/eval_test.go b/ledger/internal/eval_test.go index da73bc60a..495ff6097 100644 --- a/ledger/internal/eval_test.go +++ b/ledger/internal/eval_test.go @@ -519,6 +519,8 @@ func (ledger *evalTestLedger) StartEvaluator(hdr bookkeeping.BlockHeader, payset }) } +func (ledger *evalTestLedger) FlushCaches() {} + // GetCreatorForRound takes a CreatableIndex and a CreatableType and tries to // look up a creator address, setting ok to false if the query succeeded but no // creator was found. 
diff --git a/ledger/internal/prefetcher/prefetcher_alignment_test.go b/ledger/internal/prefetcher/prefetcher_alignment_test.go index 5d78ce286..2b553c974 100644 --- a/ledger/internal/prefetcher/prefetcher_alignment_test.go +++ b/ledger/internal/prefetcher/prefetcher_alignment_test.go @@ -173,6 +173,7 @@ func (l *prefetcherAlignmentTestLedger) LatestTotals() (basics.Round, ledgercore func (l *prefetcherAlignmentTestLedger) VotersForStateProof(basics.Round) (*ledgercore.VotersForRound, error) { return nil, nil } +func (l *prefetcherAlignmentTestLedger) FlushCaches() {} func parseLoadedAccountDataEntries(loadedAccountDataEntries []prefetcher.LoadedAccountDataEntry) map[basics.Address]struct{} { if len(loadedAccountDataEntries) == 0 { diff --git a/ledger/ledger.go b/ledger/ledger.go index 99fe75966..09ec1b3cd 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -812,6 +812,11 @@ func (l *Ledger) StartEvaluator(hdr bookkeeping.BlockHeader, paysetHint, maxTxnB }) } +// FlushCaches flushes any pending data in caches so that it is fully available during future lookups. +func (l *Ledger) FlushCaches() { + l.accts.flushCaches() +} + // Validate uses the ledger to validate block blk as a candidate next block. // It returns an error if blk is not the expected next block, or if blk is // not a valid block (e.g., it has duplicate transactions, overspends some diff --git a/ledger/lruaccts.go b/ledger/lruaccts.go index 2c8752c4b..f698f9de7 100644 --- a/ledger/lruaccts.go +++ b/ledger/lruaccts.go @@ -37,6 +37,9 @@ type lruAccounts struct { log logging.Logger // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingAccounts entries pendingWritesWarnThreshold int + + pendingNotFound chan basics.Address + notFound map[basics.Address]struct{} } // init initializes the lruAccounts for use. 
@@ -45,6 +48,8 @@ func (m *lruAccounts) init(log logging.Logger, pendingWrites int, pendingWritesW m.accountsList = newPersistedAccountList().allocateFreeNodes(pendingWrites) m.accounts = make(map[basics.Address]*persistedAccountDataListNode, pendingWrites) m.pendingAccounts = make(chan persistedAccountData, pendingWrites) + m.notFound = make(map[basics.Address]struct{}, pendingWrites) + m.pendingNotFound = make(chan basics.Address, pendingWrites) m.log = log m.pendingWritesWarnThreshold = pendingWritesWarnThreshold } @@ -58,6 +63,13 @@ func (m *lruAccounts) read(addr basics.Address) (data persistedAccountData, has return persistedAccountData{}, false } +// readNotFound returns whether we have attempted to read this address but it did not exist in the db. +// thread locking semantics : read lock +func (m *lruAccounts) readNotFound(addr basics.Address) bool { + _, ok := m.notFound[addr] + return ok +} + // flushPendingWrites flushes the pending writes to the main lruAccounts cache. // thread locking semantics : write lock func (m *lruAccounts) flushPendingWrites() { @@ -65,12 +77,25 @@ func (m *lruAccounts) flushPendingWrites() { if pendingEntriesCount >= m.pendingWritesWarnThreshold { m.log.Warnf("lruAccounts: number of entries in pendingAccounts(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold) } + +outer: for ; pendingEntriesCount > 0; pendingEntriesCount-- { select { case pendingAccountData := <-m.pendingAccounts: m.write(pendingAccountData) default: - return + break outer + } + } + + pendingEntriesCount = len(m.pendingNotFound) +outer2: + for ; pendingEntriesCount > 0; pendingEntriesCount-- { + select { + case addr := <-m.pendingNotFound: + m.notFound[addr] = struct{}{} + default: + break outer2 } } } @@ -85,6 +110,16 @@ func (m *lruAccounts) writePending(acct persistedAccountData) { } } +// writeNotFoundPending tags an address as not existing in the db. 
+// the function doesn't block, and in case of a buffer overflow the entry would not be added. +// thread locking semantics : no lock is required. +func (m *lruAccounts) writeNotFoundPending(addr basics.Address) { + select { + case m.pendingNotFound <- addr: + default: + } +} + // write a single persistedAccountData to the lruAccounts cache. // when writing the entry, the round number would be used to determine if it's a newer // version of what's already on the cache or not. In all cases, the entry is going @@ -117,5 +152,8 @@ func (m *lruAccounts) prune(newSize int) (removed int) { m.accountsList.remove(back) removed++ } + + // clear the notFound list + m.notFound = make(map[basics.Address]struct{}, len(m.notFound)) return } diff --git a/ledger/lruresources.go b/ledger/lruresources.go index 8ab62f0ff..70a2a4c14 100644 --- a/ledger/lruresources.go +++ b/ledger/lruresources.go @@ -48,6 +48,9 @@ type lruResources struct { // pendingWritesWarnThreshold is the threshold beyond we would write a warning for exceeding the number of pendingResources entries pendingWritesWarnThreshold int + + pendingNotFound chan accountCreatable + notFound map[accountCreatable]struct{} } // init initializes the lruResources for use. 
@@ -56,6 +59,8 @@ func (m *lruResources) init(log logging.Logger, pendingWrites int, pendingWrites m.resourcesList = newPersistedResourcesList().allocateFreeNodes(pendingWrites) m.resources = make(map[accountCreatable]*persistedResourcesDataListNode, pendingWrites) m.pendingResources = make(chan cachedResourceData, pendingWrites) + m.notFound = make(map[accountCreatable]struct{}, pendingWrites) + m.pendingNotFound = make(chan accountCreatable, pendingWrites) m.log = log m.pendingWritesWarnThreshold = pendingWritesWarnThreshold } @@ -69,6 +74,13 @@ func (m *lruResources) read(addr basics.Address, aidx basics.CreatableIndex) (da return persistedResourcesData{}, false } +// readNotFound returns whether we have attempted to read this address but it did not exist in the db. +// thread locking semantics : read lock +func (m *lruResources) readNotFound(addr basics.Address, idx basics.CreatableIndex) bool { + _, ok := m.notFound[accountCreatable{address: addr, index: idx}] + return ok +} + // read the persistedResourcesData object that the lruResources has for the given address. 
// thread locking semantics : read lock func (m *lruResources) readAll(addr basics.Address) (ret []persistedResourcesData) { @@ -87,12 +99,25 @@ func (m *lruResources) flushPendingWrites() { if pendingEntriesCount >= m.pendingWritesWarnThreshold { m.log.Warnf("lruResources: number of entries in pendingResources(%d) exceed the warning threshold of %d", pendingEntriesCount, m.pendingWritesWarnThreshold) } + +outer: for ; pendingEntriesCount > 0; pendingEntriesCount-- { select { case pendingResourceData := <-m.pendingResources: m.write(pendingResourceData.persistedResourcesData, pendingResourceData.address) default: - return + break outer + } + } + + pendingEntriesCount = len(m.pendingNotFound) +outer2: + for ; pendingEntriesCount > 0; pendingEntriesCount-- { + select { + case key := <-m.pendingNotFound: + m.notFound[key] = struct{}{} + default: + break outer2 } } } @@ -107,6 +132,16 @@ func (m *lruResources) writePending(acct persistedResourcesData, addr basics.Add } } +// writeNotFoundPending tags an address as not existing in the db. +// the function doesn't block, and in case of a buffer overflow the entry would not be added. +// thread locking semantics : no lock is required. +func (m *lruResources) writeNotFoundPending(addr basics.Address, idx basics.CreatableIndex) { + select { + case m.pendingNotFound <- accountCreatable{address: addr, index: idx}: + default: + } +} + // write a single persistedAccountData to the lruResources cache. // when writing the entry, the round number would be used to determine if it's a newer // version of what's already on the cache or not. 
In all cases, the entry is going @@ -139,5 +174,8 @@ func (m *lruResources) prune(newSize int) (removed int) { m.resourcesList.remove(back) removed++ } + + // clear the notFound list + m.notFound = make(map[accountCreatable]struct{}, len(m.notFound)) return } diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go index f595b05f4..b5dd17bc5 100644 --- a/shared/pingpong/config.go +++ b/shared/pingpong/config.go @@ -36,6 +36,7 @@ type PpConfig struct { RandomizeFee bool RandomizeAmt bool RandomizeDst bool + MaxRandomDst uint64 MaxFee uint64 MinFee uint64 MaxAmt uint64 @@ -98,6 +99,7 @@ var DefaultConfig = PpConfig{ RandomizeFee: false, RandomizeAmt: false, RandomizeDst: false, + MaxRandomDst: 200000, MaxFee: 10000, MinFee: 1000, MaxAmt: 1000, diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go index 2983fc8c6..30d25d6e9 100644 --- a/shared/pingpong/pingpong.go +++ b/shared/pingpong/pingpong.go @@ -130,9 +130,10 @@ func (ppa *pingPongAccount) String() string { // WorkerState object holds a running pingpong worker type WorkerState struct { - cfg PpConfig - accounts map[string]*pingPongAccount - cinfo CreatablesInfo + cfg PpConfig + accounts map[string]*pingPongAccount + randomAccounts []string + cinfo CreatablesInfo nftStartTime int64 localNftIndex uint64 @@ -633,7 +634,11 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac *libgoal.Client) { // NewPingpong creates a new pingpong WorkerState func NewPingpong(cfg PpConfig) *WorkerState { - return &WorkerState{cfg: cfg, nftHolders: make(map[string]int)} + return &WorkerState{ + cfg: cfg, + nftHolders: make(map[string]int), + randomAccounts: make([]string, 0, cfg.MaxRandomDst), + } } func (pps *WorkerState) randAssetID() (aidx uint64) { @@ -703,11 +708,7 @@ func (pps *WorkerState) sendFromTo( fee := pps.fee() to := toList[i] - if pps.cfg.RandomizeDst { - var addr basics.Address - crypto.RandBytes(addr[:]) - to = addr.String() - } else if len(belowMinBalanceAccounts) > 0 && 
(crypto.RandUint64()%100 < 50) { + if len(belowMinBalanceAccounts) > 0 && (crypto.RandUint64()%100 < 50) { // make 50% of the calls attempt to refund low-balanced accounts. // ( if there is any ) // pick the first low balance account @@ -715,6 +716,20 @@ func (pps *WorkerState) sendFromTo( to = acct break } + } else if pps.cfg.RandomizeDst { + // check if we need to create a new random account, or use an existing one + if uint64(len(pps.randomAccounts)) >= pps.cfg.MaxRandomDst { + // use pre-created random account + i := rand.Int63n(int64(len(pps.randomAccounts))) + to = pps.randomAccounts[i] + } else { + // create new random account + var addr basics.Address + crypto.RandBytes(addr[:]) + to = addr.String() + // push new account + pps.randomAccounts = append(pps.randomAccounts, to) + } } // Broadcast transaction @@ -970,7 +985,11 @@ type paymentUpdate struct { func (au *paymentUpdate) apply(pps *WorkerState) { pps.accounts[au.from].balance -= (au.fee + au.amt) - pps.accounts[au.to].balance += au.amt + // update account balance + to := pps.accounts[au.to] + if to != nil { + to.balance += au.amt + } } // return true with probability 1/i diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go index b75ae1d93..de5a5ceea 100644 --- a/test/e2e-go/features/participation/participationExpiration_test.go +++ b/test/e2e-go/features/participation/participationExpiration_test.go @@ -152,7 +152,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f blk, err := sClient.BookkeepingBlock(latestRound) a.NoError(err) - a.Equal(blk.CurrentProtocol, protocolCheck) + a.Equal(string(blk.CurrentProtocol), protocolCheck) sendMoneyTxn := fixture.SendMoneyAndWait(latestRound, amountToSendInitial, transactionFee, richAccount, sAccount, "") diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go index bc8a0fdc0..c1f0ff487 
100644 --- a/test/framework/fixtures/restClientFixture.go +++ b/test/framework/fixtures/restClientFixture.go @@ -23,7 +23,6 @@ import ( "unicode" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/protocol" "github.com/stretchr/testify/require" @@ -279,15 +278,8 @@ func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAn _, err := f.WaitForConfirmedTxn(roundTimeout, addr, txid) if err != nil { f.t.Logf("txn failed to confirm: ", addr, txid) - response, err := f.AlgodClient.GetRawPendingTransactions(0) + pendingTxns, err := f.LibGoalClient.GetParsedPendingTransactions(0) if err == nil { - // Parse pending transaction response - var pendingTxns struct { - TopTransactions []transactions.SignedTxn - TotalTransactions uint64 - } - err = protocol.DecodeReflect(response, &pendingTxns) - require.NoError(f.t, err) pendingTxids := make([]string, 0, pendingTxns.TotalTransactions) for _, txn := range pendingTxns.TopTransactions { pendingTxids = append(pendingTxids, txn.Txn.ID().String()) diff --git a/test/scripts/e2e_subs/app-assets.sh b/test/scripts/e2e_subs/app-assets.sh index b582733d3..98171a616 100755 --- a/test/scripts/e2e_subs/app-assets.sh +++ b/test/scripts/e2e_subs/app-assets.sh @@ -81,10 +81,10 @@ function asset-id { } APPACCT=$(python -c "import algosdk.encoding as e; print(e.encode_address(e.checksum(b'appID'+($APPID).to_bytes(8, 'big'))))") - +EXAMPLE_URL="http://example.com" function asset-create { amount=$1; shift - ${gcmd} asset create --creator "$SMALL" --total "$amount" --decimals 0 "$@" + ${gcmd} asset create --creator "$SMALL" --total "$amount" --decimals 0 "$@" --asseturl "$EXAMPLE_URL" } function asset-deposit { @@ -101,6 +101,10 @@ function clawback_addr { grep -o -E 'Clawback address: [A-Z0-9]{58}' | awk '{print $3}' } +function asset_url { + grep -o -E 'URL:.*'|awk '{print $2}' +} + function payin { amount=$1; shift ${gcmd} clerk send -f "$SMALL" 
-t "$APPACCT" -a "$amount" "$@" @@ -180,6 +184,8 @@ asset-optin --assetid "$ASSETID" -a $USER #opt in to asset ${gcmd} asset config --manager $SMALL --assetid $ASSETID --new-clawback $USER cb_addr=$(${gcmd} asset info --assetid $ASSETID | clawback_addr) [ "$cb_addr" = "$USER" ] +url=$(${gcmd} asset info --assetid $ASSETID | asset_url) +[ "$url" = "$EXAMPLE_URL" ] ${gcmd} asset send -f "$SMALL" -t "$USER" -a "1000" --assetid "$ASSETID" --clawback "$USER" [ $(asset_bal "$USER") = 1000 ] [ $(asset_bal "$SMALL") = 999000 ] |