author     Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com>  2021-06-09 14:34:21 -0400
committer  GitHub <noreply@github.com>  2021-06-09 14:34:21 -0400
commit     3b4335c91936e0bb556a437738a06348feedbb6f (patch)
tree       6d433c1f157412a4b3d68cd680a31a9ac11409c5
parent     5aff57ee9714769f2029a4941e8ac3b565306d13 (diff)
1k assets: consensus parameter and feature switch (#2180) [feature/1000-assets]
Add a new EnableUnlimitedAssets protocol option and implement a feature switch.
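The switch follows the usual consensus-parameter gating pattern: the existing limit stays authoritative until a protocol version sets the flag. Below is a minimal sketch of how such a check might look on the apply side, assuming the flag is consumed like other consensus switches; the helper name and call shape are illustrative and not part of this diff.

package apply

import (
	"fmt"

	"github.com/algorand/go-algorand/config"
)

// checkHoldingLimit is a hypothetical helper: with EnableUnlimitedAssets off,
// an opt-in that would push the account past MaxAssetsPerAccount is rejected as
// before; with it on, the cap only bounds how many holdings stay inline in the
// balance record. holdingsCount is the count the account would have after the opt-in.
func checkHoldingLimit(proto config.ConsensusParams, holdingsCount int) error {
	if !proto.EnableUnlimitedAssets && holdingsCount > proto.MaxAssetsPerAccount {
		return fmt.Errorf("too many assets in account: %d > %d", holdingsCount, proto.MaxAssetsPerAccount)
	}
	return nil
}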
-rw-r--r--  cmd/goal/messages_windows.go  22
-rw-r--r--  config/consensus.go  9
-rw-r--r--  daemon/algod/api/server/v1/handlers/handlers.go  4
-rw-r--r--  daemon/algod/api/server/v2/handlers.go  4
-rw-r--r--  ledger/accountdb.go  651
-rw-r--r--  ledger/accountdb_test.go  707
-rw-r--r--  ledger/acctupdates.go  171
-rw-r--r--  ledger/acctupdates_test.go  125
-rw-r--r--  ledger/appcow_test.go  2
-rw-r--r--  ledger/appdbg.go  2
-rw-r--r--  ledger/apply/application_test.go  4
-rw-r--r--  ledger/apply/apply.go  3
-rw-r--r--  ledger/apply/asset.go  42
-rw-r--r--  ledger/apply/keyreg_test.go  2
-rw-r--r--  ledger/apply/mockBalances_test.go  2
-rw-r--r--  ledger/cow.go  148
-rw-r--r--  ledger/cow_test.go  4
-rw-r--r--  ledger/creatablecow.go  33
-rw-r--r--  ledger/eval.go  25
-rw-r--r--  ledger/ledger.go  14
-rw-r--r--  ledger/ledgercore/msgp_gen.go  1850
-rw-r--r--  ledger/ledgercore/msgp_gen_test.go  295
-rw-r--r--  ledger/ledgercore/persistedacctdata.go  1507
-rw-r--r--  ledger/ledgercore/persistedacctdata_test.go  1117
-rw-r--r--  ledger/ledgercore/statedelta.go  98
-rw-r--r--  ledger/ledgercore/statedelta_test.go  41
-rw-r--r--  test/e2e-go/features/transactions/asset_test.go  275
-rw-r--r--  test/testdata/nettemplates/TwoNodes50EachV27.json  29
28 files changed, 6089 insertions, 1097 deletions
diff --git a/cmd/goal/messages_windows.go b/cmd/goal/messages_windows.go
deleted file mode 100644
index 3d06815cd..000000000
--- a/cmd/goal/messages_windows.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (C) 2019-2021 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package main
-
-const (
- // Wallet
- infoBackupPhrase = "\n%s"
-)
diff --git a/config/consensus.go b/config/consensus.go
index 8d02f915d..1cd79fa55 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -363,6 +363,10 @@ type ConsensusParams struct {
// 5. checking that in the case of going online the VoteFirst is less or equal to the LastValid+1.
// 6. checking that in the case of going online the VoteFirst is less or equal to the next network round.
EnableKeyregCoherencyCheck bool
+
+ // EnableUnlimitedAssets allows more than MaxAssetsPerAccount assets
+ // After enabling this, MaxAssetsPerAccount defines the maximum number of assets stored directly in a balance record
+ EnableUnlimitedAssets bool
}
// PaysetCommitType enumerates possible ways for the block header to commit to
@@ -944,6 +948,11 @@ func initConsensusProtocols() {
// Increase asset URL length to allow for IPFS URLs
vFuture.MaxAssetURLBytes = 96
+ // Enable large asset holdings and params
+ vFuture.EnableUnlimitedAssets = true
+ // 100100 Algos (MinBalance for creating 1,000,000 assets)
+ vFuture.MaximumMinimumBalance = 100100000000
+
Consensus[protocol.ConsensusFuture] = vFuture
}
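For reference, the vFuture assignments above can be sanity-checked with a small test. This is a sketch assuming the standard config and protocol packages plus testify; the test itself is not part of the diff.

package config_test

import (
	"testing"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/protocol"
	"github.com/stretchr/testify/require"
)

// Sketch: the future protocol carries the new switch and the raised
// MaximumMinimumBalance (100,100 Algos expressed in microAlgos).
func TestFutureEnablesUnlimitedAssets(t *testing.T) {
	proto := config.Consensus[protocol.ConsensusFuture]
	require.True(t, proto.EnableUnlimitedAssets)
	require.Equal(t, uint64(100100000000), proto.MaximumMinimumBalance)
}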
diff --git a/daemon/algod/api/server/v1/handlers/handlers.go b/daemon/algod/api/server/v1/handlers/handlers.go
index 1e3b0f634..3f541aa64 100644
--- a/daemon/algod/api/server/v1/handlers/handlers.go
+++ b/daemon/algod/api/server/v1/handlers/handlers.go
@@ -757,7 +757,7 @@ func AccountInformation(ctx lib.ReqContext, context echo.Context) {
ledger := ctx.Node.Ledger()
lastRound := ledger.Latest()
- record, err := ledger.Lookup(lastRound, basics.Address(addr))
+ record, err := ledger.LookupFull(lastRound, basics.Address(addr))
if err != nil {
lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log)
return
@@ -1288,7 +1288,7 @@ func AssetInformation(ctx lib.ReqContext, context echo.Context) {
}
lastRound := ledger.Latest()
- record, err := ledger.Lookup(lastRound, creator)
+ record, err := ledger.LookupCreatableDataWithoutRewards(lastRound, creator, basics.CreatableIndex(aidx), basics.AssetCreatable)
if err != nil {
lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log)
return
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 7a74c6b2a..29dddc1ae 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -98,7 +98,7 @@ func (v2 *Handlers) AccountInformation(ctx echo.Context, address string, params
myLedger := v2.Node.Ledger()
lastRound := myLedger.Latest()
- record, err := myLedger.Lookup(lastRound, addr)
+ record, err := myLedger.LookupFull(lastRound, addr)
if err != nil {
return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
}
@@ -713,7 +713,7 @@ func (v2 *Handlers) GetAssetByID(ctx echo.Context, assetID uint64) error {
}
lastRound := ledger.Latest()
- record, err := ledger.Lookup(lastRound, creator)
+ record, err := ledger.LookupCreatableDataWithoutRewards(lastRound, creator, basics.CreatableIndex(assetID), basics.AssetCreatable)
if err != nil {
return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
}
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index ea022d54c..afa5cb486 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -21,7 +21,6 @@ import (
"context"
"database/sql"
"fmt"
- "sort"
"time"
"github.com/mattn/go-sqlite3"
@@ -157,10 +156,11 @@ type compactAccountDeltas struct {
}
type accountDelta struct {
- old dbAccountData
- new ledgercore.PersistedAccountData
- holdings map[basics.AssetIndex]ledgercore.HoldingAction
- ndeltas int
+ old dbAccountData
+ new ledgercore.PersistedAccountData
+ createdDeletedHoldings ledgercore.EntityDelta
+ createdDeletedParams ledgercore.EntityDelta
+ ndeltas int
}
// catchpointState is used to store catchpoint related variables into the catchpointstate table.
@@ -226,20 +226,25 @@ func makeCompactAccountDeltas(accountDeltas []ledgercore.AccountDeltas, baseAcco
for _, roundDelta := range accountDeltas {
for i := 0; i < roundDelta.Len(); i++ {
addr, acctDelta := roundDelta.GetByIdx(i)
- hmap := roundDelta.GetHoldingDeltas(addr)
+ params := roundDelta.GetEntityParamsDeltas(addr)
+ holdings := roundDelta.GetEntityHoldingDeltas(addr)
if prev, idx := outAccountDeltas.get(addr); idx != -1 {
+ params := prev.createdDeletedParams.Update(params)
+ holdings := prev.createdDeletedHoldings.Update(holdings)
outAccountDeltas.update(idx, accountDelta{ // update instead of upsert economizes one map lookup
- old: prev.old,
- new: acctDelta,
- holdings: hmap,
- ndeltas: prev.ndeltas + 1,
+ old: prev.old,
+ new: acctDelta,
+ createdDeletedParams: params,
+ createdDeletedHoldings: holdings,
+ ndeltas: prev.ndeltas + 1,
})
} else {
// it's a new entry.
newEntry := accountDelta{
- new: acctDelta,
- holdings: hmap,
- ndeltas: 1,
+ new: acctDelta,
+ createdDeletedParams: params,
+ createdDeletedHoldings: holdings,
+ ndeltas: 1,
}
if baseAccountData, has := baseAccounts.read(addr); has {
newEntry.old = baseAccountData
@@ -254,6 +259,109 @@ func makeCompactAccountDeltas(accountDeltas []ledgercore.AccountDeltas, baseAcco
return
}
+func loadCreatedDeletedGroups(agl ledgercore.AbstractAssetGroupList, entityDelta ledgercore.EntityDelta, create ledgercore.EntityAction, delete ledgercore.EntityAction, fetcher fetcher, adRound basics.Round) (err error) {
+ for cidx, action := range entityDelta {
+ gi := -1
+ if action == create {
+ // use FindGroup to find a possible matching group for insertion
+ gi = agl.FindGroup(basics.AssetIndex(cidx), 0)
+ } else if action == delete {
+ gi, _ = agl.FindAsset(basics.AssetIndex(cidx), 0)
+ }
+ if gi != -1 {
+ g := agl.Get(gi)
+ if !g.Loaded() {
+ var gdRound basics.Round
+ gdRound, err = g.Fetch(fetcher, nil)
+ if err != nil {
+ return
+ }
+ if gdRound != adRound {
+ return &MismatchingDatabaseRoundError{databaseRound: gdRound, memoryRound: adRound}
+ }
+ }
+ }
+ }
+ return
+}
+
+func loadOldHoldings(dbad dbAccountData, delta accountDelta, fetcher fetcher, adRound basics.Round) (dbAccountData, error) {
+ if len(delta.new.Assets) > 0 && len(dbad.pad.AccountData.Assets) == 0 {
+ dbad.pad.AccountData.Assets = make(map[basics.AssetIndex]basics.AssetHolding, len(delta.new.Assets))
+ }
+
+ // load created and deleted asset holding groups
+ err := loadCreatedDeletedGroups(&dbad.pad.ExtendedAssetHolding, delta.createdDeletedHoldings, ledgercore.ActionHoldingCreate, ledgercore.ActionHoldingDelete, fetcher, adRound)
+ if err != nil {
+ return dbAccountData{}, err
+ }
+
+ // load updated assets
+ for aidx := range delta.new.Assets {
+ gi, ai := dbad.pad.ExtendedAssetHolding.FindAsset(aidx, 0)
+ if gi != -1 {
+ g := &dbad.pad.ExtendedAssetHolding.Groups[gi]
+ if !g.Loaded() {
+ gdRound, err := g.Fetch(fetcher, nil)
+ if err != nil {
+ return dbAccountData{}, err
+ }
+ if gdRound != adRound {
+ // this should never happen since accountsLoadOld is called as part of the DB update procedure,
+ // so the DB cannot be changed
+ return dbAccountData{}, &MismatchingDatabaseRoundError{databaseRound: gdRound, memoryRound: adRound}
+ }
+ }
+ _, ai = dbad.pad.ExtendedAssetHolding.FindAsset(aidx, gi)
+ if ai != -1 {
+ // found!
+ dbad.pad.AccountData.Assets[aidx] = g.GetHolding(ai)
+ } else {
+ // no such asset => newly created
+ }
+ }
+ }
+ return dbad, nil
+}
+
+func loadOldParams(dbad dbAccountData, delta accountDelta, fetcher fetcher, adRound basics.Round) (dbAccountData, error) {
+ if len(delta.new.AssetParams) > 0 && len(dbad.pad.AccountData.AssetParams) == 0 {
+ dbad.pad.AccountData.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, len(delta.new.AssetParams))
+ }
+
+ err := loadCreatedDeletedGroups(&dbad.pad.ExtendedAssetParams, delta.createdDeletedParams, ledgercore.ActionParamsCreate, ledgercore.ActionParamsDelete, fetcher, adRound)
+ if err != nil {
+ return dbAccountData{}, err
+ }
+
+ // load updated assets
+ for aidx := range delta.new.AssetParams {
+ gi, ai := dbad.pad.ExtendedAssetParams.FindAsset(aidx, 0)
+ if gi != -1 {
+ g := &dbad.pad.ExtendedAssetParams.Groups[gi]
+ if !g.Loaded() {
+ gdRound, err := g.Fetch(fetcher, nil)
+ if err != nil {
+ return dbAccountData{}, err
+ }
+ if gdRound != adRound {
+ // this should never happen since accountsLoadOld is called as part of the DB update procedure,
+ // so the DB cannot be changed
+ return dbAccountData{}, &MismatchingDatabaseRoundError{databaseRound: gdRound, memoryRound: adRound}
+ }
+ }
+ _, ai = dbad.pad.ExtendedAssetParams.FindAsset(aidx, gi)
+ if ai != -1 {
+ // found!
+ dbad.pad.AccountData.AssetParams[aidx] = g.GetParams(ai)
+ } else {
+ // no such asset => newly created
+ }
+ }
+ }
+ return dbad, nil
+}
+
// accountsLoadOld updates the entries on the deltas.old map that matches the provided addresses.
// The round number of the dbAccountData is not updated by this function, and the caller is responsible
// for populating this field.
@@ -276,6 +384,9 @@ func (a *compactAccountDeltas) accountsLoadOld(tx *sql.Tx) (err error) {
defer func() {
a.misses = nil
}()
+
+ fetcher := makeAssetFetcher(loadStmt)
+
var rowid sql.NullInt64
var acctDataBuf []byte
for _, idx := range a.misses {
@@ -292,67 +403,23 @@ func (a *compactAccountDeltas) accountsLoadOld(tx *sql.Tx) (err error) {
return err
}
- // accountsLoadOld is called from commitRound as db.Atomic => safe to load data without additional sync
+ // accountsLoadOld is called from commitRound as db.Atomic => safe to load extension data without additional sync
+
+ // fetch holdings/params that are in new to simplify upcoming reconciliation.
+ // these are either new or modified, and need to be written back later.
+
if dbad.pad.ExtendedAssetHolding.Count > 0 {
- // fetch holdings that are in new
- // these are either new or modified, and needed to be written back later.
- // to simplify upcoming reconciliation load them now
_, delta := a.getByIdx(idx)
- if len(delta.new.Assets) > 0 && len(dbad.pad.AccountData.Assets) == 0 {
- dbad.pad.AccountData.Assets = make(map[basics.AssetIndex]basics.AssetHolding, len(delta.new.Assets))
- }
-
- // load created and deleted asset holding groups
- for aidx, action := range delta.holdings {
- gi := -1
- if action == ledgercore.ActionCreate {
- // use FindGroup to find a possible matching group for insertion
- gi = dbad.pad.ExtendedAssetHolding.FindGroup(aidx, 0)
- } else if action == ledgercore.ActionDelete {
- gi, _ = dbad.pad.ExtendedAssetHolding.FindAsset(aidx, 0)
- }
- if gi != -1 {
- g := &dbad.pad.ExtendedAssetHolding.Groups[gi]
- if !g.Loaded() {
- groupData, gdRound, err := loadHoldingGroupData(loadStmt, g.AssetGroupKey)
- if err != nil {
- return err
- }
- if gdRound != adRound {
- // this should not never happen since accountsLoadOld is called as part of DB update procedure,
- // so the DB cannot be changed
- return &MismatchingDatabaseRoundError{databaseRound: gdRound, memoryRound: adRound}
- }
- g.Load(groupData)
- }
- }
+ dbad, err = loadOldHoldings(dbad, delta, fetcher, adRound)
+ if err != nil {
+ return err
}
-
- // load updated assets
- for aidx := range delta.new.Assets {
- gi, ai := dbad.pad.ExtendedAssetHolding.FindAsset(aidx, 0)
- if gi != -1 {
- g := &dbad.pad.ExtendedAssetHolding.Groups[gi]
- if !g.Loaded() {
- groupData, gdRound, err := loadHoldingGroupData(loadStmt, g.AssetGroupKey)
- if err != nil {
- return err
- }
- if gdRound != adRound {
- // this should not never happen since accountsLoadOld is called as part of DB update procedure,
- // so the DB cannot be changed
- return &MismatchingDatabaseRoundError{databaseRound: gdRound, memoryRound: adRound}
- }
- g.Load(groupData)
- }
- _, ai = dbad.pad.ExtendedAssetHolding.FindAsset(aidx, gi)
- if ai != -1 {
- // found!
- dbad.pad.AccountData.Assets[aidx] = g.GetHolding(ai)
- } else {
- // no such asset => newly created
- }
- }
+ }
+ if dbad.pad.ExtendedAssetParams.Count > 0 {
+ _, delta := a.getByIdx(idx)
+ dbad, err = loadOldParams(dbad, delta, fetcher, adRound)
+ if err != nil {
+ return err
}
}
} else {
@@ -1032,6 +1099,9 @@ func lookupFull(rdb db.Accessor, addr basics.Address) (dbad dbAccountData, err e
if dbad.pad.ExtendedAssetHolding.Count > 0 {
dbad.pad.Assets, dbad.pad.ExtendedAssetHolding, err = loadHoldings(loadStmt, dbad.pad.ExtendedAssetHolding, dbad.round)
}
+ if dbad.pad.ExtendedAssetParams.Count > 0 {
+ dbad.pad.AssetParams, dbad.pad.ExtendedAssetParams, err = loadParams(loadStmt, dbad.pad.ExtendedAssetParams, dbad.round)
+ }
return err
})
return err
@@ -1166,7 +1236,18 @@ func (qs *accountsDbQueries) close() {
}
func (qs *accountsDbQueries) loadHoldings(eah ledgercore.ExtendedAssetHolding, rnd basics.Round) (holdings map[basics.AssetIndex]basics.AssetHolding, result ledgercore.ExtendedAssetHolding, err error) {
- holdings, result, err = loadHoldings(qs.loadAccountGroupDataStmt, eah, rnd)
+ err = db.Retry(func() error {
+ holdings, result, err = loadHoldings(qs.loadAccountGroupDataStmt, eah, rnd)
+ return err
+ })
+ return
+}
+
+func (qs *accountsDbQueries) loadParams(eap ledgercore.ExtendedAssetParams, rnd basics.Round) (params map[basics.AssetIndex]basics.AssetParams, result ledgercore.ExtendedAssetParams, err error) {
+ err = db.Retry(func() error {
+ params, result, err = loadParams(qs.loadAccountGroupDataStmt, eap, rnd)
+ return err
+ })
return
}
@@ -1242,8 +1323,17 @@ func accountsPutTotals(tx *sql.Tx, totals ledgercore.AccountTotals, catchpointSt
return err
}
-// loadHoldings initiates all holdings leading mentioned in ExtendedAssetHolding groups.
-// baseRound specified a round number records need satisfy to. If baseRound is zero any data are OK.
+type fetcher func(key int64) (buf []byte, rnd basics.Round, err error)
+
+func makeAssetFetcher(stmt *sql.Stmt) fetcher {
+ return func(key int64) (buf []byte, rnd basics.Round, err error) {
+ buf, rnd, err = loadGroupData(stmt, key)
+ return
+ }
+}
+
+// loadHoldings loads all holdings mentioned in ExtendedAssetHolding groups.
+// baseRound specifies the round number the records need to satisfy. If baseRound is zero, any data are accepted.
func loadHoldings(stmt *sql.Stmt, eah ledgercore.ExtendedAssetHolding, baseRound basics.Round) (map[basics.AssetIndex]basics.AssetHolding, ledgercore.ExtendedAssetHolding, error) {
if len(eah.Groups) == 0 {
return nil, ledgercore.ExtendedAssetHolding{}, nil
@@ -1251,10 +1341,11 @@ func loadHoldings(stmt *sql.Stmt, eah ledgercore.ExtendedAssetHolding, baseRound
var err error
holdings := make(map[basics.AssetIndex]basics.AssetHolding, eah.Count)
groups := make([]ledgercore.AssetsHoldingGroup, len(eah.Groups), len(eah.Groups))
-
+ fetcher := makeAssetFetcher(stmt)
fetchedRound := basics.Round(0)
for gi := range eah.Groups {
- holdings, groups[gi], fetchedRound, err = loadHoldingGroup(stmt, eah.Groups[gi], holdings)
+ groups[gi] = eah.Groups[gi]
+ fetchedRound, err = groups[gi].Fetch(fetcher, holdings)
if err != nil {
return nil, ledgercore.ExtendedAssetHolding{}, err
}
@@ -1266,35 +1357,36 @@ func loadHoldings(stmt *sql.Stmt, eah ledgercore.ExtendedAssetHolding, baseRound
return holdings, eah, nil
}
-func loadHoldingGroup(stmt *sql.Stmt, g ledgercore.AssetsHoldingGroup, holdings map[basics.AssetIndex]basics.AssetHolding) (map[basics.AssetIndex]basics.AssetHolding, ledgercore.AssetsHoldingGroup, basics.Round, error) {
- var groupData ledgercore.AssetsHoldingGroupData
- var rnd basics.Round
- var err error
- err = db.Retry(func() error {
- groupData, rnd, err = loadHoldingGroupData(stmt, g.AssetGroupKey)
- return err
- })
- if err != nil {
- return nil, ledgercore.AssetsHoldingGroup{}, 0, err
+// loadParams loads all params mentioned in ExtendedAssetParams groups.
+// baseRound specifies the round number the records need to satisfy. If baseRound is zero, any data are accepted.
+func loadParams(stmt *sql.Stmt, eap ledgercore.ExtendedAssetParams, baseRound basics.Round) (map[basics.AssetIndex]basics.AssetParams, ledgercore.ExtendedAssetParams, error) {
+ if len(eap.Groups) == 0 {
+ return nil, ledgercore.ExtendedAssetParams{}, nil
}
-
- if holdings != nil {
- aidx := g.MinAssetIndex
- for i := 0; i < len(groupData.AssetOffsets); i++ {
- aidx += groupData.AssetOffsets[i]
- holdings[aidx] = groupData.GetHolding(i)
+ var err error
+ params := make(map[basics.AssetIndex]basics.AssetParams, eap.Count)
+ groups := make([]ledgercore.AssetsParamsGroup, len(eap.Groups), len(eap.Groups))
+ fetcher := makeAssetFetcher(stmt)
+ fetchedRound := basics.Round(0)
+ for gi := range eap.Groups {
+ groups[gi] = eap.Groups[gi]
+ fetchedRound, err = groups[gi].Fetch(fetcher, params)
+ if err != nil {
+ return nil, ledgercore.ExtendedAssetParams{}, err
+ }
+ if baseRound != 0 && baseRound != fetchedRound {
+ return nil, ledgercore.ExtendedAssetParams{}, &MismatchingDatabaseRoundError{databaseRound: fetchedRound, memoryRound: baseRound}
}
}
- g.Load(groupData)
- return holdings, g, rnd, nil
+ eap.Groups = groups
+ return params, eap, nil
}
-// loadHoldingGroupData loads a single holdings group data
-func loadHoldingGroupData(stmt *sql.Stmt, key int64) (group ledgercore.AssetsHoldingGroupData, rnd basics.Round, err error) {
- var buf []byte
+// loadGroupData loads a single asset group data blob (holdings or params)
+func loadGroupData(stmt *sql.Stmt, key int64) (buf []byte, rnd basics.Round, err error) {
err = stmt.QueryRow(key).Scan(&rnd, &buf)
if err == sql.ErrNoRows {
- err = fmt.Errorf("loadHoldingGroupData failed to retrive data for key %d", key)
+ err = fmt.Errorf("loadGroupData failed to retrieve data for key %d", key)
return
}
@@ -1302,38 +1394,141 @@ func loadHoldingGroupData(stmt *sql.Stmt, key int64) (group ledgercore.AssetsHol
if err != nil {
return
}
+ return
+}
+
+func modifyAssetGroup(query *sql.Stmt, agl ledgercore.AbstractAssetGroupList) (err error) {
+ var result sql.Result
+ for i := 0; i < agl.Len(); i++ {
+ result, err = query.Exec(agl.Get(i).Encode())
+ if err != nil {
+ break
+ }
+ key, err := result.LastInsertId()
+ if err != nil {
+ break
+ }
+ agl.Get(i).SetKey(key)
+ }
+ return err
+}
+
+func deleteAssetGroup(query *sql.Stmt, agl ledgercore.AbstractAssetGroupList) (err error) {
+ for i := 0; i < agl.Len(); i++ {
+ _, err = query.Exec(agl.Get(i).Key())
+ if err != nil {
+ break
+ }
+ }
+ return err
+}
+
+// collapseAssetHoldings moves all asset holdings from ExtendedAssetHolding groups back to the Assets field
+func collapseAssetHoldings(qabq, qaed *sql.Stmt, eah ledgercore.ExtendedAssetHolding, updated map[basics.AssetIndex]basics.AssetHolding, deleted []basics.AssetIndex, rnd basics.Round) (assets map[basics.AssetIndex]basics.AssetHolding, err error) {
+ // 1. load all
+ assets, _, err = loadHoldings(qabq, eah, rnd)
+ if err != nil {
+ return nil, err
+ }
+ // 2. remove deleted
+ for _, aidx := range deleted {
+ delete(assets, aidx)
+ }
+ // 3. add created and modified from delta.new
+ for aidx, holding := range updated {
+ assets[aidx] = holding
+ }
+
+ // 4. delete all groups
+ for _, g := range eah.Groups {
+ qaed.Exec(g.AssetGroupKey)
+ }
+ return assets, nil
+}
+
+// collapseAssetParams moves all asset params from ExtendedAssetParams groups back to the AssetParams field
+func collapseAssetParams(qabq, qaed *sql.Stmt, eap ledgercore.ExtendedAssetParams, updated map[basics.AssetIndex]basics.AssetParams, deleted []basics.AssetIndex, rnd basics.Round) (assets map[basics.AssetIndex]basics.AssetParams, err error) {
+ // 1. load all
+ assets, _, err = loadParams(qabq, eap, rnd)
+ if err != nil {
+ return nil, err
+ }
+ // 2. remove deleted
+ for _, aidx := range deleted {
+ delete(assets, aidx)
+ }
+ // 3. add created and modified from delta.new
+ for aidx, param := range updated {
+ assets[aidx] = param
+ }
+
+ // 4. delete all groups
+ for _, g := range eap.Groups {
+ qaed.Exec(g.AssetGroupKey)
+ }
+ return assets, nil
+}
+
+func assetsUpdateGroupDataDB(qaei, qaeu, qaed *sql.Stmt, agl ledgercore.AbstractAssetGroupList, loadedGroups []int, deletedKeys []int64) (err error) {
+ for _, key := range deletedKeys {
+ if _, err = qaed.Exec(key); err != nil {
+ return err
+ }
+ }
- err = protocol.Decode(buf, &group)
+ var result sql.Result
+ for _, i := range loadedGroups {
+ ag := agl.Get(i)
+ if ag.Key() != 0 { // existing entry, update
+ if _, err = qaeu.Exec(ag.Encode(), ag.Key()); err != nil {
+ break
+ }
+ } else {
+ // new entry, insert
+ if result, err = qaei.Exec(ag.Encode()); err != nil {
+ break
+ }
+ var key int64
+ if key, err = result.LastInsertId(); err != nil {
+ break
+ }
+ ag.SetKey(key)
+ }
+ }
return
}
-func accountsNewCreate(qabi *sql.Stmt, qaei *sql.Stmt, addr basics.Address, pad ledgercore.PersistedAccountData, genesisProto config.ConsensusParams, updatedAccounts []dbAccountData, updateIdx int) ([]dbAccountData, error) {
+func accountsNewCreate(qabi *sql.Stmt, qaei *sql.Stmt, addr basics.Address, pad ledgercore.PersistedAccountData, proto config.ConsensusParams, updatedAccounts []dbAccountData, updateIdx int) ([]dbAccountData, error) {
assetsThreshold := config.Consensus[protocol.ConsensusV18].MaxAssetsPerAccount
if len(pad.Assets) > assetsThreshold {
pad.ExtendedAssetHolding.ConvertToGroups(pad.Assets)
pad.AccountData.Assets = nil
}
+ if len(pad.AssetParams) > assetsThreshold {
+ pad.ExtendedAssetParams.ConvertToGroups(pad.AssetParams)
+ pad.AccountData.AssetParams = nil
+ }
+
+ // save holdings
var result sql.Result
var err error
- for i := 0; i < len(pad.ExtendedAssetHolding.Groups); i++ {
- result, err = qaei.Exec(pad.ExtendedAssetHolding.Groups[i].Encode())
- if err != nil {
- break
- }
- pad.ExtendedAssetHolding.Groups[i].AssetGroupKey, err = result.LastInsertId()
- if err != nil {
- break
- }
+ err = modifyAssetGroup(qaei, &pad.ExtendedAssetHolding)
+ if err != nil {
+ return updatedAccounts, err
}
+ // save params
+ err = modifyAssetGroup(qaei, &pad.ExtendedAssetParams)
+ if err != nil {
+ return updatedAccounts, err
+ }
+
+ normBalance := pad.AccountData.NormalizedOnlineBalance(proto)
+ result, err = qabi.Exec(addr[:], normBalance, protocol.Encode(&pad))
if err == nil {
- normBalance := pad.AccountData.NormalizedOnlineBalance(genesisProto)
- result, err = qabi.Exec(addr[:], normBalance, protocol.Encode(&pad))
- if err == nil {
- updatedAccounts[updateIdx].rowid, err = result.LastInsertId()
- updatedAccounts[updateIdx].pad = pad
- }
+ updatedAccounts[updateIdx].rowid, err = result.LastInsertId()
+ updatedAccounts[updateIdx].pad = pad
}
return updatedAccounts, err
}
@@ -1352,17 +1547,29 @@ func accountsNewDelete(qabd *sql.Stmt, qaed *sql.Stmt, addr basics.Address, dbad
if rowsAffected != 1 {
err = fmt.Errorf("failed to delete accountbase row for account %v, rowid %d", addr, dbad.rowid)
} else {
- // if no error delete extension records
- for _, g := range dbad.pad.ExtendedAssetHolding.Groups {
- result, err = qaed.Exec(g.AssetGroupKey)
- if err != nil {
- break
- }
+ // if no error => delete extension records
+ err = deleteAssetGroup(qaed, &dbad.pad.ExtendedAssetHolding)
+ if err == nil {
+ err = deleteAssetGroup(qaed, &dbad.pad.ExtendedAssetParams)
}
}
return updatedAccounts, err
}
+func filterCreatedDeleted(assets ledgercore.EntityDelta, cr ledgercore.EntityAction, dl ledgercore.EntityAction) (created, deleted []basics.AssetIndex) {
+ created = make([]basics.AssetIndex, 0, len(assets)/2)
+ deleted = make([]basics.AssetIndex, 0, len(assets)/2)
+
+ for cidx, action := range assets {
+ if action == cr {
+ created = append(created, basics.AssetIndex(cidx))
+ } else if action == dl {
+ deleted = append(deleted, basics.AssetIndex(cidx))
+ }
+ }
+ return
+}
+
// accountsNewUpdate reconciles old and new AccountData in delta.
// Precondition: all modified assets must be loaded into old's groupData and cached into Assets
func accountsNewUpdate(qabu, qabq, qaeu, qaei, qaed *sql.Stmt, addr basics.Address, delta accountDelta, genesisProto config.ConsensusParams, updatedAccounts []dbAccountData, updateIdx int) ([]dbAccountData, error) {
@@ -1373,7 +1580,7 @@ func accountsNewUpdate(qabu, qabq, qaeu, qaei, qaed *sql.Stmt, addr basics.Addre
return updatedAccounts, fmt.Errorf("extended holdings count mismatch (old) %d != %d (new)", delta.old.pad.ExtendedAssetHolding.Count, delta.new.ExtendedAssetHolding.Count)
}
- // Reconciliation asset holdings logic:
+ // Asset holdings (and asset params) reconciliation logic:
// old.Assets must always have the assets modified in new (see accountsLoadOld)
// PersistedAccountData stores the data either in Assets field or as ExtendedHoldings
// this means:
@@ -1385,61 +1592,44 @@ func accountsNewUpdate(qabu, qabq, qaeu, qaei, qaed *sql.Stmt, addr basics.Addre
// - if the result is above the threshold then merge in changes into group data
var pad ledgercore.PersistedAccountData
var err error
+ pad.AccountData = delta.new.AccountData
+
if delta.old.pad.NumAssetHoldings() <= assetsThreshold && len(delta.new.Assets) <= assetsThreshold {
- pad.AccountData = delta.new.AccountData
+ // AccountData assigned above
+ // Do not use the created/deleted holdings map since the entire Assets field is being replaced
} else if delta.old.pad.NumAssetHoldings() <= assetsThreshold && len(delta.new.Assets) > assetsThreshold {
- pad.ExtendedAssetHolding.ConvertToGroups(delta.new.Assets)
- pad.AccountData = delta.new.AccountData
+ _, deleted := filterCreatedDeleted(delta.createdDeletedHoldings, ledgercore.ActionHoldingCreate, ledgercore.ActionHoldingDelete)
+ assets := make(map[basics.AssetIndex]basics.AssetHolding, len(delta.old.pad.Assets)+len(delta.new.Assets)-len(deleted))
+ for aidx, holding := range delta.old.pad.Assets {
+ assets[aidx] = holding
+ }
+ for _, aidx := range deleted {
+ delete(assets, aidx)
+ }
+ for aidx, holding := range delta.new.Assets {
+ assets[aidx] = holding
+ }
+
+ pad.ExtendedAssetHolding.ConvertToGroups(assets)
pad.AccountData.Assets = nil
// update group data DB table
- var result sql.Result
- for i := 0; i < len(pad.ExtendedAssetHolding.Groups); i++ {
- result, err = qaei.Exec(pad.ExtendedAssetHolding.Groups[i].Encode())
- if err != nil {
- break
- }
- pad.ExtendedAssetHolding.Groups[i].AssetGroupKey, err = result.LastInsertId()
- if err != nil {
- break
- }
- }
+ err = modifyAssetGroup(qaei, &pad.ExtendedAssetHolding)
} else { // default case: delta.old.pad.NumAssetHoldings() > assetsThreshold
- deleted := make([]basics.AssetIndex, 0, len(delta.holdings))
- created := make([]basics.AssetIndex, 0, len(delta.holdings))
- for aidx, action := range delta.holdings {
- if action == ledgercore.ActionDelete {
- deleted = append(deleted, aidx)
- } else {
- created = append(created, aidx)
- }
- }
-
- pad = delta.new
+ created, deleted := filterCreatedDeleted(delta.createdDeletedHoldings, ledgercore.ActionHoldingCreate, ledgercore.ActionHoldingDelete)
+ pad.ExtendedAssetHolding = delta.new.ExtendedAssetHolding
oldPad := delta.old.pad
oldPadRound := delta.old.round
newCount := oldPad.NumAssetHoldings() + len(created) - len(deleted)
if newCount < assetsThreshold {
// Move all assets from groups to Assets field
- assets, _, err := loadHoldings(qabq, oldPad.ExtendedAssetHolding, oldPadRound)
+ assets, err := collapseAssetHoldings(qabq, qaed, oldPad.ExtendedAssetHolding, delta.new.Assets, deleted, oldPadRound)
if err != nil {
return updatedAccounts, err
}
- for _, aidx := range deleted {
- delete(assets, aidx)
- }
- // copy created + deleted from delta.new
- for aidx, holding := range delta.new.Assets {
- assets[aidx] = holding
- }
pad.AccountData.Assets = assets
-
- // now delete all old groups
- for _, g := range oldPad.ExtendedAssetHolding.Groups {
- qaed.Exec(g.AssetGroupKey)
- }
pad.ExtendedAssetHolding = ledgercore.ExtendedAssetHolding{}
} else {
// Reconcile groups data:
@@ -1447,89 +1637,128 @@ func accountsNewUpdate(qabu, qabq, qaeu, qaei, qaed *sql.Stmt, addr basics.Addre
updated := make([]basics.AssetIndex, 0, len(delta.new.Assets))
for aidx := range delta.new.Assets {
- if _, ok := delta.holdings[aidx]; !ok {
+ if _, ok := delta.createdDeletedHoldings[basics.CreatableIndex(aidx)]; !ok {
updated = append(updated, aidx)
}
}
pad.ExtendedAssetHolding = oldPad.ExtendedAssetHolding
if len(updated) > 0 {
- sort.SliceStable(updated, func(i, j int) bool { return updated[i] < updated[j] })
- gi, ai := 0, 0
- for _, aidx := range updated {
- gi, ai = pad.ExtendedAssetHolding.FindAsset(aidx, gi)
- if gi == -1 || ai == -1 {
- return updatedAccounts, fmt.Errorf("failed to find asset group for %d: (%d, %d)", aidx, gi, ai)
- }
- // group data is loaded in accountsLoadOld
- pad.ExtendedAssetHolding.Groups[gi].Update(ai, delta.new.Assets[aidx])
+ err = pad.ExtendedAssetHolding.Update(updated, delta.new.Assets)
+ if err != nil {
+ return updatedAccounts, err
}
}
if len(deleted) > 0 {
- // TODO: possible optimizations:
- // 1. pad.NumAssetHoldings() == len(deleted)
- // 2. deletion of entire group
- sort.SliceStable(deleted, func(i, j int) bool { return deleted[i] < deleted[j] })
- gi, ai := 0, 0
- for _, aidx := range deleted {
- gi, ai = pad.ExtendedAssetHolding.FindAsset(aidx, gi)
- if gi == -1 || ai == -1 {
- return updatedAccounts, fmt.Errorf("failed to find asset group for %d: (%d, %d)", aidx, gi, ai)
- }
- // group data is loaded in accountsLoadOld
- key := pad.ExtendedAssetHolding.Groups[gi].AssetGroupKey
- if pad.ExtendedAssetHolding.Delete(gi, ai) {
- // the only one asset was in the group, delete the group
- _, err = qaed.Exec(key)
- if err != nil {
- return updatedAccounts, err
- }
+ keysToDelete, err := pad.ExtendedAssetHolding.Delete(deleted)
+ if err != nil {
+ return updatedAccounts, err
+ }
+ for _, key := range keysToDelete {
+ _, err = qaed.Exec(key)
+ if err != nil {
+ return updatedAccounts, err
}
}
}
if len(created) > 0 {
- // sort created, they do not exist in old
- sort.SliceStable(created, func(i, j int) bool { return created[i] < created[j] })
pad.ExtendedAssetHolding.Insert(created, delta.new.Assets)
}
loaded, deletedKeys := pad.ExtendedAssetHolding.Merge()
// update DB
- for _, key := range deletedKeys {
- _, err = qaed.Exec(key)
+ err = assetsUpdateGroupDataDB(qaei, qaeu, qaed, &pad.ExtendedAssetHolding, loaded, deletedKeys)
+ pad.AccountData.Assets = nil
+ }
+ }
+
+ // same logic as above but for asset params
+ if delta.old.pad.NumAssetParams() <= assetsThreshold && len(delta.new.AssetParams) <= assetsThreshold {
+ // AccountData assigned above
+ // Do not use the created/deleted params map since the entire AssetParams field is being replaced
+ } else if delta.old.pad.NumAssetParams() <= assetsThreshold && len(delta.new.AssetParams) > assetsThreshold {
+ _, deleted := filterCreatedDeleted(delta.createdDeletedParams, ledgercore.ActionParamsCreate, ledgercore.ActionParamsDelete)
+ assets := make(map[basics.AssetIndex]basics.AssetParams, len(delta.old.pad.AssetParams)+len(delta.new.AssetParams)-len(deleted))
+ for aidx, params := range delta.old.pad.AssetParams {
+ assets[aidx] = params
+ }
+ for _, aidx := range deleted {
+ delete(assets, aidx)
+ }
+ for aidx, params := range delta.new.AssetParams {
+ assets[aidx] = params
+ }
+
+ pad.ExtendedAssetParams.ConvertToGroups(assets)
+ pad.AccountData.AssetParams = nil
+
+ // update group data DB table
+ err = modifyAssetGroup(qaei, &pad.ExtendedAssetParams)
+ } else { // default case: delta.old.pad.NumAssetParams() > assetsThreshold
+
+ created, deleted := filterCreatedDeleted(delta.createdDeletedParams, ledgercore.ActionParamsCreate, ledgercore.ActionParamsDelete)
+
+ pad.ExtendedAssetParams = delta.new.ExtendedAssetParams
+ oldPad := delta.old.pad
+ oldPadRound := delta.old.round
+
+ newCount := oldPad.NumAssetParams() + len(created) - len(deleted)
+ if newCount < assetsThreshold {
+ // Move all asset params from groups to the AssetParams field
+ assets, err := collapseAssetParams(qabq, qaed, oldPad.ExtendedAssetParams, delta.new.AssetParams, deleted, oldPadRound)
+ if err != nil {
+ return updatedAccounts, err
+ }
+ pad.AccountData.AssetParams = assets
+ pad.ExtendedAssetParams = ledgercore.ExtendedAssetParams{}
+ } else {
+ // Reconcile groups data:
+ // identify groups, load, update, then delete and insert, dump to the disk
+
+ updated := make([]basics.AssetIndex, 0, len(delta.new.AssetParams))
+ for aidx := range delta.new.AssetParams {
+ if _, ok := delta.createdDeletedParams[basics.CreatableIndex(aidx)]; !ok {
+ updated = append(updated, aidx)
+ }
+ }
+
+ pad.ExtendedAssetParams = oldPad.ExtendedAssetParams
+ if len(updated) > 0 {
+ err = pad.ExtendedAssetParams.Update(updated, delta.new.AssetParams)
if err != nil {
return updatedAccounts, err
}
}
- var result sql.Result
- for _, i := range loaded {
- if pad.ExtendedAssetHolding.Groups[i].AssetGroupKey != 0 { // existing entry, update
- _, err = qaeu.Exec(
- pad.ExtendedAssetHolding.Groups[i].Encode(),
- pad.ExtendedAssetHolding.Groups[i].AssetGroupKey)
- if err != nil {
- break
- }
- } else {
- // new entry, insert
- result, err = qaei.Exec(pad.ExtendedAssetHolding.Groups[i].Encode())
- if err != nil {
- break
- }
- pad.ExtendedAssetHolding.Groups[i].AssetGroupKey, err = result.LastInsertId()
+ if len(deleted) > 0 {
+ keysToDelete, err := pad.ExtendedAssetParams.Delete(deleted)
+ if err != nil {
+ return updatedAccounts, err
+ }
+ for _, key := range keysToDelete {
+ _, err = qaed.Exec(key)
if err != nil {
- break
+ return updatedAccounts, err
}
}
}
- // reset the cache in old.pad
- pad.AccountData.Assets = nil
+
+ if len(created) > 0 {
+ pad.ExtendedAssetParams.Insert(created, delta.new.AssetParams)
+ }
+
+ loaded, deletedKeys := pad.ExtendedAssetParams.Merge()
+
+ // update DB
+ err = assetsUpdateGroupDataDB(qaei, qaeu, qaed, &pad.ExtendedAssetParams, loaded, deletedKeys)
+ pad.AccountData.AssetParams = nil
}
}
+
+ // update accountbase
if err == nil {
var rowsAffected int64
normBalance := delta.new.NormalizedOnlineBalance(genesisProto)
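The accountdb.go changes above route both holdings and params through the new fetcher closure and the ledgercore.AbstractAssetGroupList abstraction, so group data is loaded lazily by key from the prepared statement. Below is a minimal sketch of that shared path using only calls that appear in the hunks above; the helper itself is hypothetical and elides the round-consistency check the real code performs.

package ledger

import (
	"database/sql"

	"github.com/algorand/go-algorand/ledger/ledgercore"
)

// loadAllGroups walks a group list (holdings or params) and lazily fetches and
// decodes any group whose data has not been loaded yet.
func loadAllGroups(stmt *sql.Stmt, agl ledgercore.AbstractAssetGroupList) error {
	fetch := makeAssetFetcher(stmt) // closure over loadGroupData, defined in the hunk above
	for i := 0; i < agl.Len(); i++ {
		g := agl.Get(i)
		if g.Loaded() {
			continue
		}
		if _, err := g.Fetch(fetch, nil); err != nil {
			return err
		}
	}
	return nil
}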
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 25d44fb54..a0b22ca29 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -44,12 +44,12 @@ import (
type randAccountType int
const (
- simpleAccount randAccountType = iota // only basic AccountData fields
- fullAccount // some applications and assets
- largeAssetHoldingsAccount // like full but 1k+ asset holdings
+ simpleAccount randAccountType = iota // only basic AccountData fields
+ fullAccount // some applications and assets
+ largeAssetAccount // like full but 1k+ asset holdings/params
)
-var assetsThreshold = config.Consensus[protocol.ConsensusV18].MaxAssetsPerAccount
+var testAssetsThreshold = config.Consensus[protocol.ConsensusV18].MaxAssetsPerAccount
func randomAddress() basics.Address {
var addr basics.Address
@@ -88,8 +88,11 @@ func randomFullAccountData(rewardsLevel, lastCreatableID uint64, acctType randAc
data.VoteKeyDilution = crypto.RandUint64()
if 1 == (crypto.RandUint64() % 2) {
// if account has created assets, have these defined.
- data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams)
createdAssetsCount := crypto.RandUint64()%20 + 1
+ if acctType == largeAssetAccount {
+ createdAssetsCount = 1000 + uint64(crypto.RandUint64()%512)
+ }
+ data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, createdAssetsCount)
for i := uint64(0); i < createdAssetsCount; i++ {
ap := basics.AssetParams{
Total: crypto.RandUint64(),
@@ -110,9 +113,8 @@ func randomFullAccountData(rewardsLevel, lastCreatableID uint64, acctType randAc
}
if 1 == (crypto.RandUint64() % 2) {
// if account owns assets
- data.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
ownedAssetsCount := crypto.RandUint64()%20 + 1
- if acctType == largeAssetHoldingsAccount {
+ if acctType == largeAssetAccount {
ownedAssetsCount = 1000 + uint64(crypto.RandUint64()%512)
}
data.Assets = make(map[basics.AssetIndex]basics.AssetHolding, ownedAssetsCount)
@@ -300,13 +302,25 @@ func randomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew
for aidx := range new.Assets {
if _, ok := old.Assets[aidx]; !ok {
// if not in old => created
- updates.SetHoldingDelta(addr, aidx, ledgercore.ActionCreate)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
+ }
+ }
+ for aidx := range new.AssetParams {
+ if _, ok := old.AssetParams[aidx]; !ok {
+ // if not in old => created
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsCreate)
}
}
for aidx := range old.Assets {
if _, ok := new.Assets[aidx]; !ok {
// if not in new => deleted
- updates.SetHoldingDelta(addr, aidx, ledgercore.ActionDelete)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingDelete)
+ }
+ }
+ for aidx := range old.AssetParams {
+ if _, ok := new.AssetParams[aidx]; !ok {
+ // if not in new => deleted
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsDelete)
}
}
}
@@ -397,7 +411,6 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
t.Errorf("unknown status %v", d.Status)
}
}
-
require.NoError(t, err)
for a, pad := range all {
ad := accts[a]
@@ -406,6 +419,11 @@ func checkAccounts(t *testing.T, tx *sql.Tx, rnd basics.Round, accts map[basics.
} else {
require.Equal(t, pad.AccountData.Assets, ad.Assets)
}
+ if pad.ExtendedAssetParams.Count > 0 {
+ require.Equal(t, int(pad.ExtendedAssetParams.Count), len(ad.AssetParams))
+ } else {
+ require.Equal(t, pad.AccountData.AssetParams, ad.AssetParams)
+ }
}
totals, err := accountsTotals(tx, false)
@@ -505,8 +523,8 @@ func creatablesFromUpdates(base map[basics.Address]basics.AccountData, updates l
addr, update := updates.GetByIdx(i)
// no sets in Go, so iterate over
if ad, ok := base[addr]; ok {
- for idx := range ad.Assets {
- if _, ok := update.Assets[idx]; !ok {
+ for idx := range ad.AssetParams {
+ if _, ok := update.AssetParams[idx]; !ok {
creatables[basics.CreatableIndex(idx)] = ledgercore.ModifiedCreatable{
Ctype: basics.AssetCreatable,
Created: false, // exists in base, not in new => deleted
@@ -524,13 +542,13 @@ func creatablesFromUpdates(base map[basics.Address]basics.AccountData, updates l
}
}
}
- for idx := range update.Assets {
+ for idx := range update.AssetParams {
if seen[basics.CreatableIndex(idx)] {
continue
}
ad, found := base[addr]
if found {
- if _, ok := ad.Assets[idx]; !ok {
+ if _, ok := ad.AssetParams[idx]; !ok {
found = false
}
}
@@ -621,6 +639,7 @@ func TestAccountDBRound(t *testing.T) {
fixupOldPad := func(cd compactAccountDeltas) compactAccountDeltas {
for j := range cd.deltas {
cd.deltas[j].new.ExtendedAssetHolding = cd.deltas[j].old.pad.ExtendedAssetHolding
+ cd.deltas[j].new.ExtendedAssetParams = cd.deltas[j].old.pad.ExtendedAssetParams
}
return cd
}
@@ -631,7 +650,7 @@ retry:
for i := 1; i < 10; i++ {
var updates ledgercore.AccountDeltas
var newaccts map[basics.Address]basics.AccountData
- updates, newaccts, _, lastCreatableID = randomDeltasFull(20, accts, 0, lastCreatableID, largeAssetHoldingsAccount)
+ updates, newaccts, _, lastCreatableID = randomDeltasFull(20, accts, 0, lastCreatableID, largeAssetAccount)
tx, err = dbs.Wdb.Handle.Begin()
require.NoError(t, err)
@@ -640,7 +659,7 @@ retry:
aq, err := accountsDbInit(tx, tx)
require.NoError(t, err)
for _, addr := range updates.ModifiedAccounts() {
- hd := updates.GetHoldingDeltas(addr)
+ hd := updates.GetEntityHoldingDeltas(addr)
ad := accts[addr]
dbad, err := lookupFull(dbs.Rdb, addr)
prevEnd := uint64(0)
@@ -652,7 +671,7 @@ retry:
prevEnd = start + g.DeltaMaxAssetIndex
}
require.Equal(t, ad, dbad.pad.AccountData)
- if len(ad.Assets) > assetsThreshold {
+ if len(ad.Assets) > testAssetsThreshold {
for aidx := range ad.Assets {
gi, ai := dbad.pad.ExtendedAssetHolding.FindAsset(aidx, 0)
require.NotEqual(t, -1, gi)
@@ -661,11 +680,11 @@ retry:
}
require.NoError(t, err)
for aidx, action := range hd {
- if action == ledgercore.ActionDelete {
- _, ok := ad.Assets[aidx]
+ if action == ledgercore.ActionHoldingDelete {
+ _, ok := ad.Assets[basics.AssetIndex(aidx)]
require.True(t, ok)
- if len(ad.Assets) > assetsThreshold {
- gi, ai := dbad.pad.ExtendedAssetHolding.FindAsset(aidx, 0)
+ if len(ad.Assets) > testAssetsThreshold {
+ gi, ai := dbad.pad.ExtendedAssetHolding.FindAsset(basics.AssetIndex(aidx), 0)
require.NotEqual(t, -1, gi)
require.NotEqual(t, -1, ai)
}
@@ -680,7 +699,7 @@ retry:
// ensure large holdings were generated
for _, acct := range accts {
- if len(acct.Assets) > assetsThreshold {
+ if len(acct.Assets) > testAssetsThreshold {
largeHoldingsNum++
}
}
@@ -688,7 +707,7 @@ retry:
err = updatesCnt.accountsLoadOld(tx)
require.NoError(t, err)
- // because our rand functions work with AccountData, accountDelta.new does not have ExtendedAssetHolding info.
+ // because our rand functions work with AccountData, accountDelta.new does not have ExtendedAssetHolding/Params info.
// copy it from old
updatesCnt = fixupOldPad(updatesCnt)
@@ -711,6 +730,41 @@ retry:
}
}
+type updater struct {
+ t *testing.T
+ db *sql.DB
+ rnd basics.Round
+ proto config.ConsensusParams
+ baseAccounts lruAccounts
+ isHolding bool
+}
+
+func (u *updater) update(updates ledgercore.AccountDeltas) {
+ tx, err := u.db.Begin()
+ require.NoError(u.t, err)
+
+ updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, u.baseAccounts)
+ err = updatesCnt.accountsLoadOld(tx)
+ require.NoError(u.t, err)
+
+ for j := range updatesCnt.deltas {
+ if u.isHolding {
+ updatesCnt.deltas[j].new.ExtendedAssetHolding = updatesCnt.deltas[j].old.pad.ExtendedAssetHolding
+ } else {
+ updatesCnt.deltas[j].new.ExtendedAssetParams = updatesCnt.deltas[j].old.pad.ExtendedAssetParams
+ }
+ }
+
+ _, err = accountsNewRound(tx, updatesCnt, nil, u.proto, u.rnd)
+ require.NoError(u.t, err)
+ err = updateAccountsRound(tx, u.rnd, 0)
+ require.NoError(u.t, err)
+ err = tx.Commit()
+ require.NoError(u.t, err)
+
+ u.rnd++
+}
+
func TestAccountDBRoundAssetHoldings(t *testing.T) {
// deterministic test for 1000+ holdings:
// select an account, add 256 * 6 holdings, then delete one bucket, and modify others
@@ -730,11 +784,31 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
_, err = initTestAccountDB(tx, accts, proto)
require.NoError(t, err)
checkAccounts(t, tx, 0, accts)
+ err = tx.Commit()
+ require.NoError(t, err)
- // lastCreatableID stores asset or app max used index to get rid of conflicts
var baseAccounts lruAccounts
baseAccounts.init(nil, 100, 80)
- round := basics.Round(1)
+ u := updater{
+ t,
+ dbs.Wdb.Handle,
+ basics.Round(1),
+ proto,
+ baseAccounts,
+ true,
+ }
+
+ genNewAssetHoldings := func(numAssets int, lastCreatableID uint64) map[basics.AssetIndex]basics.AssetHolding {
+ assets := make(map[basics.AssetIndex]basics.AssetHolding, numAssets)
+ for i := 0; i < numAssets; i++ {
+ ah := basics.AssetHolding{
+ Amount: crypto.RandUint64(),
+ Frozen: (crypto.RandUint64()%2 == 0),
+ }
+ assets[basics.AssetIndex(lastCreatableID+uint64(i))] = ah
+ }
+ return assets
+ }
// select some random account
var addr basics.Address
@@ -746,55 +820,73 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
}
require.NotEmpty(t, addr)
- applyUpdate := func(tx *sql.Tx, updates ledgercore.AccountDeltas) {
- updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, baseAccounts)
- err = updatesCnt.accountsLoadOld(tx)
- require.NoError(t, err)
-
- for j := range updatesCnt.deltas {
- updatesCnt.deltas[j].new.ExtendedAssetHolding = updatesCnt.deltas[j].old.pad.ExtendedAssetHolding
- }
+ // lastCreatableID stores asset or app max used index to get rid of conflicts
+ lastCreatableID := 4096 + crypto.RandUint64()%512
- _, err = accountsNewRound(tx, updatesCnt, nil, proto, round)
- require.NoError(t, err)
- err = updateAccountsRound(tx, round, 0)
- require.NoError(t, err)
- round++
+ // ensure transition from Assets to ExtendedAssetHolding works well
+ // add some assets
+ var origOwnedAssetsCount int
+ oldAssets := genNewAssetHoldings(10, lastCreatableID)
+ ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding, len(oldAssets))
+ for aidx, holding := range oldAssets {
+ ad.Assets[aidx] = holding
}
- // remove all the assets first to make predictable assets distribution
+ origOwnedAssetsCount = len(ad.Assets)
var updates ledgercore.AccountDeltas
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
for aidx := range ad.Assets {
- updates.SetHoldingDelta(addr, aidx, ledgercore.ActionDelete)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
+ }
+ u.update(updates)
+
+ // generate at least 1000 assets and ensure all 10 old and 1500 new ones were transferred into groups
+ lastCreatableID += uint64(len(oldAssets))
+ ad.Assets = genNewAssetHoldings(1500, lastCreatableID)
+ newOwnedAssetsCount := len(ad.Assets)
+ updates = ledgercore.AccountDeltas{}
+ for aidx, holding := range oldAssets {
+ ad.Assets[aidx] = holding
}
- ad.Assets = nil
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
- applyUpdate(tx, updates)
- err = tx.Commit()
+ for aidx := range ad.Assets {
+ if _, ok := oldAssets[aidx]; !ok {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
+ }
+ }
+ u.update(updates)
+
+ dbad, err := lookupFull(dbs.Rdb, addr)
require.NoError(t, err)
+ require.Equal(t, origOwnedAssetsCount+newOwnedAssetsCount, len(dbad.pad.AccountData.Assets))
+ require.NotEmpty(t, dbad.pad.ExtendedAssetHolding)
+ require.Equal(t, origOwnedAssetsCount+newOwnedAssetsCount, int(dbad.pad.ExtendedAssetHolding.Count))
+
+ // remove all the assets first to make predictable assets distribution
+ updates = ledgercore.AccountDeltas{}
+ for aidx := range dbad.pad.Assets {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingDelete)
+ }
+ ad.Assets = nil
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ u.update(updates)
// verify removal
- require.NoError(t, err)
- dbad, err := lookupFull(dbs.Rdb, addr)
+ dbad, err = lookupFull(dbs.Rdb, addr)
require.NoError(t, err)
require.Empty(t, dbad.pad.AccountData.Assets)
require.Empty(t, dbad.pad.ExtendedAssetHolding)
// create 6 holding groups
- tx, err = dbs.Wdb.Handle.Begin()
- require.NoError(t, err)
-
holdingsNum := ledgercore.MaxHoldingGroupSize * 6
updates = ledgercore.AccountDeltas{}
ad = dbad.pad.AccountData
ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding, holdingsNum)
for aidx := 1; aidx <= holdingsNum; aidx++ {
ad.Assets[basics.AssetIndex(aidx)] = basics.AssetHolding{Amount: uint64(aidx)}
- updates.SetHoldingDelta(addr, basics.AssetIndex(aidx), ledgercore.ActionCreate)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
}
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
- applyUpdate(tx, updates)
- err = tx.Commit()
- require.NoError(t, err)
+ u.update(updates)
// verify creation
dbad, err = lookupFull(dbs.Rdb, addr)
@@ -804,14 +896,11 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
require.Equal(t, 6, len(dbad.pad.ExtendedAssetHolding.Groups))
// completely remove group 1, remove 32 assets from all other groups, update 32 other assets
- tx, err = dbs.Wdb.Handle.Begin()
- require.NoError(t, err)
-
updates = ledgercore.AccountDeltas{}
ad = dbad.pad.AccountData
for aidx := ledgercore.MaxHoldingGroupSize + 1; aidx <= 2*ledgercore.MaxHoldingGroupSize; aidx++ {
delete(ad.Assets, basics.AssetIndex(aidx))
- updates.SetHoldingDelta(addr, basics.AssetIndex(aidx), ledgercore.ActionDelete)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingDelete)
}
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
for _, gi := range []int{0, 2, 3, 4, 5} {
@@ -824,20 +913,18 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
rand.Shuffle(ledgercore.MaxHoldingGroupSize, func(i, j int) { seq[i], seq[j] = seq[j], seq[i] })
for _, aidx := range seq[:32] {
delete(ad.Assets, basics.AssetIndex(aidx))
- updates.SetHoldingDelta(addr, basics.AssetIndex(aidx), ledgercore.ActionDelete)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingDelete)
}
for _, aidx := range seq[32:64] {
ad.Assets[basics.AssetIndex(aidx)] = basics.AssetHolding{Amount: uint64(aidx * 10)}
}
- // remove reset from ad.Assets since they are not in the update
+ // remove rest from ad.Assets since they are not in the update
for _, aidx := range seq[64:] {
delete(ad.Assets, basics.AssetIndex(aidx))
}
}
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
- applyUpdate(tx, updates)
- err = tx.Commit()
- require.NoError(t, err)
+ u.update(updates)
// verify update
dbad, err = lookupFull(dbs.Rdb, addr)
@@ -864,20 +951,16 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
}
// create a new group
- tx, err = dbs.Wdb.Handle.Begin()
- require.NoError(t, err)
updates = ledgercore.AccountDeltas{}
ad = dbad.pad.AccountData
ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding, ledgercore.MaxHoldingGroupSize)
for aidx := 6*ledgercore.MaxHoldingGroupSize + 1; aidx <= 7*ledgercore.MaxHoldingGroupSize; aidx++ {
- updates.SetHoldingDelta(addr, basics.AssetIndex(aidx), ledgercore.ActionCreate)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
ad.Assets[basics.AssetIndex(aidx)] = basics.AssetHolding{Amount: uint64(aidx)}
}
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
- applyUpdate(tx, updates)
- err = tx.Commit()
- require.NoError(t, err)
+ u.update(updates)
// verify creation
dbad, err = lookupFull(dbs.Rdb, addr)
@@ -908,9 +991,6 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
}
// delete groups 0, 2, 4 and ensure holdings collapse back to ad.Assets
- tx, err = dbs.Wdb.Handle.Begin()
- require.NoError(t, err)
-
updates = ledgercore.AccountDeltas{}
ad = dbad.pad.AccountData
for _, gi := range []int{0, 2, 4} {
@@ -918,14 +998,12 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
end := (gi + 1) * ledgercore.MaxHoldingGroupSize
for aidx := start; aidx <= end; aidx++ {
delete(ad.Assets, basics.AssetIndex(aidx))
- updates.SetHoldingDelta(addr, basics.AssetIndex(aidx), ledgercore.ActionDelete)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingDelete)
}
}
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
- applyUpdate(tx, updates)
- err = tx.Commit()
- require.NoError(t, err)
+ u.update(updates)
// check removal
dbad1, err := lookupFull(dbs.Rdb, addr)
@@ -934,14 +1012,259 @@ func TestAccountDBRoundAssetHoldings(t *testing.T) {
require.Empty(t, dbad1.pad.ExtendedAssetHolding)
// delete the account
- tx, err = dbs.Wdb.Handle.Begin()
- require.NoError(t, err)
-
updates = ledgercore.AccountDeltas{}
updates.Upsert(addr, ledgercore.PersistedAccountData{})
- applyUpdate(tx, updates)
+ u.update(updates)
+ dbad, err = lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ require.Empty(t, dbad.pad)
+}
+
+func TestAccountDBRoundAssetParams(t *testing.T) {
+ // deterministic test for 1000+ params:
+ // select an account, add 14 * 100 params, then delete one bucket, and modify others
+ // ensure all params match expectations
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ dbs, _ := dbOpenTest(t, true)
+ setDbLogging(t, dbs)
+ defer dbs.Close()
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ accts := randomAccounts(20, true)
+ _, err = initTestAccountDB(tx, accts, proto)
+ require.NoError(t, err)
+ checkAccounts(t, tx, 0, accts)
err = tx.Commit()
require.NoError(t, err)
+
+ var baseAccounts lruAccounts
+ baseAccounts.init(nil, 100, 80)
+ u := updater{
+ t,
+ dbs.Wdb.Handle,
+ basics.Round(1),
+ proto,
+ baseAccounts,
+ false,
+ }
+
+ genNewAssetParams := func(numAssets int, lastCreatableID uint64) map[basics.AssetIndex]basics.AssetParams {
+ assets := make(map[basics.AssetIndex]basics.AssetParams, numAssets)
+ for i := 0; i < numAssets; i++ {
+ ap := basics.AssetParams{
+ Total: crypto.RandUint64(),
+ DefaultFrozen: (crypto.RandUint64()%2 == 0),
+ }
+ assets[basics.AssetIndex(lastCreatableID+uint64(i))] = ap
+ }
+ return assets
+ }
+
+ // select some random account
+ var addr basics.Address
+ var ad basics.AccountData
+ for a, data := range accts {
+ addr = a
+ ad = data
+ break
+ }
+ require.NotEmpty(t, addr)
+
+ // lastCreatableID stores asset or app max used index to get rid of conflicts
+ lastCreatableID := 4096 + crypto.RandUint64()%512
+
+ // ensure transition from AssetParams to ExtendedAssetParams works well
+ // add some assets
+ var origOwnedAssetsCount int
+ oldAssets := genNewAssetParams(10, lastCreatableID)
+ ad.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, len(oldAssets))
+ for aidx, params := range oldAssets {
+ ad.AssetParams[aidx] = params
+ }
+ origOwnedAssetsCount = len(ad.AssetParams)
+ var updates ledgercore.AccountDeltas
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ for aidx := range ad.AssetParams {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsCreate)
+ }
+ u.update(updates)
+
+ // generate at least 1000 params and ensure all 10 old and 1500 new ones were transferred into groups
+ lastCreatableID += uint64(len(oldAssets))
+ ad.AssetParams = genNewAssetParams(1500, lastCreatableID)
+ newOwnedAssetsCount := len(ad.AssetParams)
+ updates = ledgercore.AccountDeltas{}
+ for aidx, params := range oldAssets {
+ ad.AssetParams[aidx] = params
+ }
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ for aidx := range ad.AssetParams {
+ if _, ok := oldAssets[aidx]; !ok {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsCreate)
+ }
+ }
+ u.update(updates)
+
+ dbad, err := lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ require.Equal(t, origOwnedAssetsCount+newOwnedAssetsCount, len(dbad.pad.AccountData.AssetParams))
+ require.NotEmpty(t, dbad.pad.ExtendedAssetParams)
+ require.Equal(t, origOwnedAssetsCount+newOwnedAssetsCount, int(dbad.pad.ExtendedAssetParams.Count))
+
+ // remove all the assets first to make predictable assets distribution
+ updates = ledgercore.AccountDeltas{}
+ for aidx := range dbad.pad.AssetParams {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsDelete)
+ }
+ ad.AssetParams = nil
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ u.update(updates)
+
+ // verify removal
+ require.NoError(t, err)
+ dbad, err = lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ require.Empty(t, dbad.pad.AccountData.AssetParams)
+ require.Empty(t, dbad.pad.ExtendedAssetParams)
+
+ // create 140 params groups
+ const numGroups = 140
+ paramsNum := ledgercore.MaxParamsGroupSize * numGroups
+ updates = ledgercore.AccountDeltas{}
+ ad = dbad.pad.AccountData
+ ad.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, paramsNum)
+ for aidx := 1; aidx <= paramsNum; aidx++ {
+ ad.AssetParams[basics.AssetIndex(aidx)] = basics.AssetParams{Total: uint64(aidx)}
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsCreate)
+ }
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ u.update(updates)
+
+ // verify creation
+ dbad, err = lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ require.Equal(t, paramsNum, len(dbad.pad.AccountData.AssetParams))
+ require.Equal(t, paramsNum, int(dbad.pad.ExtendedAssetParams.Count))
+ require.Equal(t, numGroups, len(dbad.pad.ExtendedAssetParams.Groups))
+
+ // completely remove group 1,
+ // remove ledgercore.MaxParamsGroupSize/8 params from every other group,
+ // and update ledgercore.MaxParamsGroupSize/8 params in every other group
+ updates = ledgercore.AccountDeltas{}
+ ad = dbad.pad.AccountData
+ for aidx := ledgercore.MaxParamsGroupSize + 1; aidx <= 2*ledgercore.MaxParamsGroupSize; aidx++ {
+ delete(ad.AssetParams, basics.AssetIndex(aidx))
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsDelete)
+ }
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ for gi := 0; gi < numGroups; gi++ {
+ if gi == 1 {
+ // skip group 1, processed above
+ continue
+ }
+ start := gi*ledgercore.MaxParamsGroupSize + 1
+ end := (gi + 1) * ledgercore.MaxParamsGroupSize
+ seq := make([]int, 0, ledgercore.MaxParamsGroupSize)
+ for i := start; i <= end; i++ {
+ seq = append(seq, i)
+ }
+ rand.Shuffle(ledgercore.MaxParamsGroupSize, func(i, j int) { seq[i], seq[j] = seq[j], seq[i] })
+ for _, aidx := range seq[:ledgercore.MaxParamsGroupSize/8] {
+ delete(ad.AssetParams, basics.AssetIndex(aidx))
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsDelete)
+ }
+ for _, aidx := range seq[ledgercore.MaxParamsGroupSize/8 : ledgercore.MaxParamsGroupSize/4] {
+ ad.AssetParams[basics.AssetIndex(aidx)] = basics.AssetParams{Total: uint64(aidx * 10)}
+ }
+ // remove the untouched entries from ad.AssetParams since they are not part of this update
+ for _, aidx := range seq[ledgercore.MaxParamsGroupSize/4:] {
+ delete(ad.AssetParams, basics.AssetIndex(aidx))
+ }
+ }
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ u.update(updates)
+
+ // verify update
+ dbad, err = lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ expectedNumParams := paramsNum - ledgercore.MaxParamsGroupSize - (numGroups-1)*(ledgercore.MaxParamsGroupSize/8)
+ require.Equal(t, expectedNumParams, len(dbad.pad.AccountData.AssetParams))
+ require.Equal(t, len(dbad.pad.AccountData.AssetParams), int(dbad.pad.ExtendedAssetParams.Count))
+ require.Equal(t, expectedNumParams/ledgercore.MaxParamsGroupSize+1, len(dbad.pad.ExtendedAssetParams.Groups))
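+ // asset indices inside a group are delta-encoded: each AssetOffsets entry is added to a running index
+ // starting at MinAssetIndex, so every reconstructed aidx must stay within
+ // [MinAssetIndex, MinAssetIndex+DeltaMaxAssetIndex]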
+ for gi := 0; gi < len(dbad.pad.ExtendedAssetParams.Groups); gi++ {
+ g := dbad.pad.ExtendedAssetParams.Groups[gi]
+ aidx := g.MinAssetIndex
+ for ai, offset := range g.TestGetGroupData().AssetOffsets {
+ h := g.GetParams(ai)
+ aidx += offset
+ require.True(t, h.Total == uint64(aidx) || h.Total == uint64(aidx*10))
+ require.GreaterOrEqual(t, uint64(aidx), uint64(g.MinAssetIndex))
+ require.LessOrEqual(t, uint64(aidx), uint64(g.MinAssetIndex)+g.DeltaMaxAssetIndex)
+ }
+ }
+
+ // create a new group
+ updates = ledgercore.AccountDeltas{}
+ ad = dbad.pad.AccountData
+ ad.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, ledgercore.MaxParamsGroupSize)
+ for aidx := numGroups*ledgercore.MaxParamsGroupSize + 1; aidx <= (numGroups+1)*ledgercore.MaxParamsGroupSize; aidx++ {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsCreate)
+ ad.AssetParams[basics.AssetIndex(aidx)] = basics.AssetParams{Total: uint64(aidx)}
+ }
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ u.update(updates)
+
+ // verify creation
+ dbad, err = lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ require.Equal(t, expectedNumParams+ledgercore.MaxParamsGroupSize, len(dbad.pad.AccountData.AssetParams))
+ require.Equal(t, len(dbad.pad.AccountData.AssetParams), int(dbad.pad.ExtendedAssetParams.Count))
+ require.Equal(t, expectedNumParams/ledgercore.MaxParamsGroupSize+2, len(dbad.pad.ExtendedAssetParams.Groups))
+ for gi := 0; gi < len(dbad.pad.ExtendedAssetParams.Groups); gi++ {
+ g := dbad.pad.ExtendedAssetParams.Groups[gi]
+ aidx := g.MinAssetIndex
+ for ai, offset := range g.TestGetGroupData().AssetOffsets {
+ h := g.GetParams(ai)
+ aidx += offset
+ require.True(t, h.Total == uint64(aidx) || h.Total == uint64(aidx*10))
+ require.GreaterOrEqual(t, uint64(aidx), uint64(g.MinAssetIndex))
+ require.LessOrEqual(t, uint64(aidx), uint64(g.MinAssetIndex)+g.DeltaMaxAssetIndex)
+ }
+ }
+
+ // delete half of the groups and ensure the params collapse back into ad.AssetParams
+ updates = ledgercore.AccountDeltas{}
+ ad = dbad.pad.AccountData
+ for gi := 0; gi < len(dbad.pad.ExtendedAssetParams.Groups); gi += 2 {
+ g := dbad.pad.ExtendedAssetParams.Groups[gi]
+ aidx := g.MinAssetIndex
+ for _, offset := range g.TestGetGroupData().AssetOffsets {
+ aidx += offset
+ delete(ad.AssetParams, basics.AssetIndex(aidx))
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionParamsDelete)
+ }
+ }
+
+ updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ u.update(updates)
+
+ // check removal
+ dbad1, err := lookupFull(dbs.Rdb, addr)
+ require.NoError(t, err)
+ expectedNumParams = expectedNumParams + ledgercore.MaxParamsGroupSize - (len(dbad.pad.ExtendedAssetParams.Groups)/2*ledgercore.MaxParamsGroupSize + 1)
+ require.Equal(t, expectedNumParams, len(dbad1.pad.AccountData.AssetParams))
+ require.Empty(t, dbad1.pad.ExtendedAssetParams)
+
+ // delete the account
+ updates = ledgercore.AccountDeltas{}
+ updates.Upsert(addr, ledgercore.PersistedAccountData{})
+ u.update(updates)
dbad, err = lookupFull(dbs.Rdb, addr)
require.NoError(t, err)
require.Empty(t, dbad.pad)
@@ -1157,7 +1480,7 @@ func benchmarkInitBalances(b testing.TB, numAccounts int, dbs db.Pair, proto con
for _, aidx := range aidxs[:numHoldings] {
if _, ok := ad.Assets[aidx]; !ok {
ad.Assets[aidx] = basics.AssetHolding{Amount: uint64(aidx), Frozen: true}
- updates.SetHoldingDelta(addr, aidx, ledgercore.ActionCreate)
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
}
}
updates.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
@@ -1268,7 +1591,8 @@ func benchLoadHolding(b *testing.B, qs *accountsDbQueries, dbad dbAccountData, a
require.NotEqual(b, -1, gi)
require.Equal(b, -1, ai)
var err error
- _, dbad.pad.ExtendedAssetHolding.Groups[gi], _, err = loadHoldingGroup(qs.loadAccountGroupDataStmt, dbad.pad.ExtendedAssetHolding.Groups[gi], nil)
+ fetcher := makeAssetFetcher(qs.loadAccountGroupDataStmt)
+ _, err = dbad.pad.ExtendedAssetHolding.Groups[gi].Fetch(fetcher, nil)
require.NoError(b, err)
_, ai = dbad.pad.ExtendedAssetHolding.FindAsset(aidx, gi)
require.NotEqual(b, -1, ai)
@@ -1301,7 +1625,7 @@ func benchmarkReadingRandomBalances(b *testing.B, inMemory bool, maxHoldingsPerA
for _, addr := range addrs {
dbad, err := qs.lookup(addr)
require.NoError(b, err)
- if !simple && len(accounts[addr].Assets) > assetsThreshold {
+ if !simple && len(accounts[addr].Assets) > testAssetsThreshold {
for aidx := range accounts[addr].Assets {
h := benchLoadHolding(b, qs, dbad, aidx)
require.NotEmpty(b, h)
@@ -1327,6 +1651,7 @@ func BenchmarkReadingRandomBalancesDiskLarge(b *testing.B) {
}{
{1, true},
{512, true},
+ {1000, true},
{2000, false},
{5000, false},
{10000, false},
@@ -1339,6 +1664,82 @@ func BenchmarkReadingRandomBalancesDiskLarge(b *testing.B) {
}
}
+func benchmarkWritingRandomBalances(b *testing.B, inMemory bool, maxHoldingsPerAccount int, largeAccountsRatio int, simple bool) {
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+ dbs, fn := dbOpenTest(b, inMemory)
+ setDbLogging(b, dbs)
+ defer cleanupTestDb(dbs, fn, inMemory)
+
+ accounts := benchmarkInitBalances(b, b.N, dbs, proto, maxHoldingsPerAccount, largeAccountsRatio)
+
+ qs, err := accountsDbInit(dbs.Rdb.Handle, dbs.Wdb.Handle)
+ require.NoError(b, err)
+
+ // collect all the addresses in the database, shuffled
+ addrs := make([]basics.Address, len(accounts))
+ pos := 0
+ for addr := range accounts {
+ addrs[pos] = addr
+ pos++
+ }
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+
+ // only measure the actual insertion time
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ var baseAccounts lruAccounts
+ baseAccounts.init(nil, 100, 80)
+ round := basics.Round(1)
+ for _, addr := range addrs {
+ tx, err := dbs.Wdb.Handle.Begin()
+ require.NoError(b, err)
+ aidx := basics.AssetIndex(crypto.RandUint64() % uint64((len(addrs) * 10)))
+ dbad, err := qs.lookup(addr)
+ require.NoError(b, err)
+ if dbad.pad.Assets == nil {
+ dbad.pad.Assets = make(map[basics.AssetIndex]basics.AssetHolding)
+ }
+ dbad.pad.Assets[aidx] = basics.AssetHolding{Amount: uint64(aidx)}
+ updates := ledgercore.AccountDeltas{}
+ updates.Upsert(addr, dbad.pad)
+ if dbad.pad.ExtendedAssetHolding.Count > uint32(testAssetsThreshold) {
+ updates.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
+ }
+
+ updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, baseAccounts)
+ err = updatesCnt.accountsLoadOld(tx)
+ require.NoError(b, err)
+ err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, proto)
+ require.NoError(b, err)
+ _, err = accountsNewRound(tx, updatesCnt, nil, proto, round)
+ require.NoError(b, err)
+ err = updateAccountsRound(tx, round, 0)
+ require.NoError(b, err)
+ tx.Rollback()
+ }
+ }
+}
+
+func BenchmarkWritingRandomBalancesDiskLarge(b *testing.B) {
+ var tests = []struct {
+ numHoldings int
+ simple bool
+ }{
+ {1, true},
+ {512, true},
+ {1000, true},
+ {2000, false},
+ {5000, false},
+ {10000, false},
+ {100000, false},
+ }
+ for _, t := range tests {
+ b.Run(fmt.Sprintf("holdings=%d simple=%v", t.numHoldings, t.simple), func(b *testing.B) {
+ benchmarkWritingRandomBalances(b, false, t.numHoldings, 10, t.simple)
+ })
+ }
+}
+
func BenchmarkWritingRandomBalancesDisk(b *testing.B) {
totalStartupAccountsNumber := 5000000
batchCount := 1000
@@ -1503,7 +1904,7 @@ func benchmarkAcctUpdateLarge(b *testing.B, maxHoldingsPerAccount int, largeHold
encodedPad := protocol.Encode(&dbad.pad)
var numToUpdate int
var gdu []groupDataUpdate
- if len(dbad.pad.Assets) > assetsThreshold {
+ if len(dbad.pad.Assets) > testAssetsThreshold {
numToUpdate = assetUpdateRatio * len(dbad.pad.Assets) / 100
gdu = make([]groupDataUpdate, numToUpdate, numToUpdate)
k := 0
@@ -1523,7 +1924,7 @@ func benchmarkAcctUpdateLarge(b *testing.B, maxHoldingsPerAccount int, largeHold
_, err = acctUpdateStmt.Exec(dbad.pad.MicroAlgos.Raw, encodedPad, dbad.rowid)
require.NoError(b, err)
- if len(dbad.pad.Assets) > assetsThreshold {
+ if len(dbad.pad.Assets) > testAssetsThreshold {
for _, entry := range gdu {
_, err = gdUpdateStmt.Exec(entry.data, entry.gi)
}
@@ -1893,7 +2294,7 @@ func TestAccountsNewCRUD(t *testing.T) {
var createDeleteTests = []struct {
count int
}{
- {0}, {1}, {assetsThreshold + 1},
+ {0}, {1}, {testAssetsThreshold + 1},
}
temp := randomAccountData(100)
@@ -2046,7 +2447,7 @@ func TestAccountsNewCRUD(t *testing.T) {
// case 2)
// now create additional 1000 assets to exceed assetsThreshold
- numNewAssets2 := assetsThreshold
+ numNewAssets2 := testAssetsThreshold
updated = basics.AccountData{}
updated.Assets = make(map[basics.AssetIndex]basics.AssetHolding, numBaseAssets+numNewAssets1+numNewAssets2)
savedAssets := make(map[basics.AssetIndex]bool, numBaseAssets+numNewAssets1+numNewAssets2)
@@ -2114,7 +2515,7 @@ func TestAccountsNewCRUD(t *testing.T) {
// case 3.1)
// len(old.Assets) > assetsThreshold
// new count > assetsThreshold => delete, update, create few
- a.GreaterOrEqual(assetsThreshold, 1000)
+ a.GreaterOrEqual(testAssetsThreshold, 1000)
del := []basics.AssetIndex{1, 2, 3, 10, 2900}
upd := []basics.AssetIndex{4, 5, 2999}
crt := []basics.AssetIndex{9001, 9501}
@@ -2125,7 +2526,10 @@ func TestAccountsNewCRUD(t *testing.T) {
a.NotEqual(-1, gi, aidx)
g := &old.pad.ExtendedAssetHolding.Groups[gi]
if !g.Loaded() {
- groupData, _, err := loadHoldingGroupData(qs.loadAccountGroupDataStmt, g.AssetGroupKey)
+ buf, _, err := loadGroupData(qs.loadAccountGroupDataStmt, g.AssetGroupKey)
+ a.NoError(err)
+ var groupData ledgercore.AssetsHoldingGroupData
+ err = protocol.Decode(buf, &groupData)
a.NoError(err)
g.Load(groupData)
loaded[gi] = true
@@ -2136,14 +2540,14 @@ func TestAccountsNewCRUD(t *testing.T) {
old.pad.Assets[aidx] = g.GetHolding(ai)
}
- deltaHoldings := make(map[basics.AssetIndex]ledgercore.HoldingAction, len(del)+len(crt))
+ deltaHoldings := make(ledgercore.EntityDelta, len(del)+len(crt))
for _, aidx := range del {
delete(savedAssets, aidx)
- deltaHoldings[aidx] = ledgercore.ActionDelete
+ deltaHoldings[basics.CreatableIndex(aidx)] = ledgercore.ActionHoldingDelete
}
for _, aidx := range crt {
savedAssets[aidx] = true
- deltaHoldings[aidx] = ledgercore.ActionCreate
+ deltaHoldings[basics.CreatableIndex(aidx)] = ledgercore.ActionHoldingCreate
}
updated = basics.AccountData{}
@@ -2156,9 +2560,9 @@ func TestAccountsNewCRUD(t *testing.T) {
}
delta = accountDelta{
- old: old,
- new: ledgercore.PersistedAccountData{AccountData: updated, ExtendedAssetHolding: old.pad.ExtendedAssetHolding},
- holdings: deltaHoldings,
+ old: old,
+ new: ledgercore.PersistedAccountData{AccountData: updated, ExtendedAssetHolding: old.pad.ExtendedAssetHolding},
+ createdDeletedHoldings: deltaHoldings,
}
updatedAccounts, err = accountsNewUpdate(
@@ -2222,7 +2626,10 @@ func TestAccountsNewCRUD(t *testing.T) {
for gi := range old.pad.ExtendedAssetHolding.Groups {
g := &old.pad.ExtendedAssetHolding.Groups[gi]
if !g.Loaded() {
- groupData, _, err := loadHoldingGroupData(qs.loadAccountGroupDataStmt, g.AssetGroupKey)
+ buf, _, err := loadGroupData(qs.loadAccountGroupDataStmt, g.AssetGroupKey)
+ a.NoError(err)
+ var groupData ledgercore.AssetsHoldingGroupData
+ err = protocol.Decode(buf, &groupData)
a.NoError(err)
g.Load(groupData)
}
@@ -2260,22 +2667,22 @@ func TestAccountsNewCRUD(t *testing.T) {
updated = basics.AccountData{}
updated.Assets = make(map[basics.AssetIndex]basics.AssetHolding, len(upd)+len(crt))
- deltaHoldings = make(map[basics.AssetIndex]ledgercore.HoldingAction, len(del)+len(crt))
+ deltaHoldings = make(ledgercore.EntityDelta, len(del)+len(crt))
for _, aidx := range del {
- deltaHoldings[aidx] = ledgercore.ActionDelete
+ deltaHoldings[basics.CreatableIndex(aidx)] = ledgercore.ActionHoldingDelete
}
for _, aidx := range upd {
updated.Assets[aidx] = old.pad.Assets[aidx]
}
for _, aidx := range crt {
updated.Assets[aidx] = basics.AssetHolding{Amount: uint64(aidx), Frozen: true}
- deltaHoldings[aidx] = ledgercore.ActionCreate
+ deltaHoldings[basics.CreatableIndex(aidx)] = ledgercore.ActionHoldingCreate
}
delta = accountDelta{
- old: old,
- new: ledgercore.PersistedAccountData{AccountData: updated, ExtendedAssetHolding: old.pad.ExtendedAssetHolding},
- holdings: deltaHoldings,
+ old: old,
+ new: ledgercore.PersistedAccountData{AccountData: updated, ExtendedAssetHolding: old.pad.ExtendedAssetHolding},
+ createdDeletedHoldings: deltaHoldings,
}
updatedAccounts, err = accountsNewUpdate(
@@ -2423,3 +2830,103 @@ func TestLoadHolding(t *testing.T) {
a.Error(err)
a.IsType(&MismatchingDatabaseRoundError{}, err)
}
+
+func TestMakeCompactAccountDeltas(t *testing.T) {
+ a := require.New(t)
+ addr := randomAddress()
+
+ var baseAccounts lruAccounts
+ baseAccounts.init(nil, 100, 80)
+ baseAccounts.write(dbAccountData{
+ addr: addr,
+ pad: ledgercore.PersistedAccountData{
+ AccountData: basics.AccountData{
+ Assets: map[basics.AssetIndex]basics.AssetHolding{
+ 1: {},
+ 2: {},
+ },
+ AssetParams: map[basics.AssetIndex]basics.AssetParams{
+ 10: {},
+ 20: {},
+ },
+ },
+ },
+ })
+
+ var updates1 ledgercore.AccountDeltas
+ updates1.Upsert(addr, ledgercore.PersistedAccountData{
+ AccountData: basics.AccountData{
+ Assets: map[basics.AssetIndex]basics.AssetHolding{
+ 1: {},
+ 2: {},
+ 3: {},
+ },
+ AssetParams: map[basics.AssetIndex]basics.AssetParams{
+ 10: {},
+ 20: {},
+ 30: {},
+ },
+ },
+ })
+ updates1.SetEntityDelta(addr, 3, ledgercore.ActionHoldingCreate)
+ updates1.SetEntityDelta(addr, 30, ledgercore.ActionParamsCreate)
+
+ var updates2 ledgercore.AccountDeltas
+ updates2.Upsert(addr, ledgercore.PersistedAccountData{
+ AccountData: basics.AccountData{
+ Assets: map[basics.AssetIndex]basics.AssetHolding{
+ 2: {},
+ 3: {},
+ 4: {},
+ },
+ AssetParams: map[basics.AssetIndex]basics.AssetParams{
+ 20: {},
+ 30: {},
+ 40: {},
+ },
+ },
+ })
+ updates2.SetEntityDelta(addr, 4, ledgercore.ActionHoldingCreate)
+ updates2.SetEntityDelta(addr, 40, ledgercore.ActionParamsCreate)
+ updates2.SetEntityDelta(addr, 1, ledgercore.ActionHoldingDelete)
+ updates2.SetEntityDelta(addr, 10, ledgercore.ActionParamsDelete)
+
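+ // compacting two consecutive rounds of deltas must collapse them into a single per-address delta,
+ // keeping the newest account data and merging the created/deleted holding and params actions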
+ cd := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates1, updates2}, baseAccounts)
+
+ a.Equal(0, len(cd.misses))
+ a.Equal(1, len(cd.cache))
+ a.Contains(cd.cache, addr)
+ a.Equal(1, len(cd.addresses))
+ a.Equal(addr, cd.addresses[0])
+ a.Equal(1, len(cd.deltas))
+ d := cd.deltas[0]
+ a.Equal(addr, d.old.addr)
+
+ ad := basics.AccountData{
+ Assets: map[basics.AssetIndex]basics.AssetHolding{
+ 2: {},
+ 3: {},
+ 4: {},
+ },
+ AssetParams: map[basics.AssetIndex]basics.AssetParams{
+ 20: {},
+ 30: {},
+ 40: {},
+ },
+ }
+ a.Equal(ad, d.new.AccountData)
+
+ holdingsDelta := ledgercore.EntityDelta{
+ 1: ledgercore.ActionHoldingDelete,
+ 3: ledgercore.ActionHoldingCreate,
+ 4: ledgercore.ActionHoldingCreate,
+ }
+ a.Equal(holdingsDelta, d.createdDeletedHoldings)
+
+ paramsDelta := ledgercore.EntityDelta{
+ 10: ledgercore.ActionParamsDelete,
+ 30: ledgercore.ActionParamsCreate,
+ 40: ledgercore.ActionParamsCreate,
+ }
+ a.Equal(paramsDelta, d.createdDeletedParams)
+}
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index bc5e427db..deadfce70 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -422,10 +422,10 @@ func (au *accountUpdates) LookupWithoutRewards(rnd basics.Round, addr basics.Add
return au.lookupWithoutRewards(rnd, addr, true /* take lock*/, nil)
}
-// LookupHoldingWithoutRewards returns the account data for a given address at a given round
+// LookupCreatableDataWithoutRewards returns the account data for a given address at a given round
// with looking for the specified holding/local state in extension table(s)
-func (au *accountUpdates) LookupHoldingWithoutRewards(rnd basics.Round, addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (pad ledgercore.PersistedAccountData, err error) {
- return au.lookupHoldingWithoutRewards(rnd, addr, cidx, ctype, true)
+func (au *accountUpdates) LookupCreatableDataWithoutRewards(rnd basics.Round, addr basics.Address, locators []creatableDataLocator) (pad ledgercore.PersistedAccountData, err error) {
+ return au.lookupCreatableDataWithoutRewards(rnd, addr, locators, true)
}
// ListAssets lists the assets by their asset index, limiting to the first maxResults
@@ -944,10 +944,10 @@ func (aul *accountUpdatesLedgerEvaluator) lookupWithoutRewards(rnd basics.Round,
return aul.au.lookupWithoutRewards(rnd, addr, false /*don't sync*/, nil)
}
-// lookupHoldingWithoutRewards returns the account data for a given address at a given round
+// lookupCreatableDataWithoutRewards returns the account data for a given address at a given round
// with looking for the specified holding/local state in extension table(s)
-func (aul *accountUpdatesLedgerEvaluator) lookupHoldingWithoutRewards(rnd basics.Round, addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (pad ledgercore.PersistedAccountData, err error) {
- return aul.au.lookupHoldingWithoutRewards(rnd, addr, cidx, ctype, false)
+func (aul *accountUpdatesLedgerEvaluator) lookupCreatableDataWithoutRewards(rnd basics.Round, addr basics.Address, locators []creatableDataLocator) (pad ledgercore.PersistedAccountData, err error) {
+ return aul.au.lookupCreatableDataWithoutRewards(rnd, addr, locators, false)
}
// GetCreatorForRound returns the asset/app creator for a given asset/app index at a given round
@@ -1859,22 +1859,50 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
}
// remove deleted assets that might come from loadHoldings
- mods := delta.GetHoldingDeltas(addr)
- for aidx, action := range mods {
- if action == ledgercore.ActionDelete {
- delete(assets, aidx)
+ mods := delta.GetEntityHoldingDeltas(addr)
+ for cidx, action := range mods {
+ if action == ledgercore.ActionHoldingDelete {
+ delete(assets, basics.AssetIndex(cidx))
}
}
-
data.Assets = assets
+
+ var params map[basics.AssetIndex]basics.AssetParams
+ params, data.ExtendedAssetParams, err = au.accountsq.loadParams(data.ExtendedAssetParams, 0)
+ if err != nil {
+ return ledgercore.PersistedAccountData{}, err
+ }
+ assetParams := make(map[basics.AssetIndex]basics.AssetParams, len(data.AssetParams)+len(params))
+
+ // apply loaded extended params
+ for aidx, param := range params {
+ assetParams[aidx] = param
+ }
+ // apply deltas
+ for aidx, param := range data.AssetParams {
+ assetParams[aidx] = param
+ }
+
+ // remove deleted assets that might come from loadParams
+ mods = delta.GetEntityParamsDeltas(addr)
+ for cidx, action := range mods {
+ if action == ledgercore.ActionParamsDelete {
+ delete(assetParams, basics.AssetIndex(cidx))
+ }
+ }
+ data.AssetParams = assetParams
+
return *data, nil
}
// Check if this is the most recent round, in which case, we can
// use a cache of the most recent account state.
if offset == uint64(len(au.deltas)) {
- if full && macct.data.ExtendedAssetHolding.Count != 0 {
+ if full && (macct.data.ExtendedAssetHolding.Count != 0 || macct.data.ExtendedAssetParams.Count != 0) {
macct.data, err = loadFull(addr, &au.deltas[offset-1], &macct.data)
+ if err != nil {
+ return ledgercore.PersistedAccountData{}, err
+ }
}
return macct.data, err
}
@@ -1887,8 +1915,11 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
d, ok := au.deltas[offset].Get(addr)
if ok {
// load existing holdings and apply deltas
- if full && d.ExtendedAssetHolding.Count != 0 {
+ if full && (d.ExtendedAssetHolding.Count != 0 || d.ExtendedAssetParams.Count != 0) {
d, err = loadFull(addr, &au.deltas[offset], &d)
+ if err != nil {
+ return ledgercore.PersistedAccountData{}, err
+ }
}
return d, err
}
@@ -1909,6 +1940,15 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
goto retryLocked
}
}
+ if full && macct.pad.ExtendedAssetParams.Count != 0 {
+ macct.pad.AssetParams, macct.pad.ExtendedAssetParams, err = au.accountsq.loadParams(macct.pad.ExtendedAssetParams, macct.round)
+ var mismatchRoundErr *MismatchingDatabaseRoundError
+ if errors.As(err, &mismatchRoundErr) {
+ // go to waiting
+ // note, au.accountsMu is still held
+ goto retryLocked
+ }
+ }
return macct.pad, err
}
@@ -1955,40 +1995,90 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres
}
}
-// lookupWithHoldings returns the full account data for a given address at a given round.
+func lookupExtendedGroup(loadStmt *sql.Stmt, aidx basics.AssetIndex, agl ledgercore.AbstractAssetGroupList) (int, int, basics.Round, error) {
+ var err error
+ var rnd basics.Round
+ const notFound = -1
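+ // FindAsset returns a group index (gi) and an in-group index (ai); gi != notFound with ai == notFound
+ // means the matching group was located but its data has not been loaded yet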
+ gi, ai := agl.FindAsset(aidx, 0)
+ if gi != notFound {
+ // if matching group found but the group is not loaded then load it
+ if ai == notFound {
+ fetcher := makeAssetFetcher(loadStmt)
+ rnd, err = agl.Get(gi).Fetch(fetcher, nil)
+ if err != nil {
+ return notFound, notFound, 0, err
+ }
+ _, ai = agl.FindAsset(aidx, gi)
+ }
+ }
+ return gi, ai, rnd, err
+}
+
+func lookupAssetHolding(loadStmt *sql.Stmt, aidx basics.AssetIndex, pad *ledgercore.PersistedAccountData) (basics.Round, error) {
+ gi, ai, rnd, err := lookupExtendedGroup(loadStmt, aidx, &pad.ExtendedAssetHolding)
+ if err != nil {
+ return 0, err
+ }
+ if gi != -1 && ai != -1 {
+ if pad.AccountData.Assets == nil {
+ // pad.AccountData.Assets might already be non-nil when the lookup hit the deltas cache
+ pad.AccountData.Assets = make(map[basics.AssetIndex]basics.AssetHolding, 1)
+ }
+ pad.AccountData.Assets[aidx] = pad.ExtendedAssetHolding.Groups[gi].GetHolding(ai)
+ }
+ return rnd, nil
+}
+
+func lookupAssetParams(loadStmt *sql.Stmt, aidx basics.AssetIndex, pad *ledgercore.PersistedAccountData) (basics.Round, error) {
+ gi, ai, rnd, err := lookupExtendedGroup(loadStmt, aidx, &pad.ExtendedAssetParams)
+ if err != nil {
+ return 0, err
+ }
+ if gi != -1 && ai != -1 {
+ if pad.AccountData.AssetParams == nil {
+ // pad.AccountData.AssetParams might already be non-nil when the lookup hit the deltas cache
+ pad.AccountData.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 1)
+ }
+ pad.AccountData.AssetParams[aidx] = pad.ExtendedAssetParams.Groups[gi].GetParams(ai)
+ }
+ return rnd, nil
+}
+
+// lookupCreatableDataWithoutRewards returns the full account data for a given address at a given round.
// The rewards are added to the AccountData before returning. Note that the function doesn't update the account with the rewards,
// even while it does return the AccoutData which represent the "rewarded" account data.
-func (au *accountUpdates) lookupHoldingWithoutRewards(rnd basics.Round, addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, synchronized bool) (pad ledgercore.PersistedAccountData, err error) {
+func (au *accountUpdates) lookupCreatableDataWithoutRewards(rnd basics.Round, addr basics.Address, locators []creatableDataLocator, synchronized bool) (pad ledgercore.PersistedAccountData, err error) {
var extLookup func(loadStmt *sql.Stmt, pad *ledgercore.PersistedAccountData) error
- if ctype == basics.AssetCreatable {
+ assetLocators := make([]creatableDataLocator, 0, len(locators))
+ for _, loc := range locators {
+ if loc.ctype == basics.AssetCreatable {
+ assetLocators = append(assetLocators, loc)
+ }
+ }
+ if len(assetLocators) > 0 {
extLookup = func(loadStmt *sql.Stmt, pad *ledgercore.PersistedAccountData) error {
- // if not extended holdings then all the holdins in pad.AccountData.Assets
- if pad.ExtendedAssetHolding.Count == 0 {
- return nil
- }
-
- gi, ai := pad.ExtendedAssetHolding.FindAsset(basics.AssetIndex(cidx), 0)
- if gi != -1 {
- // if matching group found but the group is not loaded then load it
- if ai == -1 {
- var round basics.Round
- _, pad.ExtendedAssetHolding.Groups[gi], round, err = loadHoldingGroup(loadStmt, pad.ExtendedAssetHolding.Groups[gi], nil)
+ for _, loc := range assetLocators {
+ // if there are no extended params then all the params are already in pad.AccountData.AssetParams
+ if loc.global && pad.ExtendedAssetParams.Count != 0 {
+ round, err := lookupAssetParams(loadStmt, basics.AssetIndex(loc.cidx), pad)
if err != nil {
return err
}
if round != rnd {
- au.log.Errorf("accountUpdates.lookupHoldingWithoutRewards: database round %d mismatching in-memory round %d", round, rnd)
+ au.log.Errorf("accountUpdates.lookupCreatableDataWithoutRewards: database round %d mismatching in-memory round %d", round, rnd)
return &MismatchingDatabaseRoundError{databaseRound: round, memoryRound: rnd}
}
- _, ai = pad.ExtendedAssetHolding.FindAsset(basics.AssetIndex(cidx), gi)
}
- if ai != -1 {
- if pad.AccountData.Assets == nil {
- // pad.AccountData.Assets might not be nil because looks up into deltas cache
- pad.AccountData.Assets = make(map[basics.AssetIndex]basics.AssetHolding, 1)
+ if loc.local && pad.ExtendedAssetHolding.Count != 0 {
+ round, err := lookupAssetHolding(loadStmt, basics.AssetIndex(loc.cidx), pad)
+ if err != nil {
+ return err
+ }
+ if round != rnd {
+ au.log.Errorf("accountUpdates.lookupCreatableDataWithoutRewards: database round %d mismatching in-memory round %d", round, rnd)
+ return &MismatchingDatabaseRoundError{databaseRound: round, memoryRound: rnd}
}
- pad.AccountData.Assets[basics.AssetIndex(cidx)] = pad.ExtendedAssetHolding.Groups[gi].GetHolding(ai)
}
}
return nil
@@ -2306,6 +2396,19 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb
committedRoundDigest = au.roundDigest[offset+uint64(lookback)-1]
}
+ // move newly created/deleted assets from creatableDeltas into the account deltas
+ // as params create/delete actions
+ for i := uint64(0); i < offset; i++ {
+ for cidx, creatable := range creatableDeltas[i] {
+ if creatable.Ctype == basics.AssetCreatable {
+ action := ledgercore.ActionParamsDelete
+ if creatable.Created {
+ action = ledgercore.ActionParamsCreate
+ }
+ deltas[i].SetEntityDelta(creatable.Creator, cidx, action)
+ }
+ }
+ }
+
// compact all the deltas - when we're trying to persist multiple rounds, we might have the same account
// being updated multiple times. When that happen, we can safely omit the intermediate updates.
compactDeltas := makeCompactAccountDeltas(deltas, au.baseAccounts)
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index bf49de7d5..b2bfbd3d0 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -1089,6 +1089,125 @@ func TestListCreatables(t *testing.T) {
listAndCompareComb(t, au, expectedDbImage)
}
+func TestLookupFull(t *testing.T) {
+ a := require.New(t)
+ inMemory := false
+ dbs, fn := dbOpenTest(t, inMemory)
+ setDbLogging(t, dbs)
+ defer cleanupTestDb(dbs, fn, inMemory)
+
+ tx, err := dbs.Wdb.Handle.Begin()
+ a.NoError(err)
+ defer tx.Rollback()
+
+ protoParams := config.Consensus[protocol.ConsensusFuture]
+
+ accts := randomAccounts(20, true)
+ _, err = initTestAccountDB(tx, accts, protoParams)
+ a.NoError(err)
+
+ getAddrAD := func(map[basics.Address]basics.AccountData) (addr basics.Address, ad basics.AccountData) {
+ for addr, ad = range accts {
+ return addr, ad // take first and exit
+ }
+ return
+ }
+
+ getBlock := func(rnd basics.Round) bookkeeping.Block {
+ blk := bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ Round: basics.Round(rnd),
+ },
+ }
+ blk.RewardsLevel = 0
+ blk.CurrentProtocol = protocol.ConsensusFuture
+ return blk
+ }
+
+ addr, ad := getAddrAD(accts)
+ au := &accountUpdates{}
+ cfg := config.GetDefaultLocal()
+ au.initialize(cfg, ".", protoParams, accts)
+ defer au.close()
+
+ ml := makeMockLedgerForTracker(t, true, 1, protocol.ConsensusFuture)
+ err = au.loadFromDisk(ml)
+ a.NoError(err)
+
+ // au.accountsq, err = accountsDbInit(tx, tx)
+ // a.NoError(err)
+
+ rnd := basics.Round(1)
+
+ // 1. generate an account with 2k assets, put it into deltas, call lookupFull
+ const numAssets = 2000
+ ad.Assets = make(map[basics.AssetIndex]basics.AssetHolding, numAssets)
+ var deltas ledgercore.AccountDeltas
+ for aidx := basics.AssetIndex(1); aidx <= numAssets; aidx++ {
+ if _, ok := ad.Assets[aidx]; !ok {
+ ad.Assets[aidx] = basics.AssetHolding{Amount: uint64(aidx), Frozen: true}
+ deltas.SetEntityDelta(addr, basics.CreatableIndex(aidx), ledgercore.ActionHoldingCreate)
+ }
+ }
+ deltas.Upsert(addr, ledgercore.PersistedAccountData{AccountData: ad})
+ accts[addr] = ad
+
+ blk := getBlock(rnd)
+ delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, deltas.Len(), 0)
+ delta.Accts.MergeAccounts(deltas)
+ au.newBlock(blk, delta)
+ au.committedUpTo(rnd)
+
+ // lookupFull is lookupWithRewards(full=true)
+ pad, err := au.lookupWithRewards(rnd, addr, true)
+ a.NoError(err)
+ a.Equal(numAssets, len(pad.AccountData.Assets))
+ for aidx, holding := range pad.AccountData.Assets {
+ a.Equal(uint64(aidx), holding.Amount)
+ }
+
+ // 2. add another delta triggering looking up not in macct but in au.deltas
+ rnd = 2
+ deltas = ledgercore.AccountDeltas{}
+ addr1, ad1 := getAddrAD(accts)
+ deltas.Upsert(addr1, ledgercore.PersistedAccountData{AccountData: ad1})
+ blk = getBlock(rnd)
+ delta = ledgercore.MakeStateDelta(&blk.BlockHeader, 0, deltas.Len(), 0)
+ delta.Accts.MergeAccounts(deltas)
+ au.newBlock(blk, delta)
+ au.committedUpTo(rnd)
+ // trick lookupWithRewards to trigger au.deltas lookup
+ savedDbRound := au.dbRound
+ au.dbRound = 1
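+ // with dbRound lowered, offset no longer equals len(au.deltas) inside lookupWithRewards,
+ // so the lookup takes the au.deltas[offset] branch instead of the most-recent-round cache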
+ pad, err = au.lookupWithRewards(rnd, addr, true)
+ a.NoError(err)
+ a.Equal(numAssets, len(pad.AccountData.Assets))
+ for aidx, holding := range pad.AccountData.Assets {
+ a.Equal(uint64(aidx), holding.Amount)
+ }
+
+ au.dbRound = savedDbRound
+
+ // 3. save the account into DB, call lookupFull
+ au.accountsWriting.Add(1)
+ au.commitRound(2, 0, 0)
+ pad, err = au.lookupWithRewards(rnd, addr, true)
+ a.NoError(err)
+ a.Equal(numAssets, len(pad.AccountData.Assets))
+ for aidx, holding := range pad.AccountData.Assets {
+ a.Equal(uint64(aidx), holding.Amount)
+ }
+
+ // 4. Empty au.baseAccounts and ensure direct read from DB works as well
+ au.baseAccounts = lruAccounts{}
+ pad, err = au.lookupWithRewards(rnd, addr, true)
+ a.NoError(err)
+ a.Equal(numAssets, len(pad.AccountData.Assets))
+ for aidx, holding := range pad.AccountData.Assets {
+ a.Equal(uint64(aidx), holding.Amount)
+ }
+}
+
func TestIsWritingCatchpointFile(t *testing.T) {
au := &accountUpdates{}
@@ -1216,6 +1335,12 @@ func accountsAll(tx *sql.Tx) (bals map[basics.Address]ledgercore.PersistedAccoun
return
}
}
+ if pad.ExtendedAssetParams.Count > 0 {
+ pad.AccountData.AssetParams, pad.ExtendedAssetParams, err = loadParams(stmt, pad.ExtendedAssetParams, 0)
+ if err != nil {
+ return
+ }
+ }
copy(addr[:], addrbuf)
bals[addr] = pad
diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go
index b4dc47145..133f7bb9c 100644
--- a/ledger/appcow_test.go
+++ b/ledger/appcow_test.go
@@ -45,7 +45,7 @@ func (ml *emptyLedger) lookup(addr basics.Address) (ledgercore.PersistedAccountD
return ledgercore.PersistedAccountData{}, nil
}
-func (ml *emptyLedger) lookupHolding(basics.Address, basics.CreatableIndex, basics.CreatableType) (ledgercore.PersistedAccountData, error) {
+func (ml *emptyLedger) lookupCreatableData(basics.Address, []creatableDataLocator) (ledgercore.PersistedAccountData, error) {
return ledgercore.PersistedAccountData{}, nil
}
diff --git a/ledger/appdbg.go b/ledger/appdbg.go
index f05fc3b28..df9a5a87f 100644
--- a/ledger/appdbg.go
+++ b/ledger/appdbg.go
@@ -53,7 +53,7 @@ func (w *ledgerForCowBaseWrapper) lookupWithoutRewards(rnd basics.Round, addr ba
return ledgercore.PersistedAccountData{AccountData: ad}, rnd, err
}
-func (w *ledgerForCowBaseWrapper) lookupHoldingWithoutRewards(rnd basics.Round, addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.PersistedAccountData, error) {
+func (w *ledgerForCowBaseWrapper) lookupCreatableDataWithoutRewards(rnd basics.Round, addr basics.Address, locators []creatableDataLocator) (ledgercore.PersistedAccountData, error) {
ad, _, err := w.l.LookupWithoutRewards(rnd, addr)
return ledgercore.PersistedAccountData{AccountData: ad}, err
}
diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go
index ab46811bc..cbbba99b4 100644
--- a/ledger/apply/application_test.go
+++ b/ledger/apply/application_test.go
@@ -132,7 +132,7 @@ func (b *testBalances) Get(addr basics.Address, withPendingRewards bool) (basics
return ad, nil
}
-func (b *testBalances) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.AccountData, error) {
+func (b *testBalances) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, global bool, local bool) (basics.AccountData, error) {
ad, ok := b.balances[addr]
if !ok {
return basics.AccountData{}, fmt.Errorf("mock balance not found")
@@ -206,7 +206,7 @@ func (b *testBalancesPass) Get(addr basics.Address, withPendingRewards bool) (ba
return ad, nil
}
-func (b *testBalancesPass) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.AccountData, error) {
+func (b *testBalancesPass) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, global bool, local bool) (basics.AccountData, error) {
ad, ok := b.balances[addr]
if !ok {
return basics.AccountData{}, fmt.Errorf("mock balance not found")
diff --git a/ledger/apply/apply.go b/ledger/apply/apply.go
index 20cf6fb6c..1c08e5107 100644
--- a/ledger/apply/apply.go
+++ b/ledger/apply/apply.go
@@ -32,7 +32,8 @@ type Balances interface {
Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error)
// GetEx is like Get(addr, false), but also loads specific creatable
- GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.AccountData, error)
+ // the params and holding flags select which maps are loaded: AssetParams or AppParams (params) and Assets or AppLocalStates (holding), depending on the creatable type
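+ // e.g. GetEx(addr, basics.CreatableIndex(aidx), basics.AssetCreatable, false, true) loads only the asset holding for aidx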
+ GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, params bool, holding bool) (basics.AccountData, error)
Put(basics.Address, basics.AccountData) error
diff --git a/ledger/apply/asset.go b/ledger/apply/asset.go
index 58ae68436..18c4763f4 100644
--- a/ledger/apply/asset.go
+++ b/ledger/apply/asset.go
@@ -52,7 +52,7 @@ func getParams(balances Balances, aidx basics.AssetIndex) (params basics.AssetPa
return
}
- creatorRecord, err := balances.Get(creator, false)
+ creatorRecord, err := balances.GetEx(creator, basics.CreatableIndex(aidx), basics.AssetCreatable, true, false)
if err != nil {
return
}
@@ -74,7 +74,7 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
// Ensure index is never zero
newidx := basics.AssetIndex(txnCounter + 1)
- record, err := balances.GetEx(header.Sender, basics.CreatableIndex(newidx), basics.AssetCreatable)
+ record, err := balances.GetEx(header.Sender, basics.CreatableIndex(newidx), basics.AssetCreatable, true, true)
if err != nil {
return err
}
@@ -92,7 +92,10 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
Amount: cc.AssetParams.Total,
}
- if len(record.Assets) > balances.ConsensusParams().MaxAssetsPerAccount {
+ // This condition is only checked for pre-EnableUnlimitedAssets consensus protocols.
+ // Code running the EnableUnlimitedAssets protocol gets an Assets map containing the requested asset and the assets modified in this group (or even in this block).
+ if !balances.ConsensusParams().EnableUnlimitedAssets &&
+ len(record.Assets) > balances.ConsensusParams().MaxAssetsPerAccount {
return fmt.Errorf("too many assets in account: %d > %d", len(record.Assets), balances.ConsensusParams().MaxAssetsPerAccount)
}
@@ -123,7 +126,7 @@ func AssetConfig(cc transactions.AssetConfigTxnFields, header transactions.Heade
return fmt.Errorf("this transaction should be issued by the manager. It is issued by %v, manager key %v", header.Sender, params.Manager)
}
- record, err := balances.GetEx(creator, basics.CreatableIndex(cc.ConfigAsset), basics.AssetCreatable)
+ record, err := balances.GetEx(creator, basics.CreatableIndex(cc.ConfigAsset), basics.AssetCreatable, true, true)
if err != nil {
return err
}
@@ -176,7 +179,9 @@ func takeOut(balances Balances, addr basics.Address, asset basics.AssetIndex, am
return nil
}
- snd, err := balances.GetEx(addr, basics.CreatableIndex(asset), basics.AssetCreatable)
+ const fetchParams = false
+ const fetchHolding = true
+ snd, err := balances.GetEx(addr, basics.CreatableIndex(asset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
@@ -206,7 +211,9 @@ func putIn(balances Balances, addr basics.Address, asset basics.AssetIndex, amou
return nil
}
- rcv, err := balances.GetEx(addr, basics.CreatableIndex(asset), basics.AssetCreatable)
+ const fetchParams = false
+ const fetchHolding = true
+ rcv, err := balances.GetEx(addr, basics.CreatableIndex(asset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
@@ -257,7 +264,9 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
// Allocate a slot for asset (self-transfer of zero amount).
if ct.AssetAmount == 0 && ct.AssetReceiver == source && !clawback {
- snd, err := balances.GetEx(source, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable)
+ const fetchParams = false
+ const fetchHolding = true
+ snd, err := balances.GetEx(source, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
@@ -274,7 +283,8 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
sndHolding.Frozen = params.DefaultFrozen
snd.Assets[ct.XferAsset] = sndHolding
- if len(snd.Assets) > balances.ConsensusParams().MaxAssetsPerAccount {
+ if !balances.ConsensusParams().EnableUnlimitedAssets &&
+ len(snd.Assets) > balances.ConsensusParams().MaxAssetsPerAccount {
return fmt.Errorf("too many assets in account: %d > %d", len(snd.Assets), balances.ConsensusParams().MaxAssetsPerAccount)
}
@@ -311,7 +321,9 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
// Fetch the sender balance record. We will use this to ensure
// that the sender is not the creator of the asset, and to
// figure out how much of the asset to move.
- snd, err := balances.GetEx(source, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable)
+ var fetchParams = true
+ var fetchHolding = true
+ snd, err := balances.GetEx(source, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
@@ -332,7 +344,9 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
// Fetch the destination balance record to check if we are
// closing out to the creator
- dst, err := balances.GetEx(ct.AssetCloseTo, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable)
+ fetchParams = true
+ fetchHolding = false
+ dst, err := balances.GetEx(ct.AssetCloseTo, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
@@ -362,7 +376,9 @@ func AssetTransfer(ct transactions.AssetTransferTxnFields, header transactions.H
}
// Delete the slot from the account.
- snd, err = balances.GetEx(source, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable)
+ fetchParams = false
+ fetchHolding = true
+ snd, err = balances.GetEx(source, basics.CreatableIndex(ct.XferAsset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
@@ -398,7 +414,9 @@ func AssetFreeze(cf transactions.AssetFreezeTxnFields, header transactions.Heade
}
// Get the account to be frozen/unfrozen.
- record, err := balances.GetEx(cf.FreezeAccount, basics.CreatableIndex(cf.FreezeAsset), basics.AssetCreatable)
+ const fetchParams = false
+ const fetchHolding = true
+ record, err := balances.GetEx(cf.FreezeAccount, basics.CreatableIndex(cf.FreezeAsset), basics.AssetCreatable, fetchParams, fetchHolding)
if err != nil {
return err
}
diff --git a/ledger/apply/keyreg_test.go b/ledger/apply/keyreg_test.go
index 67f71ca3f..21125cbe3 100644
--- a/ledger/apply/keyreg_test.go
+++ b/ledger/apply/keyreg_test.go
@@ -40,7 +40,7 @@ func (balances keyregTestBalances) Get(addr basics.Address, withPendingRewards b
return balances.addrs[addr], nil
}
-func (balances keyregTestBalances) GetEx(basics.Address, basics.CreatableIndex, basics.CreatableType) (basics.AccountData, error) {
+func (balances keyregTestBalances) GetEx(basics.Address, basics.CreatableIndex, basics.CreatableType, bool, bool) (basics.AccountData, error) {
return basics.AccountData{}, nil
}
diff --git a/ledger/apply/mockBalances_test.go b/ledger/apply/mockBalances_test.go
index 28848dbca..fb5c645d7 100644
--- a/ledger/apply/mockBalances_test.go
+++ b/ledger/apply/mockBalances_test.go
@@ -69,7 +69,7 @@ func (balances mockBalances) Get(addr basics.Address, withPendingRewards bool) (
return balances.b[addr], nil
}
-func (balances mockBalances) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.AccountData, error) {
+func (balances mockBalances) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, global bool, local bool) (basics.AccountData, error) {
return balances.b[addr], nil
}
diff --git a/ledger/cow.go b/ledger/cow.go
index 00fc1884c..9f380da5a 100644
--- a/ledger/cow.go
+++ b/ledger/cow.go
@@ -24,6 +24,7 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/protocol"
)
@@ -39,7 +40,7 @@ import (
type roundCowParent interface {
// lookout with rewards
lookup(basics.Address) (ledgercore.PersistedAccountData, error)
- lookupHolding(basics.Address, basics.CreatableIndex, basics.CreatableType) (ledgercore.PersistedAccountData, error)
+ lookupCreatableData(basics.Address, []creatableDataLocator) (ledgercore.PersistedAccountData, error)
checkDup(basics.Round, basics.Round, transactions.Txid, ledgercore.Txlease) error
txnCounter() uint64
getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error)
@@ -66,7 +67,10 @@ type roundCowState struct {
sdeltas map[basics.Address]map[storagePtr]*storageDelta
// getPadCache provides compatibility between mods that uses PersistedAccountData
- // and balances interface implementation (Get, GetEx, Put, PutWithCreatable) that work with AccountData view to PersistedAccountData
+ // and balances interface implementation (Get, GetEx, Put, PutWithCreatable)
+ // that work with AccountData view to PersistedAccountData.
+ // The idea is that Getters populate getPadCache and return the AccountData portion,
+ // and Putters find the corresponding PersistedAccountData there without querying the DB
getPadCache map[basics.Address]ledgercore.PersistedAccountData
// either or not maintain compatibility with original app refactoring behavior
@@ -167,31 +171,86 @@ func (cb *roundCowState) lookup(addr basics.Address) (pad ledgercore.PersistedAc
}
// lookupWithHolding is gets account data but also fetches asset holding or app local data for a specified creatable
-func (cb *roundCowState) lookupHolding(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (data ledgercore.PersistedAccountData, err error) {
+func (cb *roundCowState) lookupCreatableData(addr basics.Address, locators []creatableDataLocator) (data ledgercore.PersistedAccountData, err error) {
pad, modified := cb.mods.Accts.Get(addr)
if modified {
- exist := false
- if ctype == basics.AssetCreatable {
- _, exist = pad.AccountData.Assets[basics.AssetIndex(cidx)]
- } else {
- _, exist = pad.AccountData.AppLocalStates[basics.AppIndex(cidx)]
- }
+ foundInModified := make([]bool, 0, len(locators))
+ for _, loc := range locators {
+ globalExist := false
+ localExist := false
+ if loc.ctype == basics.AssetCreatable {
+ if loc.global {
+ _, globalExist = pad.AccountData.AssetParams[basics.AssetIndex(loc.cidx)]
+ }
+ if loc.local {
+ _, localExist = pad.AccountData.Assets[basics.AssetIndex(loc.cidx)]
+ }
+ } else {
+ if loc.global {
+ _, globalExist = pad.AccountData.AppParams[basics.AppIndex(loc.cidx)]
+ }
+ if loc.local {
+ _, localExist = pad.AccountData.AppLocalStates[basics.AppIndex(loc.cidx)]
+ }
+ }
- if exist {
+ onlyGlobal := loc.global && globalExist && !loc.local
+ onlyLocal := loc.local && localExist && !loc.global
+ bothGlobalLocal := loc.global && globalExist && loc.local && localExist
+ found := onlyGlobal || onlyLocal || bothGlobalLocal
+ foundInModified = append(foundInModified, found)
+ }
+ found := 0
+ for _, val := range foundInModified {
+ if !val {
+ break
+ }
+ found++
+ }
+ // all requested items were found in modified data => return
+ if found == len(locators) {
return pad, nil
}
}
- parentPad, err := cb.lookupParent.lookupHolding(addr, cidx, ctype)
+ parentPad, err := cb.lookupParent.lookupCreatableData(addr, locators)
if !modified {
cb.getPadCache[addr] = parentPad
return parentPad, err
}
- // data from cb.mods.Accts is newer than from lookupParent.lookupHolding, so add the asset if any
- if holding, ok := parentPad.AccountData.Assets[basics.AssetIndex(cidx)]; ok {
- pad.AccountData.Assets[basics.AssetIndex(cidx)] = holding
+ // data from cb.mods.Accts is newer than the data returned by lookupParent.lookupCreatableData,
+ // so copy entries from the parent only when they are not already present in the newer data
+ for _, loc := range locators {
+ if loc.ctype == basics.AssetCreatable {
+ if loc.global {
+ params, parentOk := parentPad.AccountData.AssetParams[basics.AssetIndex(loc.cidx)]
+ if _, ok := pad.AccountData.AssetParams[basics.AssetIndex(loc.cidx)]; !ok && parentOk {
+ pad.AccountData.AssetParams[basics.AssetIndex(loc.cidx)] = params
+ }
+ }
+ if loc.local {
+ holding, parentOk := parentPad.AccountData.Assets[basics.AssetIndex(loc.cidx)]
+ if _, ok := pad.AccountData.Assets[basics.AssetIndex(loc.cidx)]; !ok && parentOk {
+ pad.AccountData.Assets[basics.AssetIndex(loc.cidx)] = holding
+ }
+ }
+ } else {
+ if loc.global {
+ params, parentOk := parentPad.AccountData.AppParams[basics.AppIndex(loc.cidx)]
+ if _, ok := pad.AccountData.AppParams[basics.AppIndex(loc.cidx)]; !ok && parentOk {
+ pad.AccountData.AppParams[basics.AppIndex(loc.cidx)] = params
+ }
+ }
+ if loc.local {
+ states, parentOk := parentPad.AccountData.AppLocalStates[basics.AppIndex(loc.cidx)]
+ if _, ok := pad.AccountData.AppLocalStates[basics.AppIndex(loc.cidx)]; !ok && parentOk {
+ pad.AccountData.AppLocalStates[basics.AppIndex(loc.cidx)] = states
+ }
+ }
+ }
}
+
cb.getPadCache[addr] = pad
return pad, nil
}
@@ -234,7 +293,66 @@ func (cb *roundCowState) put(addr basics.Address, new basics.AccountData, newCre
pad.AccountData = new
cb.mods.Accts.Upsert(addr, pad)
} else {
- panic(fmt.Sprintf("Address %s does not have entry in getPadCache", addr.String()))
+ logging.Base().Errorf("address %s does not have entry in getPadCache", addr.String())
+
+ // Try to recover.
+ // Problem: getPadCache has to keep the AccountData with the asset/app used in the lookupCreatableData call,
+ // and put() has no idea what the client needed (that's why getPadCache exists).
+ // So we need to preload every element that might be needed:
+ // - for asset params it is either newCreatable or deletedCreatable
+ // - for asset holdings it is more complicated and we need to load all the data referenced by new.Assets
+ pad, err := cb.lookup(addr)
+ if err != nil {
+ // well, seems like something really wrong, panic
+ panic(fmt.Sprintf("Recovering attempt after %s missing in getPadCache failed: %s", addr.String(), err.Error()))
+ }
+
+ if pad.ExtendedAssetParams.Count != 0 {
+ var target basics.CreatableIndex
+ if newCreatable != nil && newCreatable.Type == basics.AssetCreatable {
+ target = newCreatable.Index
+ }
+ if deletedCreatable != nil && deletedCreatable.Type == basics.AssetCreatable {
+ target = deletedCreatable.Index
+ }
+ if target != 0 {
+ pad2, err := cb.lookupCreatableData(addr, []creatableDataLocator{{cidx: target, ctype: basics.AssetCreatable, global: true, local: false}})
+ if err != nil {
+ // well, seems like something really wrong, panic
+ panic(fmt.Sprintf("Recovering attempt after %s missing in getPadCache and asset params %d failed: %s", addr.String(), target, err.Error()))
+ }
+ pad2.AccountData = new
+ cb.mods.Accts.Upsert(addr, pad2)
+ }
+ }
+
+ if pad.ExtendedAssetHolding.Count != 0 {
+ // There are extension records, so fetch all holdings referenced by new
+ // to ensure the underlying code has all the data it needs
+ locators := make([]creatableDataLocator, 0, len(new.Assets))
+ for aidx := range new.Assets {
+ locators = append(locators, creatableDataLocator{cidx: basics.CreatableIndex(aidx), ctype: basics.AssetCreatable, global: false, local: true})
+ }
+
+ pad2, err := cb.lookupCreatableData(addr, locators)
+ if err != nil {
+ // well, seems like something really wrong, panic
+ panic(fmt.Sprintf("Recovering attempt after %s missing in getPadCache and asset holdings failed: %s", addr.String(), err.Error()))
+ }
+ for i, g := range pad2.ExtendedAssetHolding.Groups {
+ if g.Loaded() {
+ pad.ExtendedAssetHolding.Groups[i] = g
+ }
+ }
+ pad.AccountData = new
+ cb.mods.Accts.Upsert(addr, pad)
+ }
+
+ if pad.ExtendedAssetParams.Count == 0 && pad.ExtendedAssetHolding.Count == 0 {
+ // if no extension records, store a value from regular lookup
+ pad.AccountData = new
+ cb.mods.Accts.Upsert(addr, pad)
+ }
}
if newCreatable != nil {
diff --git a/ledger/cow_test.go b/ledger/cow_test.go
index 3490fb60f..6e5038f49 100644
--- a/ledger/cow_test.go
+++ b/ledger/cow_test.go
@@ -37,7 +37,7 @@ func (ml *mockLedger) lookup(addr basics.Address) (ledgercore.PersistedAccountDa
return ledgercore.PersistedAccountData{AccountData: ml.balanceMap[addr]}, nil
}
-func (ml *mockLedger) lookupHolding(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.PersistedAccountData, error) {
+func (ml *mockLedger) lookupCreatableData(addr basics.Address, locators []creatableDataLocator) (ledgercore.PersistedAccountData, error) {
return ledgercore.PersistedAccountData{AccountData: ml.balanceMap[addr]}, nil
}
@@ -121,7 +121,7 @@ func TestCowBalance(t *testing.T) {
checkCow(t, c0, accts0)
checkCow(t, c1, accts0)
- require.Panics(t, func() {
+ require.NotPanics(t, func() {
c1.put(randomAddress(), basics.AccountData{}, nil, nil)
})
diff --git a/ledger/creatablecow.go b/ledger/creatablecow.go
index dd2c8a6c1..771b8331d 100644
--- a/ledger/creatablecow.go
+++ b/ledger/creatablecow.go
@@ -26,7 +26,8 @@ import (
// Allocate creates kv storage for a given {addr, aidx, global}. It is called on app creation (global) or opting in (local)
// Allocate also registers an asset holding as created
func (cb *roundCowState) Allocate(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, global bool, space basics.StateSchema) error {
- if ctype == basics.AppCreatable {
+ switch ctype {
+ case basics.AppCreatable:
// Check that account is not already opted in
aidx := basics.AppIndex(cidx)
allocated, err := cb.allocated(addr, aidx, global)
@@ -34,7 +35,7 @@ func (cb *roundCowState) Allocate(addr basics.Address, cidx basics.CreatableInde
return err
}
if allocated {
- err = fmt.Errorf("cannot allocate storage, %v", errAlreadyStorage(addr, aidx, global))
+ err = fmt.Errorf("cannot allocate storage, %w", errAlreadyStorage(addr, aidx, global))
return err
}
@@ -45,22 +46,20 @@ func (cb *roundCowState) Allocate(addr basics.Address, cidx basics.CreatableInde
lsd.action = allocAction
lsd.maxCounts = &space
-
- return nil
- }
-
- if ctype == basics.AssetCreatable {
- cb.mods.Accts.SetHoldingDelta(addr, basics.AssetIndex(cidx), ledgercore.ActionCreate)
- return nil
+ case basics.AssetCreatable:
+ cb.mods.Accts.SetEntityDelta(addr, cidx, ledgercore.ActionHoldingCreate)
+ default:
+ return fmt.Errorf("not supported creatable type %v", ctype)
}
- return fmt.Errorf("not supported creatable type %v", ctype)
+ return nil
}
// Deallocate clears storage for {addr, aidx, global}. It happens on app deletion (global) or closing out (local)
// Deallocate also registers an asset holding as deleted
func (cb *roundCowState) Deallocate(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, global bool) error {
- if ctype == basics.AppCreatable {
+ switch ctype {
+ case basics.AppCreatable:
// Check that account has allocated storage
aidx := basics.AppIndex(cidx)
allocated, err := cb.allocated(addr, aidx, global)
@@ -81,13 +80,11 @@ func (cb *roundCowState) Deallocate(addr basics.Address, cidx basics.CreatableIn
lsd.counts = &basics.StateSchema{}
lsd.maxCounts = &basics.StateSchema{}
lsd.kvCow = make(stateDelta)
- return nil
- }
-
- if ctype == basics.AssetCreatable {
- cb.mods.Accts.SetHoldingDelta(addr, basics.AssetIndex(cidx), ledgercore.ActionDelete)
- return nil
+ case basics.AssetCreatable:
+ cb.mods.Accts.SetEntityDelta(addr, cidx, ledgercore.ActionHoldingDelete)
+ default:
+ return fmt.Errorf("not supported creatable type %v", ctype)
}
- return fmt.Errorf("not supported creatable type %v", ctype)
+ return nil
}
diff --git a/ledger/eval.go b/ledger/eval.go
index b5a8daa6f..b63a7fab8 100644
--- a/ledger/eval.go
+++ b/ledger/eval.go
@@ -97,8 +97,8 @@ func (x *roundCowBase) lookup(addr basics.Address) (ledgercore.PersistedAccountD
return pad, err
}
-func (x *roundCowBase) lookupHolding(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (data ledgercore.PersistedAccountData, err error) {
- return x.l.lookupHoldingWithoutRewards(x.rnd, addr, cidx, ctype)
+func (x *roundCowBase) lookupCreatableData(addr basics.Address, locators []creatableDataLocator) (data ledgercore.PersistedAccountData, err error) {
+ return x.l.lookupCreatableDataWithoutRewards(x.rnd, addr, locators)
}
func (x *roundCowBase) checkDup(firstValid, lastValid basics.Round, txid transactions.Txid, txl ledgercore.Txlease) error {
@@ -242,8 +242,12 @@ func (cs *roundCowState) Get(addr basics.Address, withPendingRewards bool) (basi
return pad.AccountData, nil
}
-func (cs *roundCowState) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.AccountData, error) {
- pad, err := cs.lookupHolding(addr, cidx, ctype)
+func (cs *roundCowState) GetEx(addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType, params bool, holding bool) (basics.AccountData, error) {
+ if !cs.proto.EnableUnlimitedAssets {
+ return cs.Get(addr, false)
+ }
+
+ pad, err := cs.lookupCreatableData(addr, []creatableDataLocator{{cidx: cidx, ctype: ctype, global: params, local: holding}})
if err != nil {
return basics.AccountData{}, err
}
@@ -366,6 +370,17 @@ type BlockEvaluator struct {
l ledgerForEvaluator
}
+type creatableDataLocator struct {
+ // cidx is an asset or app index
+ cidx basics.CreatableIndex
+ // ctype is either asset or app
+ ctype basics.CreatableType
+	// global specifies asset or app params lookup
+ global bool
+ // local specifies asset holding or app local state lookup
+ local bool
+}
+
type ledgerForEvaluator interface {
ledgerForCowBase
GenesisHash() crypto.Digest
@@ -379,7 +394,7 @@ type ledgerForCowBase interface {
LookupWithoutRewards(basics.Round, basics.Address) (basics.AccountData, basics.Round, error)
lookupWithoutRewards(basics.Round, basics.Address) (ledgercore.PersistedAccountData, basics.Round, error)
- lookupHoldingWithoutRewards(basics.Round, basics.Address, basics.CreatableIndex, basics.CreatableType) (ledgercore.PersistedAccountData, error)
+ lookupCreatableDataWithoutRewards(basics.Round, basics.Address, []creatableDataLocator) (ledgercore.PersistedAccountData, error)
checkDup(config.ConsensusParams, basics.Round, basics.Round, basics.Round, transactions.Txid, TxLease) error
getCreatorForRound(basics.Round, basics.CreatableIndex, basics.CreatableType) (basics.Address, bool, error)
}
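
The GetEx change above is the per-lookup half of the feature switch: with EnableUnlimitedAssets off it falls back to the old whole-record Get path, and with it on it translates the params/holding flags into a creatableDataLocator and fetches only the requested creatable data. A rough, runnable illustration of that shape (the types and functions below are stand-ins invented for this sketch, not the ledger's internal API):

package main

import "fmt"

// locator mirrors the creatableDataLocator fields shown in the hunk above.
type locator struct {
	cidx   uint64
	global bool // asset/app params requested
	local  bool // asset holding / app local state requested
}

// getEx mimics the feature switch in roundCowState.GetEx: the flag decides
// between a coarse full-record lookup and a targeted locator-based lookup.
func getEx(unlimitedAssets bool, cidx uint64, params, holding bool) string {
	if !unlimitedAssets {
		return "full account record lookup"
	}
	loc := locator{cidx: cidx, global: params, local: holding}
	return fmt.Sprintf("targeted lookup: %+v", loc)
}

func main() {
	fmt.Println(getEx(false, 7, false, true)) // feature off: old behavior
	fmt.Println(getEx(true, 7, false, true))  // feature on: load just the holding for asset 7
}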
diff --git a/ledger/ledger.go b/ledger/ledger.go
index 6b65e7ec5..f72b0e7ff 100644
--- a/ledger/ledger.go
+++ b/ledger/ledger.go
@@ -474,6 +474,14 @@ func (l *Ledger) LookupFull(rnd basics.Round, addr basics.Address) (basics.Accou
return data.AccountData, nil
}
+// LookupCreatableDataWithoutRewards returns account data containing asset or app params if any
+func (l *Ledger) LookupCreatableDataWithoutRewards(rnd basics.Round, addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.AccountData, error) {
+ params := true
+ holdings := false
+ pad, err := l.lookupCreatableDataWithoutRewards(rnd, addr, []creatableDataLocator{{cidx: cidx, ctype: ctype, global: params, local: holdings}})
+ return pad.AccountData, err
+}
+
func (l *Ledger) lookupWithoutRewards(rnd basics.Round, addr basics.Address) (ledgercore.PersistedAccountData, basics.Round, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
@@ -486,12 +494,12 @@ func (l *Ledger) lookupWithoutRewards(rnd basics.Round, addr basics.Address) (le
return pad, validThrough, nil
}
-// lookupHoldingWithoutRewards is like lookupWithoutRewards but also loads the specified holding/local state
-func (l *Ledger) lookupHoldingWithoutRewards(rnd basics.Round, addr basics.Address, cidx basics.CreatableIndex, ctype basics.CreatableType) (ledgercore.PersistedAccountData, error) {
+// lookupCreatableDataWithoutRewards is like lookupWithoutRewards but also loads the creatable data (params, holdings or local state) selected by the given locators
+func (l *Ledger) lookupCreatableDataWithoutRewards(rnd basics.Round, addr basics.Address, locators []creatableDataLocator) (ledgercore.PersistedAccountData, error) {
l.trackerMu.RLock()
defer l.trackerMu.RUnlock()
- data, err := l.accts.LookupHoldingWithoutRewards(rnd, addr, cidx, ctype)
+ data, err := l.accts.LookupCreatableDataWithoutRewards(rnd, addr, locators)
if err != nil {
return ledgercore.PersistedAccountData{}, err
}
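
LookupCreatableDataWithoutRewards is the exported entry point layered on the same locator mechanism, hard-wired to a params-only lookup (params=true, holdings=false). A hedged usage sketch, assuming l is an open *Ledger and rnd/addr are already in scope; the asset index 1234 is made up for illustration:

// Fetch addr's account record with the params of asset 1234 loaded,
// then read them out of the regular basics.AccountData map.
ad, err := l.LookupCreatableDataWithoutRewards(rnd, addr, basics.CreatableIndex(1234), basics.AssetCreatable)
if err != nil {
	// handle the lookup error
}
params, ok := ad.AssetParams[basics.AssetIndex(1234)]
if !ok {
	// addr is not the creator of asset 1234, so no params were found
}
_ = params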
diff --git a/ledger/ledgercore/msgp_gen.go b/ledger/ledgercore/msgp_gen.go
index 8417ed86a..945c5241a 100644
--- a/ledger/ledgercore/msgp_gen.go
+++ b/ledger/ledgercore/msgp_gen.go
@@ -26,6 +26,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// AssetGroupDesc
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// AssetsCommonGroupData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// AssetsHoldingGroup
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -42,6 +58,22 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// AssetsParamsGroup
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
+// AssetsParamsGroupData
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// ExtendedAssetHolding
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -50,6 +82,14 @@ import (
// |-----> (*) Msgsize
// |-----> (*) MsgIsZero
//
+// ExtendedAssetParams
+// |-----> (*) MarshalMsg
+// |-----> (*) CanMarshalMsg
+// |-----> (*) UnmarshalMsg
+// |-----> (*) CanUnmarshalMsg
+// |-----> (*) Msgsize
+// |-----> (*) MsgIsZero
+//
// PersistedAccountData
// |-----> (*) MarshalMsg
// |-----> (*) CanMarshalMsg
@@ -814,11 +854,11 @@ func (z *AlgoCount) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
-func (z *AssetsHoldingGroup) MarshalMsg(b []byte) (o []byte) {
+func (z *AssetGroupDesc) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
zb0001Len := uint32(4)
- var zb0001Mask uint8 /* 7 bits */
+ var zb0001Mask uint8 /* 5 bits */
if (*z).Count == 0 {
zb0001Len--
zb0001Mask |= 0x2
@@ -829,11 +869,11 @@ func (z *AssetsHoldingGroup) MarshalMsg(b []byte) (o []byte) {
}
if (*z).AssetGroupKey == 0 {
zb0001Len--
- zb0001Mask |= 0x10
+ zb0001Mask |= 0x8
}
if (*z).MinAssetIndex.MsgIsZero() {
zb0001Len--
- zb0001Mask |= 0x40
+ zb0001Mask |= 0x10
}
// variable map header, size zb0001Len
o = append(o, 0x80|uint8(zb0001Len))
@@ -848,12 +888,12 @@ func (z *AssetsHoldingGroup) MarshalMsg(b []byte) (o []byte) {
o = append(o, 0xa1, 0x64)
o = msgp.AppendUint64(o, (*z).DeltaMaxAssetIndex)
}
- if (zb0001Mask & 0x10) == 0 { // if not empty
+ if (zb0001Mask & 0x8) == 0 { // if not empty
// string "k"
o = append(o, 0xa1, 0x6b)
o = msgp.AppendInt64(o, (*z).AssetGroupKey)
}
- if (zb0001Mask & 0x40) == 0 { // if not empty
+ if (zb0001Mask & 0x10) == 0 { // if not empty
// string "m"
o = append(o, 0xa1, 0x6d)
o = (*z).MinAssetIndex.MarshalMsg(o)
@@ -862,13 +902,13 @@ func (z *AssetsHoldingGroup) MarshalMsg(b []byte) (o []byte) {
return
}
-func (_ *AssetsHoldingGroup) CanMarshalMsg(z interface{}) bool {
- _, ok := (z).(*AssetsHoldingGroup)
+func (_ *AssetGroupDesc) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetGroupDesc)
return ok
}
// UnmarshalMsg implements msgp.Unmarshaler
-func (z *AssetsHoldingGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
+func (z *AssetGroupDesc) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 int
@@ -925,7 +965,7 @@ func (z *AssetsHoldingGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
if zb0002 {
- (*z) = AssetsHoldingGroup{}
+ (*z) = AssetGroupDesc{}
}
for zb0001 > 0 {
zb0001--
@@ -972,6 +1012,339 @@ func (z *AssetsHoldingGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
return
}
+func (_ *AssetGroupDesc) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetGroupDesc)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *AssetGroupDesc) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint32Size + 2 + (*z).MinAssetIndex.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Int64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *AssetGroupDesc) MsgIsZero() bool {
+ return ((*z).Count == 0) && ((*z).MinAssetIndex.MsgIsZero()) && ((*z).DeltaMaxAssetIndex == 0) && ((*z).AssetGroupKey == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *AssetsCommonGroupData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0002Len := uint32(1)
+ var zb0002Mask uint8 /* 2 bits */
+ if len((*z).AssetOffsets) == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x2
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x2) == 0 { // if not empty
+ // string "ao"
+ o = append(o, 0xa2, 0x61, 0x6f)
+ if (*z).AssetOffsets == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetOffsets)))
+ }
+ for zb0001 := range (*z).AssetOffsets {
+ o = (*z).AssetOffsets[zb0001].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *AssetsCommonGroupData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsCommonGroupData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *AssetsCommonGroupData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetOffsets")
+ return
+ }
+ if zb0004 > MaxHoldingGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(MaxHoldingGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "AssetOffsets")
+ return
+ }
+ if zb0005 {
+ (*z).AssetOffsets = nil
+ } else if (*z).AssetOffsets != nil && cap((*z).AssetOffsets) >= zb0004 {
+ (*z).AssetOffsets = ((*z).AssetOffsets)[:zb0004]
+ } else {
+ (*z).AssetOffsets = make([]basics.AssetIndex, zb0004)
+ }
+ for zb0001 := range (*z).AssetOffsets {
+ bts, err = (*z).AssetOffsets[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetOffsets", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = AssetsCommonGroupData{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ao":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetOffsets")
+ return
+ }
+ if zb0006 > MaxHoldingGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(MaxHoldingGroupSize))
+ err = msgp.WrapError(err, "AssetOffsets")
+ return
+ }
+ if zb0007 {
+ (*z).AssetOffsets = nil
+ } else if (*z).AssetOffsets != nil && cap((*z).AssetOffsets) >= zb0006 {
+ (*z).AssetOffsets = ((*z).AssetOffsets)[:zb0006]
+ } else {
+ (*z).AssetOffsets = make([]basics.AssetIndex, zb0006)
+ }
+ for zb0001 := range (*z).AssetOffsets {
+ bts, err = (*z).AssetOffsets[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetOffsets", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *AssetsCommonGroupData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsCommonGroupData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *AssetsCommonGroupData) Msgsize() (s int) {
+ s = 1 + 3 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).AssetOffsets {
+ s += (*z).AssetOffsets[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *AssetsCommonGroupData) MsgIsZero() bool {
+ return (len((*z).AssetOffsets) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *AssetsHoldingGroup) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(4)
+ var zb0001Mask uint8 /* 7 bits */
+ if (*z).AssetGroupDesc.Count == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).AssetGroupDesc.DeltaMaxAssetIndex == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).AssetGroupDesc.AssetGroupKey == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).AssetGroupDesc.MinAssetIndex.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "c"
+ o = append(o, 0xa1, 0x63)
+ o = msgp.AppendUint32(o, (*z).AssetGroupDesc.Count)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "d"
+ o = append(o, 0xa1, 0x64)
+ o = msgp.AppendUint64(o, (*z).AssetGroupDesc.DeltaMaxAssetIndex)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendInt64(o, (*z).AssetGroupDesc.AssetGroupKey)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = (*z).AssetGroupDesc.MinAssetIndex.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *AssetsHoldingGroup) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsHoldingGroup)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *AssetsHoldingGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).AssetGroupDesc.Count, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Count")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).AssetGroupDesc.MinAssetIndex.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MinAssetIndex")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).AssetGroupDesc.DeltaMaxAssetIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "DeltaMaxAssetIndex")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).AssetGroupDesc.AssetGroupKey, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetGroupKey")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = AssetsHoldingGroup{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "c":
+ (*z).AssetGroupDesc.Count, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Count")
+ return
+ }
+ case "m":
+ bts, err = (*z).AssetGroupDesc.MinAssetIndex.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MinAssetIndex")
+ return
+ }
+ case "d":
+ (*z).AssetGroupDesc.DeltaMaxAssetIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DeltaMaxAssetIndex")
+ return
+ }
+ case "k":
+ (*z).AssetGroupDesc.AssetGroupKey, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetGroupKey")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
func (_ *AssetsHoldingGroup) CanUnmarshalMsg(z interface{}) bool {
_, ok := (z).(*AssetsHoldingGroup)
return ok
@@ -979,13 +1352,13 @@ func (_ *AssetsHoldingGroup) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *AssetsHoldingGroup) Msgsize() (s int) {
- s = 1 + 2 + msgp.Uint32Size + 2 + (*z).MinAssetIndex.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Int64Size
+ s = 1 + 2 + msgp.Uint32Size + 2 + (*z).AssetGroupDesc.MinAssetIndex.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Int64Size
return
}
// MsgIsZero returns whether this is a zero value
func (z *AssetsHoldingGroup) MsgIsZero() bool {
- return ((*z).Count == 0) && ((*z).MinAssetIndex.MsgIsZero()) && ((*z).DeltaMaxAssetIndex == 0) && ((*z).AssetGroupKey == 0)
+ return ((*z).AssetGroupDesc.Count == 0) && ((*z).AssetGroupDesc.MinAssetIndex.MsgIsZero()) && ((*z).AssetGroupDesc.DeltaMaxAssetIndex == 0) && ((*z).AssetGroupDesc.AssetGroupKey == 0)
}
// MarshalMsg implements msgp.Marshaler
@@ -998,7 +1371,7 @@ func (z *AssetsHoldingGroupData) MarshalMsg(b []byte) (o []byte) {
zb0004Len--
zb0004Mask |= 0x2
}
- if len((*z).AssetOffsets) == 0 {
+ if len((*z).AssetsCommonGroupData.AssetOffsets) == 0 {
zb0004Len--
zb0004Mask |= 0x4
}
@@ -1024,13 +1397,13 @@ func (z *AssetsHoldingGroupData) MarshalMsg(b []byte) (o []byte) {
if (zb0004Mask & 0x4) == 0 { // if not empty
// string "ao"
o = append(o, 0xa2, 0x61, 0x6f)
- if (*z).AssetOffsets == nil {
+ if (*z).AssetsCommonGroupData.AssetOffsets == nil {
o = msgp.AppendNil(o)
} else {
- o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetOffsets)))
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetsCommonGroupData.AssetOffsets)))
}
- for zb0001 := range (*z).AssetOffsets {
- o = (*z).AssetOffsets[zb0001].MarshalMsg(o)
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ o = (*z).AssetsCommonGroupData.AssetOffsets[zb0001].MarshalMsg(o)
}
}
if (zb0004Mask & 0x8) == 0 { // if not empty
@@ -1082,14 +1455,14 @@ func (z *AssetsHoldingGroupData) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
if zb0007 {
- (*z).AssetOffsets = nil
- } else if (*z).AssetOffsets != nil && cap((*z).AssetOffsets) >= zb0006 {
- (*z).AssetOffsets = ((*z).AssetOffsets)[:zb0006]
+ (*z).AssetsCommonGroupData.AssetOffsets = nil
+ } else if (*z).AssetsCommonGroupData.AssetOffsets != nil && cap((*z).AssetsCommonGroupData.AssetOffsets) >= zb0006 {
+ (*z).AssetsCommonGroupData.AssetOffsets = ((*z).AssetsCommonGroupData.AssetOffsets)[:zb0006]
} else {
- (*z).AssetOffsets = make([]basics.AssetIndex, zb0006)
+ (*z).AssetsCommonGroupData.AssetOffsets = make([]basics.AssetIndex, zb0006)
}
- for zb0001 := range (*z).AssetOffsets {
- bts, err = (*z).AssetOffsets[zb0001].UnmarshalMsg(bts)
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ bts, err = (*z).AssetsCommonGroupData.AssetOffsets[zb0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "struct-from-array", "AssetOffsets", zb0001)
return
@@ -1191,14 +1564,14 @@ func (z *AssetsHoldingGroupData) UnmarshalMsg(bts []byte) (o []byte, err error)
return
}
if zb0013 {
- (*z).AssetOffsets = nil
- } else if (*z).AssetOffsets != nil && cap((*z).AssetOffsets) >= zb0012 {
- (*z).AssetOffsets = ((*z).AssetOffsets)[:zb0012]
+ (*z).AssetsCommonGroupData.AssetOffsets = nil
+ } else if (*z).AssetsCommonGroupData.AssetOffsets != nil && cap((*z).AssetsCommonGroupData.AssetOffsets) >= zb0012 {
+ (*z).AssetsCommonGroupData.AssetOffsets = ((*z).AssetsCommonGroupData.AssetOffsets)[:zb0012]
} else {
- (*z).AssetOffsets = make([]basics.AssetIndex, zb0012)
+ (*z).AssetsCommonGroupData.AssetOffsets = make([]basics.AssetIndex, zb0012)
}
- for zb0001 := range (*z).AssetOffsets {
- bts, err = (*z).AssetOffsets[zb0001].UnmarshalMsg(bts)
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ bts, err = (*z).AssetsCommonGroupData.AssetOffsets[zb0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "AssetOffsets", zb0001)
return
@@ -1279,8 +1652,8 @@ func (_ *AssetsHoldingGroupData) CanUnmarshalMsg(z interface{}) bool {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *AssetsHoldingGroupData) Msgsize() (s int) {
s = 1 + 3 + msgp.ArrayHeaderSize
- for zb0001 := range (*z).AssetOffsets {
- s += (*z).AssetOffsets[zb0001].Msgsize()
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ s += (*z).AssetsCommonGroupData.AssetOffsets[zb0001].Msgsize()
}
s += 2 + msgp.ArrayHeaderSize + (len((*z).Amounts) * (msgp.Uint64Size)) + 2 + msgp.ArrayHeaderSize + (len((*z).Frozens) * (msgp.BoolSize))
return
@@ -1288,7 +1661,1160 @@ func (z *AssetsHoldingGroupData) Msgsize() (s int) {
// MsgIsZero returns whether this is a zero value
func (z *AssetsHoldingGroupData) MsgIsZero() bool {
- return (len((*z).AssetOffsets) == 0) && (len((*z).Amounts) == 0) && (len((*z).Frozens) == 0)
+ return (len((*z).AssetsCommonGroupData.AssetOffsets) == 0) && (len((*z).Amounts) == 0) && (len((*z).Frozens) == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *AssetsParamsGroup) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0001Len := uint32(4)
+ var zb0001Mask uint8 /* 7 bits */
+ if (*z).AssetGroupDesc.Count == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x2
+ }
+ if (*z).AssetGroupDesc.DeltaMaxAssetIndex == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x4
+ }
+ if (*z).AssetGroupDesc.AssetGroupKey == 0 {
+ zb0001Len--
+ zb0001Mask |= 0x10
+ }
+ if (*z).AssetGroupDesc.MinAssetIndex.MsgIsZero() {
+ zb0001Len--
+ zb0001Mask |= 0x40
+ }
+ // variable map header, size zb0001Len
+ o = append(o, 0x80|uint8(zb0001Len))
+ if zb0001Len != 0 {
+ if (zb0001Mask & 0x2) == 0 { // if not empty
+ // string "c"
+ o = append(o, 0xa1, 0x63)
+ o = msgp.AppendUint32(o, (*z).AssetGroupDesc.Count)
+ }
+ if (zb0001Mask & 0x4) == 0 { // if not empty
+ // string "d"
+ o = append(o, 0xa1, 0x64)
+ o = msgp.AppendUint64(o, (*z).AssetGroupDesc.DeltaMaxAssetIndex)
+ }
+ if (zb0001Mask & 0x10) == 0 { // if not empty
+ // string "k"
+ o = append(o, 0xa1, 0x6b)
+ o = msgp.AppendInt64(o, (*z).AssetGroupDesc.AssetGroupKey)
+ }
+ if (zb0001Mask & 0x40) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ o = (*z).AssetGroupDesc.MinAssetIndex.MarshalMsg(o)
+ }
+ }
+ return
+}
+
+func (_ *AssetsParamsGroup) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsParamsGroup)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *AssetsParamsGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0001 int
+ var zb0002 bool
+ zb0001, zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0001, zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).AssetGroupDesc.Count, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Count")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ bts, err = (*z).AssetGroupDesc.MinAssetIndex.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MinAssetIndex")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).AssetGroupDesc.DeltaMaxAssetIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "DeltaMaxAssetIndex")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ zb0001--
+ (*z).AssetGroupDesc.AssetGroupKey, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetGroupKey")
+ return
+ }
+ }
+ if zb0001 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0001)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 {
+ (*z) = AssetsParamsGroup{}
+ }
+ for zb0001 > 0 {
+ zb0001--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "c":
+ (*z).AssetGroupDesc.Count, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Count")
+ return
+ }
+ case "m":
+ bts, err = (*z).AssetGroupDesc.MinAssetIndex.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MinAssetIndex")
+ return
+ }
+ case "d":
+ (*z).AssetGroupDesc.DeltaMaxAssetIndex, bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DeltaMaxAssetIndex")
+ return
+ }
+ case "k":
+ (*z).AssetGroupDesc.AssetGroupKey, bts, err = msgp.ReadInt64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetGroupKey")
+ return
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *AssetsParamsGroup) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsParamsGroup)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *AssetsParamsGroup) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint32Size + 2 + (*z).AssetGroupDesc.MinAssetIndex.Msgsize() + 2 + msgp.Uint64Size + 2 + msgp.Int64Size
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *AssetsParamsGroup) MsgIsZero() bool {
+ return ((*z).AssetGroupDesc.Count == 0) && ((*z).AssetGroupDesc.MinAssetIndex.MsgIsZero()) && ((*z).AssetGroupDesc.DeltaMaxAssetIndex == 0) && ((*z).AssetGroupDesc.AssetGroupKey == 0)
+}
+
+// MarshalMsg implements msgp.Marshaler
+func (z *AssetsParamsGroupData) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0014Len := uint32(12)
+ var zb0014Mask uint16 /* 13 bits */
+ if len((*z).AssetsCommonGroupData.AssetOffsets) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x2
+ }
+ if len((*z).Clawbacks) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x4
+ }
+ if len((*z).Decimals) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x8
+ }
+ if len((*z).DefaultFrozens) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x10
+ }
+ if len((*z).MetadataHash) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x20
+ }
+ if len((*z).URLs) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x40
+ }
+ if len((*z).Managers) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x80
+ }
+ if len((*z).AssetNames) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x100
+ }
+ if len((*z).Reserves) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x200
+ }
+ if len((*z).Totals) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x400
+ }
+ if len((*z).UnitNames) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x800
+ }
+ if len((*z).Freezes) == 0 {
+ zb0014Len--
+ zb0014Mask |= 0x1000
+ }
+ // variable map header, size zb0014Len
+ o = append(o, 0x80|uint8(zb0014Len))
+ if zb0014Len != 0 {
+ if (zb0014Mask & 0x2) == 0 { // if not empty
+ // string "ao"
+ o = append(o, 0xa2, 0x61, 0x6f)
+ if (*z).AssetsCommonGroupData.AssetOffsets == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetsCommonGroupData.AssetOffsets)))
+ }
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ o = (*z).AssetsCommonGroupData.AssetOffsets[zb0001].MarshalMsg(o)
+ }
+ }
+ if (zb0014Mask & 0x4) == 0 { // if not empty
+ // string "c"
+ o = append(o, 0xa1, 0x63)
+ if (*z).Clawbacks == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Clawbacks)))
+ }
+ for zb0013 := range (*z).Clawbacks {
+ o = (*z).Clawbacks[zb0013].MarshalMsg(o)
+ }
+ }
+ if (zb0014Mask & 0x8) == 0 { // if not empty
+ // string "d"
+ o = append(o, 0xa1, 0x64)
+ if (*z).Decimals == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Decimals)))
+ }
+ for zb0003 := range (*z).Decimals {
+ o = msgp.AppendUint32(o, (*z).Decimals[zb0003])
+ }
+ }
+ if (zb0014Mask & 0x10) == 0 { // if not empty
+ // string "f"
+ o = append(o, 0xa1, 0x66)
+ if (*z).DefaultFrozens == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).DefaultFrozens)))
+ }
+ for zb0004 := range (*z).DefaultFrozens {
+ o = msgp.AppendBool(o, (*z).DefaultFrozens[zb0004])
+ }
+ }
+ if (zb0014Mask & 0x20) == 0 { // if not empty
+ // string "h"
+ o = append(o, 0xa1, 0x68)
+ if (*z).MetadataHash == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).MetadataHash)))
+ }
+ for zb0008 := range (*z).MetadataHash {
+ o = msgp.AppendBytes(o, ((*z).MetadataHash[zb0008])[:])
+ }
+ }
+ if (zb0014Mask & 0x40) == 0 { // if not empty
+ // string "l"
+ o = append(o, 0xa1, 0x6c)
+ if (*z).URLs == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).URLs)))
+ }
+ for zb0007 := range (*z).URLs {
+ o = msgp.AppendString(o, (*z).URLs[zb0007])
+ }
+ }
+ if (zb0014Mask & 0x80) == 0 { // if not empty
+ // string "m"
+ o = append(o, 0xa1, 0x6d)
+ if (*z).Managers == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Managers)))
+ }
+ for zb0010 := range (*z).Managers {
+ o = (*z).Managers[zb0010].MarshalMsg(o)
+ }
+ }
+ if (zb0014Mask & 0x100) == 0 { // if not empty
+ // string "n"
+ o = append(o, 0xa1, 0x6e)
+ if (*z).AssetNames == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).AssetNames)))
+ }
+ for zb0006 := range (*z).AssetNames {
+ o = msgp.AppendString(o, (*z).AssetNames[zb0006])
+ }
+ }
+ if (zb0014Mask & 0x200) == 0 { // if not empty
+ // string "r"
+ o = append(o, 0xa1, 0x72)
+ if (*z).Reserves == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Reserves)))
+ }
+ for zb0011 := range (*z).Reserves {
+ o = (*z).Reserves[zb0011].MarshalMsg(o)
+ }
+ }
+ if (zb0014Mask & 0x400) == 0 { // if not empty
+ // string "t"
+ o = append(o, 0xa1, 0x74)
+ if (*z).Totals == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Totals)))
+ }
+ for zb0002 := range (*z).Totals {
+ o = msgp.AppendUint64(o, (*z).Totals[zb0002])
+ }
+ }
+ if (zb0014Mask & 0x800) == 0 { // if not empty
+ // string "u"
+ o = append(o, 0xa1, 0x75)
+ if (*z).UnitNames == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).UnitNames)))
+ }
+ for zb0005 := range (*z).UnitNames {
+ o = msgp.AppendString(o, (*z).UnitNames[zb0005])
+ }
+ }
+ if (zb0014Mask & 0x1000) == 0 { // if not empty
+ // string "z"
+ o = append(o, 0xa1, 0x7a)
+ if (*z).Freezes == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Freezes)))
+ }
+ for zb0012 := range (*z).Freezes {
+ o = (*z).Freezes[zb0012].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *AssetsParamsGroupData) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsParamsGroupData)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *AssetsParamsGroupData) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0014 int
+ var zb0015 bool
+ zb0014, zb0015, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0014, zb0015, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0016 int
+ var zb0017 bool
+ zb0016, zb0017, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetOffsets")
+ return
+ }
+ if zb0016 > MaxHoldingGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0016), uint64(MaxHoldingGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "AssetOffsets")
+ return
+ }
+ if zb0017 {
+ (*z).AssetsCommonGroupData.AssetOffsets = nil
+ } else if (*z).AssetsCommonGroupData.AssetOffsets != nil && cap((*z).AssetsCommonGroupData.AssetOffsets) >= zb0016 {
+ (*z).AssetsCommonGroupData.AssetOffsets = ((*z).AssetsCommonGroupData.AssetOffsets)[:zb0016]
+ } else {
+ (*z).AssetsCommonGroupData.AssetOffsets = make([]basics.AssetIndex, zb0016)
+ }
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ bts, err = (*z).AssetsCommonGroupData.AssetOffsets[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetOffsets", zb0001)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0018 int
+ var zb0019 bool
+ zb0018, zb0019, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Totals")
+ return
+ }
+ if zb0018 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0018), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "Totals")
+ return
+ }
+ if zb0019 {
+ (*z).Totals = nil
+ } else if (*z).Totals != nil && cap((*z).Totals) >= zb0018 {
+ (*z).Totals = ((*z).Totals)[:zb0018]
+ } else {
+ (*z).Totals = make([]uint64, zb0018)
+ }
+ for zb0002 := range (*z).Totals {
+ (*z).Totals[zb0002], bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Totals", zb0002)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0020 int
+ var zb0021 bool
+ zb0020, zb0021, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Decimals")
+ return
+ }
+ if zb0020 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0020), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "Decimals")
+ return
+ }
+ if zb0021 {
+ (*z).Decimals = nil
+ } else if (*z).Decimals != nil && cap((*z).Decimals) >= zb0020 {
+ (*z).Decimals = ((*z).Decimals)[:zb0020]
+ } else {
+ (*z).Decimals = make([]uint32, zb0020)
+ }
+ for zb0003 := range (*z).Decimals {
+ (*z).Decimals[zb0003], bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Decimals", zb0003)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0022 int
+ var zb0023 bool
+ zb0022, zb0023, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "DefaultFrozens")
+ return
+ }
+ if zb0022 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0022), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "DefaultFrozens")
+ return
+ }
+ if zb0023 {
+ (*z).DefaultFrozens = nil
+ } else if (*z).DefaultFrozens != nil && cap((*z).DefaultFrozens) >= zb0022 {
+ (*z).DefaultFrozens = ((*z).DefaultFrozens)[:zb0022]
+ } else {
+ (*z).DefaultFrozens = make([]bool, zb0022)
+ }
+ for zb0004 := range (*z).DefaultFrozens {
+ (*z).DefaultFrozens[zb0004], bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "DefaultFrozens", zb0004)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0024 int
+ var zb0025 bool
+ zb0024, zb0025, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnitNames")
+ return
+ }
+ if zb0024 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0024), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "UnitNames")
+ return
+ }
+ if zb0025 {
+ (*z).UnitNames = nil
+ } else if (*z).UnitNames != nil && cap((*z).UnitNames) >= zb0024 {
+ (*z).UnitNames = ((*z).UnitNames)[:zb0024]
+ } else {
+ (*z).UnitNames = make([]string, zb0024)
+ }
+ for zb0005 := range (*z).UnitNames {
+ (*z).UnitNames[zb0005], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "UnitNames", zb0005)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0026 int
+ var zb0027 bool
+ zb0026, zb0027, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetNames")
+ return
+ }
+ if zb0026 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0026), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "AssetNames")
+ return
+ }
+ if zb0027 {
+ (*z).AssetNames = nil
+ } else if (*z).AssetNames != nil && cap((*z).AssetNames) >= zb0026 {
+ (*z).AssetNames = ((*z).AssetNames)[:zb0026]
+ } else {
+ (*z).AssetNames = make([]string, zb0026)
+ }
+ for zb0006 := range (*z).AssetNames {
+ (*z).AssetNames[zb0006], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "AssetNames", zb0006)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0028 int
+ var zb0029 bool
+ zb0028, zb0029, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "URLs")
+ return
+ }
+ if zb0028 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0028), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "URLs")
+ return
+ }
+ if zb0029 {
+ (*z).URLs = nil
+ } else if (*z).URLs != nil && cap((*z).URLs) >= zb0028 {
+ (*z).URLs = ((*z).URLs)[:zb0028]
+ } else {
+ (*z).URLs = make([]string, zb0028)
+ }
+ for zb0007 := range (*z).URLs {
+ (*z).URLs[zb0007], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "URLs", zb0007)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0030 int
+ var zb0031 bool
+ zb0030, zb0031, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
+ return
+ }
+ if zb0030 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0030), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "MetadataHash")
+ return
+ }
+ if zb0031 {
+ (*z).MetadataHash = nil
+ } else if (*z).MetadataHash != nil && cap((*z).MetadataHash) >= zb0030 {
+ (*z).MetadataHash = ((*z).MetadataHash)[:zb0030]
+ } else {
+ (*z).MetadataHash = make([][32]byte, zb0030)
+ }
+ for zb0008 := range (*z).MetadataHash {
+ bts, err = msgp.ReadExactBytes(bts, ((*z).MetadataHash[zb0008])[:])
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "MetadataHash", zb0008)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0032 int
+ var zb0033 bool
+ zb0032, zb0033, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Managers")
+ return
+ }
+ if zb0032 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0032), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "Managers")
+ return
+ }
+ if zb0033 {
+ (*z).Managers = nil
+ } else if (*z).Managers != nil && cap((*z).Managers) >= zb0032 {
+ (*z).Managers = ((*z).Managers)[:zb0032]
+ } else {
+ (*z).Managers = make([]basics.Address, zb0032)
+ }
+ for zb0010 := range (*z).Managers {
+ bts, err = (*z).Managers[zb0010].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Managers", zb0010)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0034 int
+ var zb0035 bool
+ zb0034, zb0035, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Reserves")
+ return
+ }
+ if zb0034 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0034), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "Reserves")
+ return
+ }
+ if zb0035 {
+ (*z).Reserves = nil
+ } else if (*z).Reserves != nil && cap((*z).Reserves) >= zb0034 {
+ (*z).Reserves = ((*z).Reserves)[:zb0034]
+ } else {
+ (*z).Reserves = make([]basics.Address, zb0034)
+ }
+ for zb0011 := range (*z).Reserves {
+ bts, err = (*z).Reserves[zb0011].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Reserves", zb0011)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0036 int
+ var zb0037 bool
+ zb0036, zb0037, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Freezes")
+ return
+ }
+ if zb0036 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0036), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "Freezes")
+ return
+ }
+ if zb0037 {
+ (*z).Freezes = nil
+ } else if (*z).Freezes != nil && cap((*z).Freezes) >= zb0036 {
+ (*z).Freezes = ((*z).Freezes)[:zb0036]
+ } else {
+ (*z).Freezes = make([]basics.Address, zb0036)
+ }
+ for zb0012 := range (*z).Freezes {
+ bts, err = (*z).Freezes[zb0012].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Freezes", zb0012)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ zb0014--
+ var zb0038 int
+ var zb0039 bool
+ zb0038, zb0039, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Clawbacks")
+ return
+ }
+ if zb0038 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0038), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "struct-from-array", "Clawbacks")
+ return
+ }
+ if zb0039 {
+ (*z).Clawbacks = nil
+ } else if (*z).Clawbacks != nil && cap((*z).Clawbacks) >= zb0038 {
+ (*z).Clawbacks = ((*z).Clawbacks)[:zb0038]
+ } else {
+ (*z).Clawbacks = make([]basics.Address, zb0038)
+ }
+ for zb0013 := range (*z).Clawbacks {
+ bts, err = (*z).Clawbacks[zb0013].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Clawbacks", zb0013)
+ return
+ }
+ }
+ }
+ if zb0014 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0014)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0015 {
+ (*z) = AssetsParamsGroupData{}
+ }
+ for zb0014 > 0 {
+ zb0014--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "ao":
+ var zb0040 int
+ var zb0041 bool
+ zb0040, zb0041, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetOffsets")
+ return
+ }
+ if zb0040 > MaxHoldingGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0040), uint64(MaxHoldingGroupSize))
+ err = msgp.WrapError(err, "AssetOffsets")
+ return
+ }
+ if zb0041 {
+ (*z).AssetsCommonGroupData.AssetOffsets = nil
+ } else if (*z).AssetsCommonGroupData.AssetOffsets != nil && cap((*z).AssetsCommonGroupData.AssetOffsets) >= zb0040 {
+ (*z).AssetsCommonGroupData.AssetOffsets = ((*z).AssetsCommonGroupData.AssetOffsets)[:zb0040]
+ } else {
+ (*z).AssetsCommonGroupData.AssetOffsets = make([]basics.AssetIndex, zb0040)
+ }
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ bts, err = (*z).AssetsCommonGroupData.AssetOffsets[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetOffsets", zb0001)
+ return
+ }
+ }
+ case "t":
+ var zb0042 int
+ var zb0043 bool
+ zb0042, zb0043, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Totals")
+ return
+ }
+ if zb0042 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0042), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "Totals")
+ return
+ }
+ if zb0043 {
+ (*z).Totals = nil
+ } else if (*z).Totals != nil && cap((*z).Totals) >= zb0042 {
+ (*z).Totals = ((*z).Totals)[:zb0042]
+ } else {
+ (*z).Totals = make([]uint64, zb0042)
+ }
+ for zb0002 := range (*z).Totals {
+ (*z).Totals[zb0002], bts, err = msgp.ReadUint64Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Totals", zb0002)
+ return
+ }
+ }
+ case "d":
+ var zb0044 int
+ var zb0045 bool
+ zb0044, zb0045, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Decimals")
+ return
+ }
+ if zb0044 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0044), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "Decimals")
+ return
+ }
+ if zb0045 {
+ (*z).Decimals = nil
+ } else if (*z).Decimals != nil && cap((*z).Decimals) >= zb0044 {
+ (*z).Decimals = ((*z).Decimals)[:zb0044]
+ } else {
+ (*z).Decimals = make([]uint32, zb0044)
+ }
+ for zb0003 := range (*z).Decimals {
+ (*z).Decimals[zb0003], bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Decimals", zb0003)
+ return
+ }
+ }
+ case "f":
+ var zb0046 int
+ var zb0047 bool
+ zb0046, zb0047, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DefaultFrozens")
+ return
+ }
+ if zb0046 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0046), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "DefaultFrozens")
+ return
+ }
+ if zb0047 {
+ (*z).DefaultFrozens = nil
+ } else if (*z).DefaultFrozens != nil && cap((*z).DefaultFrozens) >= zb0046 {
+ (*z).DefaultFrozens = ((*z).DefaultFrozens)[:zb0046]
+ } else {
+ (*z).DefaultFrozens = make([]bool, zb0046)
+ }
+ for zb0004 := range (*z).DefaultFrozens {
+ (*z).DefaultFrozens[zb0004], bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "DefaultFrozens", zb0004)
+ return
+ }
+ }
+ case "u":
+ var zb0048 int
+ var zb0049 bool
+ zb0048, zb0049, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnitNames")
+ return
+ }
+ if zb0048 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0048), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "UnitNames")
+ return
+ }
+ if zb0049 {
+ (*z).UnitNames = nil
+ } else if (*z).UnitNames != nil && cap((*z).UnitNames) >= zb0048 {
+ (*z).UnitNames = ((*z).UnitNames)[:zb0048]
+ } else {
+ (*z).UnitNames = make([]string, zb0048)
+ }
+ for zb0005 := range (*z).UnitNames {
+ (*z).UnitNames[zb0005], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "UnitNames", zb0005)
+ return
+ }
+ }
+ case "n":
+ var zb0050 int
+ var zb0051 bool
+ zb0050, zb0051, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetNames")
+ return
+ }
+ if zb0050 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0050), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "AssetNames")
+ return
+ }
+ if zb0051 {
+ (*z).AssetNames = nil
+ } else if (*z).AssetNames != nil && cap((*z).AssetNames) >= zb0050 {
+ (*z).AssetNames = ((*z).AssetNames)[:zb0050]
+ } else {
+ (*z).AssetNames = make([]string, zb0050)
+ }
+ for zb0006 := range (*z).AssetNames {
+ (*z).AssetNames[zb0006], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "AssetNames", zb0006)
+ return
+ }
+ }
+ case "l":
+ var zb0052 int
+ var zb0053 bool
+ zb0052, zb0053, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "URLs")
+ return
+ }
+ if zb0052 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0052), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "URLs")
+ return
+ }
+ if zb0053 {
+ (*z).URLs = nil
+ } else if (*z).URLs != nil && cap((*z).URLs) >= zb0052 {
+ (*z).URLs = ((*z).URLs)[:zb0052]
+ } else {
+ (*z).URLs = make([]string, zb0052)
+ }
+ for zb0007 := range (*z).URLs {
+ (*z).URLs[zb0007], bts, err = msgp.ReadStringBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "URLs", zb0007)
+ return
+ }
+ }
+ case "h":
+ var zb0054 int
+ var zb0055 bool
+ zb0054, zb0055, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "MetadataHash")
+ return
+ }
+ if zb0054 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0054), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "MetadataHash")
+ return
+ }
+ if zb0055 {
+ (*z).MetadataHash = nil
+ } else if (*z).MetadataHash != nil && cap((*z).MetadataHash) >= zb0054 {
+ (*z).MetadataHash = ((*z).MetadataHash)[:zb0054]
+ } else {
+ (*z).MetadataHash = make([][32]byte, zb0054)
+ }
+ for zb0008 := range (*z).MetadataHash {
+ bts, err = msgp.ReadExactBytes(bts, ((*z).MetadataHash[zb0008])[:])
+ if err != nil {
+ err = msgp.WrapError(err, "MetadataHash", zb0008)
+ return
+ }
+ }
+ case "m":
+ var zb0056 int
+ var zb0057 bool
+ zb0056, zb0057, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Managers")
+ return
+ }
+ if zb0056 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0056), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "Managers")
+ return
+ }
+ if zb0057 {
+ (*z).Managers = nil
+ } else if (*z).Managers != nil && cap((*z).Managers) >= zb0056 {
+ (*z).Managers = ((*z).Managers)[:zb0056]
+ } else {
+ (*z).Managers = make([]basics.Address, zb0056)
+ }
+ for zb0010 := range (*z).Managers {
+ bts, err = (*z).Managers[zb0010].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Managers", zb0010)
+ return
+ }
+ }
+ case "r":
+ var zb0058 int
+ var zb0059 bool
+ zb0058, zb0059, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Reserves")
+ return
+ }
+ if zb0058 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0058), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "Reserves")
+ return
+ }
+ if zb0059 {
+ (*z).Reserves = nil
+ } else if (*z).Reserves != nil && cap((*z).Reserves) >= zb0058 {
+ (*z).Reserves = ((*z).Reserves)[:zb0058]
+ } else {
+ (*z).Reserves = make([]basics.Address, zb0058)
+ }
+ for zb0011 := range (*z).Reserves {
+ bts, err = (*z).Reserves[zb0011].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Reserves", zb0011)
+ return
+ }
+ }
+ case "z":
+ var zb0060 int
+ var zb0061 bool
+ zb0060, zb0061, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Freezes")
+ return
+ }
+ if zb0060 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0060), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "Freezes")
+ return
+ }
+ if zb0061 {
+ (*z).Freezes = nil
+ } else if (*z).Freezes != nil && cap((*z).Freezes) >= zb0060 {
+ (*z).Freezes = ((*z).Freezes)[:zb0060]
+ } else {
+ (*z).Freezes = make([]basics.Address, zb0060)
+ }
+ for zb0012 := range (*z).Freezes {
+ bts, err = (*z).Freezes[zb0012].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Freezes", zb0012)
+ return
+ }
+ }
+ case "c":
+ var zb0062 int
+ var zb0063 bool
+ zb0062, zb0063, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Clawbacks")
+ return
+ }
+ if zb0062 > MaxParamsGroupSize {
+ err = msgp.ErrOverflow(uint64(zb0062), uint64(MaxParamsGroupSize))
+ err = msgp.WrapError(err, "Clawbacks")
+ return
+ }
+ if zb0063 {
+ (*z).Clawbacks = nil
+ } else if (*z).Clawbacks != nil && cap((*z).Clawbacks) >= zb0062 {
+ (*z).Clawbacks = ((*z).Clawbacks)[:zb0062]
+ } else {
+ (*z).Clawbacks = make([]basics.Address, zb0062)
+ }
+ for zb0013 := range (*z).Clawbacks {
+ bts, err = (*z).Clawbacks[zb0013].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Clawbacks", zb0013)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *AssetsParamsGroupData) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*AssetsParamsGroupData)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *AssetsParamsGroupData) Msgsize() (s int) {
+ s = 1 + 3 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).AssetsCommonGroupData.AssetOffsets {
+ s += (*z).AssetsCommonGroupData.AssetOffsets[zb0001].Msgsize()
+ }
+ s += 2 + msgp.ArrayHeaderSize + (len((*z).Totals) * (msgp.Uint64Size)) + 2 + msgp.ArrayHeaderSize + (len((*z).Decimals) * (msgp.Uint32Size)) + 2 + msgp.ArrayHeaderSize + (len((*z).DefaultFrozens) * (msgp.BoolSize)) + 2 + msgp.ArrayHeaderSize
+ for zb0005 := range (*z).UnitNames {
+ s += msgp.StringPrefixSize + len((*z).UnitNames[zb0005])
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0006 := range (*z).AssetNames {
+ s += msgp.StringPrefixSize + len((*z).AssetNames[zb0006])
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0007 := range (*z).URLs {
+ s += msgp.StringPrefixSize + len((*z).URLs[zb0007])
+ }
+ s += 2 + msgp.ArrayHeaderSize + (len((*z).MetadataHash) * (32 * (msgp.ByteSize))) + 2 + msgp.ArrayHeaderSize
+ for zb0010 := range (*z).Managers {
+ s += (*z).Managers[zb0010].Msgsize()
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0011 := range (*z).Reserves {
+ s += (*z).Reserves[zb0011].Msgsize()
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0012 := range (*z).Freezes {
+ s += (*z).Freezes[zb0012].Msgsize()
+ }
+ s += 2 + msgp.ArrayHeaderSize
+ for zb0013 := range (*z).Clawbacks {
+ s += (*z).Clawbacks[zb0013].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *AssetsParamsGroupData) MsgIsZero() bool {
+ return (len((*z).AssetsCommonGroupData.AssetOffsets) == 0) && (len((*z).Totals) == 0) && (len((*z).Decimals) == 0) && (len((*z).DefaultFrozens) == 0) && (len((*z).UnitNames) == 0) && (len((*z).AssetNames) == 0) && (len((*z).URLs) == 0) && (len((*z).MetadataHash) == 0) && (len((*z).Managers) == 0) && (len((*z).Reserves) == 0) && (len((*z).Freezes) == 0) && (len((*z).Clawbacks) == 0)
}
// MarshalMsg implements msgp.Marshaler
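
The remainder of this file is msgp-generated code for the new container types; the hand-written definitions live in ledger/ledgercore/persistedacctdata.go, which is part of this commit but not shown in this excerpt. Reconstructed from the marshalers above, the new params-side containers presumably look roughly like the following sketch (only the field names, element types, and msgpack keys are taken from the generated code; field order, codec tags, and the offset semantics are guesses):

// AssetGroupDesc describes one group of asset indexes ("c", "m", "d", "k").
type AssetGroupDesc struct {
	Count              uint32
	MinAssetIndex      basics.AssetIndex
	DeltaMaxAssetIndex uint64
	AssetGroupKey      int64
}

// AssetsCommonGroupData carries the per-group index offsets ("ao"),
// presumably stored relative to MinAssetIndex.
type AssetsCommonGroupData struct {
	AssetOffsets []basics.AssetIndex
}

// AssetsParamsGroupData stores a group's params column-wise:
// one parallel slice per basics.AssetParams field.
type AssetsParamsGroupData struct {
	AssetsCommonGroupData
	Totals         []uint64         // "t"
	Decimals       []uint32         // "d"
	DefaultFrozens []bool           // "f"
	UnitNames      []string         // "u"
	AssetNames     []string         // "n"
	URLs           []string         // "l"
	MetadataHash   [][32]byte       // "h"
	Managers       []basics.Address // "m"
	Reserves       []basics.Address // "r"
	Freezes        []basics.Address // "z"
	Clawbacks      []basics.Address // "c"
}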
@@ -1473,11 +2999,192 @@ func (z *ExtendedAssetHolding) MsgIsZero() bool {
}
// MarshalMsg implements msgp.Marshaler
+func (z *ExtendedAssetParams) MarshalMsg(b []byte) (o []byte) {
+ o = msgp.Require(b, z.Msgsize())
+ // omitempty: check for empty values
+ zb0002Len := uint32(2)
+ var zb0002Mask uint8 /* 3 bits */
+ if (*z).Count == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x2
+ }
+ if len((*z).Groups) == 0 {
+ zb0002Len--
+ zb0002Mask |= 0x4
+ }
+ // variable map header, size zb0002Len
+ o = append(o, 0x80|uint8(zb0002Len))
+ if zb0002Len != 0 {
+ if (zb0002Mask & 0x2) == 0 { // if not empty
+ // string "c"
+ o = append(o, 0xa1, 0x63)
+ o = msgp.AppendUint32(o, (*z).Count)
+ }
+ if (zb0002Mask & 0x4) == 0 { // if not empty
+ // string "gs"
+ o = append(o, 0xa2, 0x67, 0x73)
+ if (*z).Groups == nil {
+ o = msgp.AppendNil(o)
+ } else {
+ o = msgp.AppendArrayHeader(o, uint32(len((*z).Groups)))
+ }
+ for zb0001 := range (*z).Groups {
+ o = (*z).Groups[zb0001].MarshalMsg(o)
+ }
+ }
+ }
+ return
+}
+
+func (_ *ExtendedAssetParams) CanMarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ExtendedAssetParams)
+ return ok
+}
+
+// UnmarshalMsg implements msgp.Unmarshaler
+func (z *ExtendedAssetParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
+ var field []byte
+ _ = field
+ var zb0002 int
+ var zb0003 bool
+ zb0002, zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
+ if _, ok := err.(msgp.TypeError); ok {
+ zb0002, zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0002 > 0 {
+ zb0002--
+ (*z).Count, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Count")
+ return
+ }
+ }
+ if zb0002 > 0 {
+ zb0002--
+ var zb0004 int
+ var zb0005 bool
+ zb0004, zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Groups")
+ return
+ }
+ if zb0004 > 4096 {
+ err = msgp.ErrOverflow(uint64(zb0004), uint64(4096))
+ err = msgp.WrapError(err, "struct-from-array", "Groups")
+ return
+ }
+ if zb0005 {
+ (*z).Groups = nil
+ } else if (*z).Groups != nil && cap((*z).Groups) >= zb0004 {
+ (*z).Groups = ((*z).Groups)[:zb0004]
+ } else {
+ (*z).Groups = make([]AssetsParamsGroup, zb0004)
+ }
+ for zb0001 := range (*z).Groups {
+ bts, err = (*z).Groups[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "Groups", zb0001)
+ return
+ }
+ }
+ }
+ if zb0002 > 0 {
+ err = msgp.ErrTooManyArrayFields(zb0002)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array")
+ return
+ }
+ }
+ } else {
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ if zb0003 {
+ (*z) = ExtendedAssetParams{}
+ }
+ for zb0002 > 0 {
+ zb0002--
+ field, bts, err = msgp.ReadMapKeyZC(bts)
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ switch string(field) {
+ case "c":
+ (*z).Count, bts, err = msgp.ReadUint32Bytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Count")
+ return
+ }
+ case "gs":
+ var zb0006 int
+ var zb0007 bool
+ zb0006, zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Groups")
+ return
+ }
+ if zb0006 > 4096 {
+ err = msgp.ErrOverflow(uint64(zb0006), uint64(4096))
+ err = msgp.WrapError(err, "Groups")
+ return
+ }
+ if zb0007 {
+ (*z).Groups = nil
+ } else if (*z).Groups != nil && cap((*z).Groups) >= zb0006 {
+ (*z).Groups = ((*z).Groups)[:zb0006]
+ } else {
+ (*z).Groups = make([]AssetsParamsGroup, zb0006)
+ }
+ for zb0001 := range (*z).Groups {
+ bts, err = (*z).Groups[zb0001].UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "Groups", zb0001)
+ return
+ }
+ }
+ default:
+ err = msgp.ErrNoField(string(field))
+ if err != nil {
+ err = msgp.WrapError(err)
+ return
+ }
+ }
+ }
+ }
+ o = bts
+ return
+}
+
+func (_ *ExtendedAssetParams) CanUnmarshalMsg(z interface{}) bool {
+ _, ok := (z).(*ExtendedAssetParams)
+ return ok
+}
+
+// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
+func (z *ExtendedAssetParams) Msgsize() (s int) {
+ s = 1 + 2 + msgp.Uint32Size + 3 + msgp.ArrayHeaderSize
+ for zb0001 := range (*z).Groups {
+ s += (*z).Groups[zb0001].Msgsize()
+ }
+ return
+}
+
+// MsgIsZero returns whether this is a zero value
+func (z *ExtendedAssetParams) MsgIsZero() bool {
+ return ((*z).Count == 0) && (len((*z).Groups) == 0)
+}
+
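For orientation, a minimal round-trip sketch of the generated ExtendedAssetParams codec (the same shape the tests added below exercise); the encoding is a msgpack map using the short keys "c" and "gs" shown above:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/ledger/ledgercore"
)

func main() {
	v := ledgercore.ExtendedAssetParams{Count: 3}
	bts := v.MarshalMsg(nil) // a 1-entry msgpack map: key "c" -> 3; "gs" omitted as empty

	var u ledgercore.ExtendedAssetParams
	if _, err := u.UnmarshalMsg(bts); err != nil {
		panic(err)
	}
	fmt.Println(u.Count) // 3
}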
+// MarshalMsg implements msgp.Marshaler
func (z *PersistedAccountData) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0009Len := uint32(17)
- var zb0009Mask uint32 /* 19 bits */
+ zb0009Len := uint32(18)
+ var zb0009Mask uint32 /* 20 bits */
if (*z).AccountData.MicroAlgos.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x4
@@ -1502,50 +3209,54 @@ func (z *PersistedAccountData) MarshalMsg(b []byte) (o []byte) {
zb0009Len--
zb0009Mask |= 0x80
}
- if (*z).AccountData.RewardsBase == 0 {
+ if (*z).ExtendedAssetParams.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x100
}
- if (*z).AccountData.RewardedMicroAlgos.MsgIsZero() {
+ if (*z).AccountData.RewardsBase == 0 {
zb0009Len--
zb0009Mask |= 0x200
}
- if (*z).AccountData.Status.MsgIsZero() {
+ if (*z).AccountData.RewardedMicroAlgos.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x400
}
- if (*z).AccountData.SelectionID.MsgIsZero() {
+ if (*z).AccountData.Status.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x800
}
- if (*z).AccountData.AuthAddr.MsgIsZero() {
+ if (*z).AccountData.SelectionID.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x1000
}
- if (*z).AccountData.TotalExtraAppPages == 0 {
+ if (*z).AccountData.AuthAddr.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x2000
}
- if (*z).AccountData.TotalAppSchema.MsgIsZero() {
+ if (*z).AccountData.TotalExtraAppPages == 0 {
zb0009Len--
zb0009Mask |= 0x4000
}
- if (*z).AccountData.VoteID.MsgIsZero() {
+ if (*z).AccountData.TotalAppSchema.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x8000
}
- if (*z).AccountData.VoteFirstValid.MsgIsZero() {
+ if (*z).AccountData.VoteID.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x10000
}
- if (*z).AccountData.VoteKeyDilution == 0 {
+ if (*z).AccountData.VoteFirstValid.MsgIsZero() {
zb0009Len--
zb0009Mask |= 0x20000
}
- if (*z).AccountData.VoteLastValid.MsgIsZero() {
+ if (*z).AccountData.VoteKeyDilution == 0 {
zb0009Len--
zb0009Mask |= 0x40000
}
+ if (*z).AccountData.VoteLastValid.MsgIsZero() {
+ zb0009Len--
+ zb0009Mask |= 0x80000
+ }
// variable map header, size zb0009Len
o = msgp.AppendMapHeader(o, zb0009Len)
if zb0009Len != 0 {
@@ -1635,61 +3346,66 @@ func (z *PersistedAccountData) MarshalMsg(b []byte) (o []byte) {
}
}
if (zb0009Mask & 0x80) == 0 { // if not empty
- // string "eash"
- o = append(o, 0xa4, 0x65, 0x61, 0x73, 0x68)
+ // string "eah"
+ o = append(o, 0xa3, 0x65, 0x61, 0x68)
o = (*z).ExtendedAssetHolding.MarshalMsg(o)
}
if (zb0009Mask & 0x100) == 0 { // if not empty
+ // string "eap"
+ o = append(o, 0xa3, 0x65, 0x61, 0x70)
+ o = (*z).ExtendedAssetParams.MarshalMsg(o)
+ }
+ if (zb0009Mask & 0x200) == 0 { // if not empty
// string "ebase"
o = append(o, 0xa5, 0x65, 0x62, 0x61, 0x73, 0x65)
o = msgp.AppendUint64(o, (*z).AccountData.RewardsBase)
}
- if (zb0009Mask & 0x200) == 0 { // if not empty
+ if (zb0009Mask & 0x400) == 0 { // if not empty
// string "ern"
o = append(o, 0xa3, 0x65, 0x72, 0x6e)
o = (*z).AccountData.RewardedMicroAlgos.MarshalMsg(o)
}
- if (zb0009Mask & 0x400) == 0 { // if not empty
+ if (zb0009Mask & 0x800) == 0 { // if not empty
// string "onl"
o = append(o, 0xa3, 0x6f, 0x6e, 0x6c)
o = (*z).AccountData.Status.MarshalMsg(o)
}
- if (zb0009Mask & 0x800) == 0 { // if not empty
+ if (zb0009Mask & 0x1000) == 0 { // if not empty
// string "sel"
o = append(o, 0xa3, 0x73, 0x65, 0x6c)
o = (*z).AccountData.SelectionID.MarshalMsg(o)
}
- if (zb0009Mask & 0x1000) == 0 { // if not empty
+ if (zb0009Mask & 0x2000) == 0 { // if not empty
// string "spend"
o = append(o, 0xa5, 0x73, 0x70, 0x65, 0x6e, 0x64)
o = (*z).AccountData.AuthAddr.MarshalMsg(o)
}
- if (zb0009Mask & 0x2000) == 0 { // if not empty
+ if (zb0009Mask & 0x4000) == 0 { // if not empty
// string "teap"
o = append(o, 0xa4, 0x74, 0x65, 0x61, 0x70)
o = msgp.AppendUint32(o, (*z).AccountData.TotalExtraAppPages)
}
- if (zb0009Mask & 0x4000) == 0 { // if not empty
+ if (zb0009Mask & 0x8000) == 0 { // if not empty
// string "tsch"
o = append(o, 0xa4, 0x74, 0x73, 0x63, 0x68)
o = (*z).AccountData.TotalAppSchema.MarshalMsg(o)
}
- if (zb0009Mask & 0x8000) == 0 { // if not empty
+ if (zb0009Mask & 0x10000) == 0 { // if not empty
// string "vote"
o = append(o, 0xa4, 0x76, 0x6f, 0x74, 0x65)
o = (*z).AccountData.VoteID.MarshalMsg(o)
}
- if (zb0009Mask & 0x10000) == 0 { // if not empty
+ if (zb0009Mask & 0x20000) == 0 { // if not empty
// string "voteFst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x46, 0x73, 0x74)
o = (*z).AccountData.VoteFirstValid.MarshalMsg(o)
}
- if (zb0009Mask & 0x20000) == 0 { // if not empty
+ if (zb0009Mask & 0x40000) == 0 { // if not empty
// string "voteKD"
o = append(o, 0xa6, 0x76, 0x6f, 0x74, 0x65, 0x4b, 0x44)
o = msgp.AppendUint64(o, (*z).AccountData.VoteKeyDilution)
}
- if (zb0009Mask & 0x40000) == 0 { // if not empty
+ if (zb0009Mask & 0x80000) == 0 { // if not empty
// string "voteLst"
o = append(o, 0xa7, 0x76, 0x6f, 0x74, 0x65, 0x4c, 0x73, 0x74)
o = (*z).AccountData.VoteLastValid.MarshalMsg(o)
@@ -1965,6 +3681,14 @@ func (z *PersistedAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
}
if zb0009 > 0 {
+ zb0009--
+ bts, err = (*z).ExtendedAssetParams.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExtendedAssetParams")
+ return
+ }
+ }
+ if zb0009 > 0 {
err = msgp.ErrTooManyArrayFields(zb0009)
if err != nil {
err = msgp.WrapError(err, "struct-from-array")
@@ -2195,12 +3919,18 @@ func (z *PersistedAccountData) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "TotalExtraAppPages")
return
}
- case "eash":
+ case "eah":
bts, err = (*z).ExtendedAssetHolding.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "ExtendedAssetHolding")
return
}
+ case "eap":
+ bts, err = (*z).ExtendedAssetParams.UnmarshalMsg(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExtendedAssetParams")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -2253,11 +3983,11 @@ func (z *PersistedAccountData) Msgsize() (s int) {
s += 0 + zb0007.Msgsize() + zb0008.Msgsize()
}
}
- s += 5 + (*z).AccountData.TotalAppSchema.Msgsize() + 5 + msgp.Uint32Size + 5 + (*z).ExtendedAssetHolding.Msgsize()
+ s += 5 + (*z).AccountData.TotalAppSchema.Msgsize() + 5 + msgp.Uint32Size + 4 + (*z).ExtendedAssetHolding.Msgsize() + 4 + (*z).ExtendedAssetParams.Msgsize()
return
}
// MsgIsZero returns whether this is a zero value
func (z *PersistedAccountData) MsgIsZero() bool {
- return ((*z).AccountData.Status.MsgIsZero()) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid.MsgIsZero()) && ((*z).AccountData.VoteLastValid.MsgIsZero()) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && ((*z).AccountData.TotalAppSchema.MsgIsZero()) && ((*z).AccountData.TotalExtraAppPages == 0) && ((*z).ExtendedAssetHolding.MsgIsZero())
+ return ((*z).AccountData.Status.MsgIsZero()) && ((*z).AccountData.MicroAlgos.MsgIsZero()) && ((*z).AccountData.RewardsBase == 0) && ((*z).AccountData.RewardedMicroAlgos.MsgIsZero()) && ((*z).AccountData.VoteID.MsgIsZero()) && ((*z).AccountData.SelectionID.MsgIsZero()) && ((*z).AccountData.VoteFirstValid.MsgIsZero()) && ((*z).AccountData.VoteLastValid.MsgIsZero()) && ((*z).AccountData.VoteKeyDilution == 0) && (len((*z).AccountData.AssetParams) == 0) && (len((*z).AccountData.Assets) == 0) && ((*z).AccountData.AuthAddr.MsgIsZero()) && (len((*z).AccountData.AppLocalStates) == 0) && (len((*z).AccountData.AppParams) == 0) && ((*z).AccountData.TotalAppSchema.MsgIsZero()) && ((*z).AccountData.TotalExtraAppPages == 0) && ((*z).ExtendedAssetHolding.MsgIsZero()) && ((*z).ExtendedAssetParams.MsgIsZero())
}
diff --git a/ledger/ledgercore/msgp_gen_test.go b/ledger/ledgercore/msgp_gen_test.go
index d2742bb41..9526b81b5 100644
--- a/ledger/ledgercore/msgp_gen_test.go
+++ b/ledger/ledgercore/msgp_gen_test.go
@@ -129,6 +129,124 @@ func BenchmarkUnmarshalAlgoCount(b *testing.B) {
}
}
+func TestMarshalUnmarshalAssetGroupDesc(t *testing.T) {
+ v := AssetGroupDesc{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingAssetGroupDesc(t *testing.T) {
+ protocol.RunEncodingTest(t, &AssetGroupDesc{})
+}
+
+func BenchmarkMarshalMsgAssetGroupDesc(b *testing.B) {
+ v := AssetGroupDesc{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgAssetGroupDesc(b *testing.B) {
+ v := AssetGroupDesc{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalAssetGroupDesc(b *testing.B) {
+ v := AssetGroupDesc{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalAssetsCommonGroupData(t *testing.T) {
+ v := AssetsCommonGroupData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingAssetsCommonGroupData(t *testing.T) {
+ protocol.RunEncodingTest(t, &AssetsCommonGroupData{})
+}
+
+func BenchmarkMarshalMsgAssetsCommonGroupData(b *testing.B) {
+ v := AssetsCommonGroupData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgAssetsCommonGroupData(b *testing.B) {
+ v := AssetsCommonGroupData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalAssetsCommonGroupData(b *testing.B) {
+ v := AssetsCommonGroupData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalAssetsHoldingGroup(t *testing.T) {
v := AssetsHoldingGroup{}
bts := v.MarshalMsg(nil)
@@ -247,6 +365,124 @@ func BenchmarkUnmarshalAssetsHoldingGroupData(b *testing.B) {
}
}
+func TestMarshalUnmarshalAssetsParamsGroup(t *testing.T) {
+ v := AssetsParamsGroup{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingAssetsParamsGroup(t *testing.T) {
+ protocol.RunEncodingTest(t, &AssetsParamsGroup{})
+}
+
+func BenchmarkMarshalMsgAssetsParamsGroup(b *testing.B) {
+ v := AssetsParamsGroup{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgAssetsParamsGroup(b *testing.B) {
+ v := AssetsParamsGroup{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalAssetsParamsGroup(b *testing.B) {
+ v := AssetsParamsGroup{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
+func TestMarshalUnmarshalAssetsParamsGroupData(t *testing.T) {
+ v := AssetsParamsGroupData{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingAssetsParamsGroupData(t *testing.T) {
+ protocol.RunEncodingTest(t, &AssetsParamsGroupData{})
+}
+
+func BenchmarkMarshalMsgAssetsParamsGroupData(b *testing.B) {
+ v := AssetsParamsGroupData{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgAssetsParamsGroupData(b *testing.B) {
+ v := AssetsParamsGroupData{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalAssetsParamsGroupData(b *testing.B) {
+ v := AssetsParamsGroupData{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalExtendedAssetHolding(t *testing.T) {
v := ExtendedAssetHolding{}
bts := v.MarshalMsg(nil)
@@ -306,6 +542,65 @@ func BenchmarkUnmarshalExtendedAssetHolding(b *testing.B) {
}
}
+func TestMarshalUnmarshalExtendedAssetParams(t *testing.T) {
+ v := ExtendedAssetParams{}
+ bts := v.MarshalMsg(nil)
+ left, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
+ }
+
+ left, err = msgp.Skip(bts)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(left) > 0 {
+ t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
+ }
+}
+
+func TestRandomizedEncodingExtendedAssetParams(t *testing.T) {
+ protocol.RunEncodingTest(t, &ExtendedAssetParams{})
+}
+
+func BenchmarkMarshalMsgExtendedAssetParams(b *testing.B) {
+ v := ExtendedAssetParams{}
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ v.MarshalMsg(nil)
+ }
+}
+
+func BenchmarkAppendMsgExtendedAssetParams(b *testing.B) {
+ v := ExtendedAssetParams{}
+ bts := make([]byte, 0, v.Msgsize())
+ bts = v.MarshalMsg(bts[0:0])
+ b.SetBytes(int64(len(bts)))
+ b.ReportAllocs()
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ bts = v.MarshalMsg(bts[0:0])
+ }
+}
+
+func BenchmarkUnmarshalExtendedAssetParams(b *testing.B) {
+ v := ExtendedAssetParams{}
+ bts := v.MarshalMsg(nil)
+ b.ReportAllocs()
+ b.SetBytes(int64(len(bts)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ _, err := v.UnmarshalMsg(bts)
+ if err != nil {
+ b.Fatal(err)
+ }
+ }
+}
+
func TestMarshalUnmarshalPersistedAccountData(t *testing.T) {
v := PersistedAccountData{}
bts := v.MarshalMsg(nil)
diff --git a/ledger/ledgercore/persistedacctdata.go b/ledger/ledgercore/persistedacctdata.go
index f0b536933..3887525c8 100644
--- a/ledger/ledgercore/persistedacctdata.go
+++ b/ledger/ledgercore/persistedacctdata.go
@@ -17,18 +17,21 @@
package ledgercore
import (
+ "fmt"
"sort"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
)
-// MaxHoldingGroupSize specifies maximum size of AssetsHoldingGroup
-const MaxHoldingGroupSize = 256
+// MaxHoldingGroupSize specifies the maximum number of entries in AssetsHoldingGroup.groupData
+const MaxHoldingGroupSize = 256 // 256 entries take approx 3473 bytes
-// AssetsHoldingGroup is a metadata for asset group data (AssetsHoldingGroupData)
-// that is stored separately
-type AssetsHoldingGroup struct {
+// MaxParamsGroupSize specifies the maximum number of entries in AssetsParamsGroup.groupData
+const MaxParamsGroupSize = 14 // 14 entries take approx 3665 bytes
+
+// AssetGroupDesc is an asset group descriptor
+type AssetGroupDesc struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// assets count in the group
@@ -43,41 +46,82 @@ type AssetsHoldingGroup struct {
// A foreign key to the accountext table to the appropriate AssetsHoldingGroupData entry
// AssetGroupKey is 0 for newly created entries and filled after persisting to DB
AssetGroupKey int64 `codec:"k"`
+}
+// AssetsHoldingGroup is metadata for asset group data (AssetsHoldingGroupData)
+// that is stored separately
+type AssetsHoldingGroup struct {
+ AssetGroupDesc
+ // groupData is the actual group data
+ groupData AssetsHoldingGroupData
+ // loaded indicates whether groupData is loaded or not
+ loaded bool
+}
+// AssetsParamsGroup is metadata for asset group data (AssetsParamsGroupData)
+// that is stored separately
+type AssetsParamsGroup struct {
+ AssetGroupDesc
+ // groupData is the actual group data
+ groupData AssetsParamsGroupData
// loaded indicates either groupData loaded or not
loaded bool
}
-// AssetsHoldingGroupData is an actual asset holding data
-type AssetsHoldingGroupData struct {
+// AssetsCommonGroupData is the common data type for Holding and Params groups, storing the AssetOffsets data
+type AssetsCommonGroupData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
// offset relative to MinAssetIndex and differential afterward
- // assetId1 = AmountsAssetIndicesOffsets[0] + MinAssetIndex and assetIdx1 == MinAssetIndex
- // assetId2 = AmountsAssetIndicesOffsets[1] + assetIdx1
- // assetId3 = AmountsAssetIndicesOffsets[2] + assetIdx2
- AssetOffsets []basics.AssetIndex `codec:"ao,allocbound=MaxHoldingGroupSize"`
+ // assetId1 = AssetOffsets[0] + MinAssetIndex and assetIdx1 == MinAssetIndex
+ // assetId2 = AssetOffsets[1] + assetIdx1
+ // assetId3 = AssetOffsets[2] + assetIdx2
+ AssetOffsets []basics.AssetIndex `codec:"ao,omitemptyarray,allocbound=MaxHoldingGroupSize"`
+}
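
To make the delta encoding concrete, a standalone sketch (invented values) that decodes an AssetOffsets slice back into absolute asset IDs exactly as described above:

package main

import "fmt"

func main() {
	minAssetIndex := 100      // group's MinAssetIndex
	offsets := []int{0, 5, 7} // AssetOffsets: the first entry is always 0

	cur := minAssetIndex
	for i, d := range offsets {
		cur += d
		fmt.Printf("entry %d -> asset id %d\n", i, cur) // 100, 105, 112
	}
}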
- // Holding amount
- // same number of elements as in AmountsAssetIndicesOffsets
- Amounts []uint64 `codec:"a,allocbound=MaxHoldingGroupSize"`
+// AssetsParamsGroupData is the actual asset params data
+type AssetsParamsGroupData struct {
+ AssetsCommonGroupData
+
+ // same number of elements as in AssetOffsets
+ Totals []uint64 `codec:"t,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ Decimals []uint32 `codec:"d,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ DefaultFrozens []bool `codec:"f,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ UnitNames []string `codec:"u,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ AssetNames []string `codec:"n,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ URLs []string `codec:"l,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ MetadataHash [][32]byte `codec:"h,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ Managers []basics.Address `codec:"m,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ Reserves []basics.Address `codec:"r,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ Freezes []basics.Address `codec:"z,omitemptyarray,allocbound=MaxParamsGroupSize"`
+ Clawbacks []basics.Address `codec:"c,omitemptyarray,allocbound=MaxParamsGroupSize"`
+}
- // Holding "frozen" flag
- // same number of elements as in AmountsAssetIndicesOffsets
- Frozens []bool `codec:"f,allocbound=MaxHoldingGroupSize"`
+// AssetsHoldingGroupData is the actual asset holding data
+type AssetsHoldingGroupData struct {
+ AssetsCommonGroupData
+
+ // same number of elements as in AssetOffsets
+ Amounts []uint64 `codec:"a,omitemptyarray,allocbound=MaxHoldingGroupSize"`
+ Frozens []bool `codec:"f,omitemptyarray,allocbound=MaxHoldingGroupSize"`
}
const maxEncodedGroupsSize = 4096
-// ExtendedAssetHolding is AccountData's extension for storing asset holdings
+// ExtendedAssetHolding is AccountData's extension for storing asset holdings
type ExtendedAssetHolding struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
Count uint32 `codec:"c"`
- Groups []AssetsHoldingGroup `codec:"gs,allocbound=maxEncodedGroupsSize"` // 1M asset holdings
+ Groups []AssetsHoldingGroup `codec:"gs,omitemptyarray,allocbound=maxEncodedGroupsSize"` // 1M holdings
+}
+
+// ExtendedAssetParams is AccountData's extension for storing asset params
+type ExtendedAssetParams struct {
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+
+ Count uint32 `codec:"c"`
+ Groups []AssetsParamsGroup `codec:"gs,omitemptyarray,allocbound=maxEncodedGroupsSize"` // TODO, 1M params
}
// PersistedAccountData represents actual data stored in DB
@@ -85,7 +129,8 @@ type PersistedAccountData struct {
_struct struct{} `codec:",omitempty,omitemptyarray"`
basics.AccountData
- ExtendedAssetHolding ExtendedAssetHolding `codec:"eash"`
+ ExtendedAssetHolding ExtendedAssetHolding `codec:"eah,omitempty"`
+ ExtendedAssetParams ExtendedAssetParams `codec:"eap,omitempty"`
}
// SortAssetIndex is a copy from data/basics/sort.go
@@ -106,6 +151,86 @@ func (a SortAppIndex) Len() int { return len(a) }
func (a SortAppIndex) Less(i, j int) bool { return a[i] < a[j] }
func (a SortAppIndex) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+// AbstractAssetGroupData abstracts common properties for Holding and Params group data
+type AbstractAssetGroupData interface {
+ Find(aidx basics.AssetIndex, base basics.AssetIndex) int
+ AssetDeltaValue(ai int) basics.AssetIndex
+}
+
+// AbstractAssetGroup represents the interface for a Holding or Params group
+type AbstractAssetGroup interface {
+ MinAsset() basics.AssetIndex
+ MaxAsset() basics.AssetIndex
+ HasSpace() bool
+ Loaded() bool
+ GroupData() AbstractAssetGroupData
+ AssetCount() uint32
+ AssetAt(ai int) basics.AssetIndex
+ Update(ai int, data interface{})
+ Encode() []byte
+ Key() int64
+ SetKey(key int64)
+ Reset()
+ // Fetch loads group data using fetcher into the map provided via the assets argument
+ Fetch(fetcher func(int64) ([]byte, basics.Round, error), assets interface{}) (basics.Round, error)
+
+ delete(ai int)
+ slice(pos uint32, maxDelta uint64)
+ groupFromPosition(pos uint32, length uint32, capacity uint32, minAssetIndex basics.AssetIndex, maxDelta uint64) interface{}
+ mergeIn(other AbstractAssetGroup, pos uint32) (delta basics.AssetIndex)
+ sliceRight(pos uint32, delta basics.AssetIndex)
+}
+
+// AbstractAssetGroupList enables operations on concrete Holding or Params groups
+type AbstractAssetGroupList interface {
+ // Get returns abstract group
+ Get(gi int) AbstractAssetGroup
+ // Len returns number of groups in the list
+ Len() int
+ // Total returns the number of assets across all the groups
+ Total() uint32
+ // Reset initializes the list to hold count assets in length groups
+ Reset(count uint32, length int)
+ // Assign assigns the group value to groups[gi]
+ Assign(gi int, group interface{})
+ // ReleaseGroup removes group gi from the groups list
+ ReleaseGroup(gi int)
+
+ // FindGroup returns the index of the group the asset aidx belongs to, or -1 otherwise
+ FindGroup(aidx basics.AssetIndex, startIdx int) int
+ // FindAsset returns the group index and asset index if found, and (-1, -1) otherwise.
+ // If a matching group is found but the group is not loaded yet, it returns (groupIdx, -1)
+ FindAsset(aidx basics.AssetIndex, startIdx int) (int, int)
+
+ // dropGroup is similar to ReleaseGroup but does not update the total asset Count,
+ // assuming the caller has already adjusted it
+ dropGroup(gi int)
+ // delete asset by index ai from group gi
+ deleteByIndex(gi int, ai int)
+ // prependNewGroup, appendNewGroup and insertNewGroupAfter create a new group with a single data element
+ prependNewGroup(aidx basics.AssetIndex, data interface{})
+ appendNewGroup(aidx basics.AssetIndex, data interface{})
+ insertNewGroupAfter(gi int, aidx basics.AssetIndex, data interface{})
+ // insertInto adds new asset data into group at index gi
+ insertInto(gi int, aidx basics.AssetIndex, data interface{})
+ // insertAfter inserts a group into position gi+1
+ insertAfter(gi int, group interface{})
+ // split splits the group at index gi into two groups and returns the group index suitable for inserting asset aidx
+ split(gi int, aidx basics.AssetIndex) int
+}
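
As a hedged sketch of how these interfaces are meant to be consumed (the concrete *ExtendedAssetHolding and *ExtendedAssetParams types satisfy AbstractAssetGroupList elsewhere in this change, since the generic helpers below pass them as such), a generic traversal that works for either holdings or params:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/ledger/ledgercore"
)

// dumpGroups walks any concrete group list through the abstract interface,
// using only methods declared on AbstractAssetGroupList / AbstractAssetGroup.
func dumpGroups(agl ledgercore.AbstractAssetGroupList) {
	fmt.Printf("%d assets in %d groups\n", agl.Total(), agl.Len())
	for gi := 0; gi < agl.Len(); gi++ {
		g := agl.Get(gi)
		fmt.Printf("  group %d: %d assets in [%d, %d], loaded=%v\n",
			gi, g.AssetCount(), g.MinAsset(), g.MaxAsset(), g.Loaded())
	}
}

func main() {
	dumpGroups(&ledgercore.ExtendedAssetHolding{})
	dumpGroups(&ledgercore.ExtendedAssetParams{})
}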
+
+type groupBuilder interface {
+ newGroup(size int) groupBuilder
+ newElement(offset basics.AssetIndex, data interface{}) groupBuilder
+ build(desc AssetGroupDesc) interface{}
+}
+
+type flattener interface {
+ Count() uint32
+ Data(idx int) interface{}
+ AssetIndex(idx int) basics.AssetIndex
+}
+
// EncodedMaxAssetsPerAccount is a copy from basics package to resolve deps in msgp-generated file
var EncodedMaxAssetsPerAccount = basics.EncodedMaxAssetsPerAccount
@@ -123,12 +248,33 @@ func (pad PersistedAccountData) NumAssetHoldings() int {
return len(pad.AccountData.Assets)
}
+// NumAssetParams returns the number of asset params in the account
+func (pad PersistedAccountData) NumAssetParams() int {
+ if pad.ExtendedAssetParams.Count > 0 {
+ return int(pad.ExtendedAssetParams.Count)
+ }
+ return len(pad.AccountData.AssetParams)
+}
+
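A minimal usage sketch of the counters above: the extended counter takes precedence over the legacy in-record map whenever it is non-zero.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/ledger/ledgercore"
)

func main() {
	var pad ledgercore.PersistedAccountData
	fmt.Println(pad.NumAssetParams()) // 0: falls back to len(AccountData.AssetParams)

	pad.ExtendedAssetParams.Count = 1500
	fmt.Println(pad.NumAssetParams()) // 1500: extended counter takes precedence
}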
func (gd *AssetsHoldingGroupData) update(ai int, hodl basics.AssetHolding) {
gd.Amounts[ai] = hodl.Amount
gd.Frozens[ai] = hodl.Frozen
}
-// delete the ai-th element in the group holding array. The method expect the ai to be a valid index.
+func (gd *AssetsParamsGroupData) update(ai int, params basics.AssetParams) {
+ gd.Totals[ai] = params.Total
+ gd.Decimals[ai] = params.Decimals
+ gd.DefaultFrozens[ai] = params.DefaultFrozen
+ gd.UnitNames[ai] = params.UnitName
+ gd.AssetNames[ai] = params.AssetName
+ gd.URLs[ai] = params.URL
+ copy(gd.MetadataHash[ai][:], params.MetadataHash[:])
+ copy(gd.Managers[ai][:], params.Manager[:])
+ copy(gd.Reserves[ai][:], params.Reserve[:])
+ copy(gd.Freezes[ai][:], params.Freeze[:])
+ copy(gd.Clawbacks[ai][:], params.Clawback[:])
+}
+
func (gd *AssetsHoldingGroupData) delete(ai int) {
if ai == 0 {
gd.AssetOffsets = gd.AssetOffsets[1:]
@@ -140,17 +286,71 @@ func (gd *AssetsHoldingGroupData) delete(ai int) {
gd.Amounts = gd.Amounts[:len(gd.Amounts)-1]
gd.Frozens = gd.Frozens[:len(gd.Frozens)-1]
} else {
+ length := len(gd.AssetOffsets)
gd.AssetOffsets[ai+1] += gd.AssetOffsets[ai]
copy(gd.AssetOffsets[ai:], gd.AssetOffsets[ai+1:])
- gd.AssetOffsets = gd.AssetOffsets[:len(gd.AssetOffsets)-1]
+ gd.AssetOffsets = gd.AssetOffsets[:length-1]
+ // copy all and then slice to remove the last element
copy(gd.Amounts[ai:], gd.Amounts[ai+1:])
- gd.Amounts = gd.Amounts[:len(gd.Amounts)-1]
copy(gd.Frozens[ai:], gd.Frozens[ai+1:])
- gd.Frozens = gd.Frozens[:len(gd.Frozens)-1]
+
+ gd.slice(0, length-1)
+ }
+}
+
+func (gd *AssetsParamsGroupData) delete(ai int) {
+ length := len(gd.AssetOffsets)
+ if ai == 0 {
+ gd.AssetOffsets = gd.AssetOffsets[1:]
+ gd.AssetOffsets[0] = 0
+ gd.slice(1, length)
+ } else if ai == len(gd.AssetOffsets)-1 {
+ gd.AssetOffsets = gd.AssetOffsets[:len(gd.AssetOffsets)-1]
+ gd.Totals = gd.Totals[:len(gd.Totals)-1]
+ gd.slice(0, length-1)
+ } else {
+
+ gd.AssetOffsets[ai+1] += gd.AssetOffsets[ai]
+ copy(gd.AssetOffsets[ai:], gd.AssetOffsets[ai+1:])
+ gd.AssetOffsets = gd.AssetOffsets[:length-1]
+
+ // copy all and then slice to remove the last element
+ copy(gd.Totals[ai:], gd.Totals[ai+1:])
+ copy(gd.Decimals[ai:], gd.Decimals[ai+1:])
+ copy(gd.DefaultFrozens[ai:], gd.DefaultFrozens[ai+1:])
+ copy(gd.UnitNames[ai:], gd.UnitNames[ai+1:])
+ copy(gd.AssetNames[ai:], gd.AssetNames[ai+1:])
+ copy(gd.URLs[ai:], gd.URLs[ai+1:])
+ copy(gd.MetadataHash[ai:], gd.MetadataHash[ai+1:])
+ copy(gd.Managers[ai:], gd.Managers[ai+1:])
+ copy(gd.Reserves[ai:], gd.Reserves[ai+1:])
+ copy(gd.Freezes[ai:], gd.Freezes[ai+1:])
+ copy(gd.Clawbacks[ai:], gd.Clawbacks[ai+1:])
+
+ gd.slice(0, length-1)
}
}
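
A standalone sketch (invented values) of the middle-delete fix-up used by both delete methods above: the removed entry's delta is folded into its successor before the tail is shifted left.

package main

import "fmt"

func main() {
	offsets := []int{0, 5, 7, 2} // asset ids: base, base+5, base+12, base+14
	ai := 1                      // delete the second asset (base+5)

	offsets[ai+1] += offsets[ai]       // successor now carries delta 12
	copy(offsets[ai:], offsets[ai+1:]) // shift the tail left
	offsets = offsets[:len(offsets)-1] // drop the duplicated last slot

	fmt.Println(offsets) // [0 12 2] -> ids: base, base+12, base+14
}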
+func (gd *AssetsHoldingGroupData) slice(start, end int) {
+ gd.Amounts = gd.Amounts[start:end]
+ gd.Frozens = gd.Frozens[start:end]
+}
+
+func (gd *AssetsParamsGroupData) slice(start, end int) {
+ gd.Totals = gd.Totals[start:end]
+ gd.Decimals = gd.Decimals[start:end]
+ gd.DefaultFrozens = gd.DefaultFrozens[start:end]
+ gd.UnitNames = gd.UnitNames[start:end]
+ gd.AssetNames = gd.AssetNames[start:end]
+ gd.URLs = gd.URLs[start:end]
+ gd.MetadataHash = gd.MetadataHash[start:end]
+ gd.Managers = gd.Managers[start:end]
+ gd.Reserves = gd.Reserves[start:end]
+ gd.Freezes = gd.Freezes[start:end]
+ gd.Clawbacks = gd.Clawbacks[start:end]
+}
+
// GetHolding returns AssetHolding from group data by asset index ai
func (gd AssetsHoldingGroupData) GetHolding(ai int) basics.AssetHolding {
return basics.AssetHolding{Amount: gd.Amounts[ai], Frozen: gd.Frozens[ai]}
@@ -167,14 +367,15 @@ func (g AssetsHoldingGroup) Encode() []byte {
return protocol.Encode(&g.groupData)
}
-// TestGetGroupData returns group data. Used in tests only
-func (g AssetsHoldingGroup) TestGetGroupData() AssetsHoldingGroupData {
- return g.groupData
+// Encode returns msgp-encoded group data
+func (g AssetsParamsGroup) Encode() []byte {
+ // TODO: use GetEncodingBuf/PutEncodingBuf
+ return protocol.Encode(&g.groupData)
}
// Update an asset holding by index
-func (g *AssetsHoldingGroup) Update(ai int, holdings basics.AssetHolding) {
- g.groupData.update(ai, holdings)
+func (g *AssetsHoldingGroup) update(ai int, holding basics.AssetHolding) {
+ g.groupData.update(ai, holding)
}
// Loaded return a boolean flag indicated if the group loaded or not
@@ -188,25 +389,64 @@ func (g *AssetsHoldingGroup) Load(gd AssetsHoldingGroupData) {
g.loaded = true
}
-// delete an asset at position ai in this group
-func (g *AssetsHoldingGroup) delete(ai int) {
+// Load sets a group data value in the group
+func (g *AssetsParamsGroup) Load(gd AssetsParamsGroupData) {
+ g.groupData = gd
+ g.loaded = true
+}
+
+// Delete removes the asset at index ai from group ag
+func (g *AssetGroupDesc) Delete(ai int, ag AbstractAssetGroup) {
// although a group with only one element is handled by a caller
// add a safety check here
- if g.Count == 1 {
- *g = AssetsHoldingGroup{}
+ if ag.AssetCount() == 1 {
+ ag.Reset()
return
}
+ agd := ag.GroupData()
if ai == 0 {
// when deleting the first element, update MinAssetIndex and DeltaMaxAssetIndex
- g.MinAssetIndex += g.groupData.AssetOffsets[1]
- g.DeltaMaxAssetIndex -= uint64(g.groupData.AssetOffsets[1])
+ g.MinAssetIndex += agd.AssetDeltaValue(1)
+ g.DeltaMaxAssetIndex -= uint64(agd.AssetDeltaValue(1))
} else if uint32(ai) == g.Count-1 {
// when deleting the last element, update DeltaMaxAssetIndex
- g.DeltaMaxAssetIndex -= uint64(g.groupData.AssetOffsets[len(g.groupData.AssetOffsets)-1])
+ g.DeltaMaxAssetIndex -= uint64(agd.AssetDeltaValue(int(ag.AssetCount() - 1)))
}
- g.groupData.delete(ai)
+
+ ag.delete(ai)
g.Count--
+ return
+}
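
A standalone sketch (invented values) of the descriptor bookkeeping above when the first asset of a group is deleted: the new base becomes the second asset and the covered range shrinks by the same delta.

package main

import "fmt"

func main() {
	minAssetIndex := 100
	deltaMax := 14               // group covers ids 100..114
	offsets := []int{0, 5, 7, 2} // ids: 100, 105, 112, 114

	// deleting index 0: fold the first real delta into the descriptor
	minAssetIndex += offsets[1]
	deltaMax -= offsets[1]

	fmt.Println(minAssetIndex, deltaMax) // 105 9 -> group now covers 105..114
}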
+
+func (g *AssetsHoldingGroup) delete(ai int) {
+ g.groupData.delete(ai)
+}
+
+func (g *AssetsParamsGroup) delete(ai int) {
+ g.groupData.delete(ai)
+}
+
+// GetParams returns AssetParams from group data by asset index ai
+func (gd AssetsParamsGroupData) GetParams(ai int) basics.AssetParams {
+ return basics.AssetParams{
+ Total: gd.Totals[ai],
+ Decimals: gd.Decimals[ai],
+ DefaultFrozen: gd.DefaultFrozens[ai],
+ UnitName: gd.UnitNames[ai],
+ AssetName: gd.AssetNames[ai],
+ URL: gd.URLs[ai],
+ MetadataHash: gd.MetadataHash[ai],
+ Manager: gd.Managers[ai],
+ Reserve: gd.Reserves[ai],
+ Freeze: gd.Freezes[ai],
+ Clawback: gd.Clawbacks[ai],
+ }
+}
+
+// GetParams returns AssetParams from this group by asset index ai
+func (g AssetsParamsGroup) GetParams(ai int) basics.AssetParams {
+ return g.groupData.GetParams(ai)
}
// insert asset aidx into current group. It should not exist in the group
@@ -215,6 +455,7 @@ func (g *AssetsHoldingGroup) insert(aidx basics.AssetIndex, holding basics.Asset
// prepend
g.groupData.Amounts = append([]uint64{holding.Amount}, g.groupData.Amounts...)
g.groupData.Frozens = append([]bool{holding.Frozen}, g.groupData.Frozens...)
+
g.groupData.AssetOffsets[0] = g.MinAssetIndex - aidx
g.groupData.AssetOffsets = append([]basics.AssetIndex{0}, g.groupData.AssetOffsets...)
g.DeltaMaxAssetIndex += uint64(g.MinAssetIndex - aidx)
@@ -223,6 +464,7 @@ func (g *AssetsHoldingGroup) insert(aidx basics.AssetIndex, holding basics.Asset
// append
g.groupData.Amounts = append(g.groupData.Amounts, holding.Amount)
g.groupData.Frozens = append(g.groupData.Frozens, holding.Frozen)
+
lastAssetIndex := g.MinAssetIndex + basics.AssetIndex(g.DeltaMaxAssetIndex)
delta := aidx - lastAssetIndex
g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, delta)
@@ -254,123 +496,736 @@ func (g *AssetsHoldingGroup) insert(aidx basics.AssetIndex, holding basics.Asset
g.Count++
}
-// Delete an asset located withing Groups array at index gi and index ai within the group.
-// Both gi and ai must be valid indices.
-// It returns true if the group is gone and needs to be removed from DB.
-func (e *ExtendedAssetHolding) Delete(gi int, ai int) bool {
- if e.Groups[gi].Count == 1 {
- if gi < len(e.Groups)-1 {
- copy(e.Groups[gi:], e.Groups[gi+1:])
+func (g *AssetsParamsGroup) insert(aidx basics.AssetIndex, params basics.AssetParams) {
+ if aidx < g.MinAssetIndex {
+ // prepend
+ g.groupData.Totals = append([]uint64{params.Total}, g.groupData.Totals...)
+ g.groupData.Decimals = append([]uint32{params.Decimals}, g.groupData.Decimals...)
+ g.groupData.DefaultFrozens = append([]bool{params.DefaultFrozen}, g.groupData.DefaultFrozens...)
+ g.groupData.UnitNames = append([]string{params.UnitName}, g.groupData.UnitNames...)
+ g.groupData.AssetNames = append([]string{params.AssetName}, g.groupData.AssetNames...)
+ g.groupData.URLs = append([]string{params.URL}, g.groupData.URLs...)
+ g.groupData.MetadataHash = append([][32]byte{params.MetadataHash}, g.groupData.MetadataHash...)
+ g.groupData.Managers = append([]basics.Address{params.Manager}, g.groupData.Managers...)
+ g.groupData.Reserves = append([]basics.Address{params.Reserve}, g.groupData.Reserves...)
+ g.groupData.Freezes = append([]basics.Address{params.Freeze}, g.groupData.Freezes...)
+ g.groupData.Clawbacks = append([]basics.Address{params.Clawback}, g.groupData.Clawbacks...)
+
+ g.groupData.AssetOffsets[0] = g.MinAssetIndex - aidx
+ g.groupData.AssetOffsets = append([]basics.AssetIndex{0}, g.groupData.AssetOffsets...)
+ g.DeltaMaxAssetIndex += uint64(g.MinAssetIndex - aidx)
+ g.MinAssetIndex = aidx
+ } else if aidx > g.MinAssetIndex+basics.AssetIndex(g.DeltaMaxAssetIndex) {
+ // append
+ g.groupData.Totals = append(g.groupData.Totals, params.Total)
+ g.groupData.Decimals = append(g.groupData.Decimals, params.Decimals)
+ g.groupData.DefaultFrozens = append(g.groupData.DefaultFrozens, params.DefaultFrozen)
+ g.groupData.UnitNames = append(g.groupData.UnitNames, params.UnitName)
+ g.groupData.AssetNames = append(g.groupData.AssetNames, params.AssetName)
+ g.groupData.URLs = append(g.groupData.URLs, params.URL)
+ g.groupData.MetadataHash = append(g.groupData.MetadataHash, params.MetadataHash)
+ g.groupData.Managers = append(g.groupData.Managers, params.Manager)
+ g.groupData.Reserves = append(g.groupData.Reserves, params.Reserve)
+ g.groupData.Freezes = append(g.groupData.Freezes, params.Freeze)
+ g.groupData.Clawbacks = append(g.groupData.Clawbacks, params.Clawback)
+
+ lastAssetIndex := g.MinAssetIndex + basics.AssetIndex(g.DeltaMaxAssetIndex)
+ delta := aidx - lastAssetIndex
+ g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, delta)
+ g.DeltaMaxAssetIndex = uint64(aidx - g.MinAssetIndex)
+ } else {
+ // find position and insert
+ cur := g.MinAssetIndex
+ for ai, d := range g.groupData.AssetOffsets {
+ cur += d
+ if aidx < cur {
+ g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, 0)
+ copy(g.groupData.AssetOffsets[ai:], g.groupData.AssetOffsets[ai-1:])
+ prev := cur - d
+ g.groupData.AssetOffsets[ai] = aidx - prev
+ g.groupData.AssetOffsets[ai+1] = cur - aidx
+
+ g.groupData.Totals = append(g.groupData.Totals, 0)
+ copy(g.groupData.Totals[ai:], g.groupData.Totals[ai-1:])
+ g.groupData.Totals[ai] = params.Total
+
+ g.groupData.Decimals = append(g.groupData.Decimals, 0)
+ copy(g.groupData.Decimals[ai:], g.groupData.Decimals[ai-1:])
+ g.groupData.Decimals[ai] = params.Decimals
+
+ g.groupData.DefaultFrozens = append(g.groupData.DefaultFrozens, false)
+ copy(g.groupData.DefaultFrozens[ai:], g.groupData.DefaultFrozens[ai-1:])
+ g.groupData.DefaultFrozens[ai] = params.DefaultFrozen
+
+ g.groupData.UnitNames = append(g.groupData.UnitNames, "")
+ copy(g.groupData.UnitNames[ai:], g.groupData.UnitNames[ai-1:])
+ g.groupData.UnitNames[ai] = params.UnitName
+
+ g.groupData.AssetNames = append(g.groupData.AssetNames, "")
+ copy(g.groupData.AssetNames[ai:], g.groupData.AssetNames[ai-1:])
+ g.groupData.AssetNames[ai] = params.AssetName
+
+ g.groupData.URLs = append(g.groupData.URLs, "")
+ copy(g.groupData.URLs[ai:], g.groupData.URLs[ai-1:])
+ g.groupData.URLs[ai] = params.URL
+
+ g.groupData.MetadataHash = append(g.groupData.MetadataHash, [32]byte{})
+ copy(g.groupData.MetadataHash[ai:], g.groupData.MetadataHash[ai-1:])
+ g.groupData.MetadataHash[ai] = params.MetadataHash
+
+ g.groupData.Managers = append(g.groupData.Managers, basics.Address{})
+ copy(g.groupData.Managers[ai:], g.groupData.Managers[ai-1:])
+ g.groupData.Managers[ai] = params.Manager
+
+ g.groupData.Reserves = append(g.groupData.Reserves, basics.Address{})
+ copy(g.groupData.Reserves[ai:], g.groupData.Reserves[ai-1:])
+ g.groupData.Reserves[ai] = params.Reserve
+
+ g.groupData.Freezes = append(g.groupData.Freezes, basics.Address{})
+ copy(g.groupData.Freezes[ai:], g.groupData.Freezes[ai-1:])
+ g.groupData.Freezes[ai] = params.Freeze
+
+ g.groupData.Clawbacks = append(g.groupData.Clawbacks, basics.Address{})
+ copy(g.groupData.Clawbacks[ai:], g.groupData.Clawbacks[ai-1:])
+ g.groupData.Clawbacks[ai] = params.Clawback
+
+ break
+ }
+ }
+ }
+ g.Count++
+
+}
+
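A standalone sketch (invented values) of the middle-insert offset fix-up shared by both insert methods: the new asset splits its successor's delta in two.

package main

import "fmt"

func main() {
	minAssetIndex := 100
	offsets := []int{0, 5, 7} // ids: 100, 105, 112
	aidx := 108               // insert between 105 and 112

	cur := minAssetIndex
	for ai, d := range offsets {
		cur += d
		if aidx < cur {
			offsets = append(offsets, 0)
			copy(offsets[ai:], offsets[ai-1:]) // shift the tail right by one
			prev := cur - d
			offsets[ai] = aidx - prev  // 3
			offsets[ai+1] = cur - aidx // 4
			break
		}
	}
	fmt.Println(offsets) // [0 5 3 4] -> ids: 100, 105, 108, 112
}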
+func (g *AssetsHoldingGroup) mergeIn(other AbstractAssetGroup, pos uint32) (delta basics.AssetIndex) {
+ groupDelta := other.MinAsset() - (g.MinAssetIndex + basics.AssetIndex(g.DeltaMaxAssetIndex))
+
+ g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, other.GroupData().AssetDeltaValue(0)+groupDelta)
+ for j := 1; j < int(pos); j++ {
+ offset := other.GroupData().AssetDeltaValue(j)
+ g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, offset)
+ delta += offset
+ }
+ g.DeltaMaxAssetIndex += uint64(delta + groupDelta)
+ gd := other.(*AssetsHoldingGroup).groupData
+ g.groupData.Amounts = append(g.groupData.Amounts, gd.Amounts[:pos]...)
+ g.groupData.Frozens = append(g.groupData.Frozens, gd.Frozens[:pos]...)
+ g.Count += pos
+
+ return delta
+}
+
+func (g *AssetsParamsGroup) mergeIn(other AbstractAssetGroup, pos uint32) (delta basics.AssetIndex) {
+ groupDelta := other.MinAsset() - (g.MinAssetIndex + basics.AssetIndex(g.DeltaMaxAssetIndex))
+
+ g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, other.GroupData().AssetDeltaValue(0)+groupDelta)
+ for j := 1; j < int(pos); j++ {
+ offset := other.GroupData().AssetDeltaValue(j)
+ g.groupData.AssetOffsets = append(g.groupData.AssetOffsets, offset)
+ delta += offset
+ }
+ g.DeltaMaxAssetIndex += uint64(delta + groupDelta)
+ gd := other.(*AssetsParamsGroup).groupData
+
+ g.groupData.Totals = append(g.groupData.Totals, gd.Totals[:pos]...)
+ g.groupData.Decimals = append(g.groupData.Decimals, gd.Decimals[:pos]...)
+ g.groupData.DefaultFrozens = append(g.groupData.DefaultFrozens, gd.DefaultFrozens[:pos]...)
+ g.groupData.UnitNames = append(g.groupData.UnitNames, gd.UnitNames[:pos]...)
+ g.groupData.AssetNames = append(g.groupData.AssetNames, gd.AssetNames[:pos]...)
+ g.groupData.URLs = append(g.groupData.URLs, gd.URLs[:pos]...)
+ g.groupData.MetadataHash = append(g.groupData.MetadataHash, gd.MetadataHash[:pos]...)
+ g.groupData.Managers = append(g.groupData.Managers, gd.Managers[:pos]...)
+ g.groupData.Reserves = append(g.groupData.Reserves, gd.Reserves[:pos]...)
+ g.groupData.Freezes = append(g.groupData.Freezes, gd.Freezes[:pos]...)
+ g.groupData.Clawbacks = append(g.groupData.Clawbacks, gd.Clawbacks[:pos]...)
+ g.Count += pos
+
+ return delta
+}
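
A standalone sketch (invented values) of the offset arithmetic in mergeIn above: the first pos entries of the other group are appended to g, with the other group's leading offset rebased onto g's current MaxAsset.

package main

import "fmt"

func main() {
	gMin, gDeltaMax := 100, 14 // g covers ids 100..114
	gOffsets := []int{0, 5, 7, 2}

	otherMin := 130                // other group's ids: 130, 133, 139
	otherOffsets := []int{0, 3, 6}
	pos := 2                       // merge the first two assets of other

	groupDelta := otherMin - (gMin + gDeltaMax) // 16
	gOffsets = append(gOffsets, otherOffsets[0]+groupDelta)
	delta := 0
	for j := 1; j < pos; j++ {
		gOffsets = append(gOffsets, otherOffsets[j])
		delta += otherOffsets[j]
	}
	gDeltaMax += delta + groupDelta

	fmt.Println(gOffsets, gDeltaMax) // [0 5 7 2 16 3] 33 -> g now covers 100..133
}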
+
+// Loaded returns a boolean flag indicating whether the group is loaded or not
+func (g AssetsParamsGroup) Loaded() bool {
+ return g.loaded
+}
+
+func (g *AssetsParamsGroup) update(ai int, params basics.AssetParams) {
+ g.groupData.update(ai, params)
+}
+
+// Find returns the index within AssetOffsets of the given AssetIndex aidx, relative to the group's base AssetIndex value
+func (g *AssetsCommonGroupData) Find(aidx basics.AssetIndex, base basics.AssetIndex) int {
+ // linear search because AssetOffsets stores deltas, not absolute values
+ cur := base
+ for ai, d := range g.AssetOffsets {
+ cur = d + cur
+ if aidx == cur {
+ return ai
}
- e.Groups[len(e.Groups)-1] = AssetsHoldingGroup{} // release AssetsHoldingGroup data
- e.Groups = e.Groups[:len(e.Groups)-1]
- e.Count--
- return true
}
- e.Groups[gi].delete(ai)
- e.Count--
- return false
+ return -1
+}
+
+// AssetDeltaValue returns asset offset value at index ai.
+// It does not check boundaries.
+func (g *AssetsCommonGroupData) AssetDeltaValue(ai int) basics.AssetIndex {
+ return g.AssetOffsets[ai]
+}
+
+// HasSpace returns true if this group has space to accommodate one more asset entry
+func (g *AssetsHoldingGroup) HasSpace() bool {
+ return g.Count < MaxHoldingGroupSize
+}
+
+// HasSpace returns true if this group has space to accommodate one more asset entry
+func (g *AssetsParamsGroup) HasSpace() bool {
+ return g.Count < MaxParamsGroupSize
+}
+
+// MinAsset returns min (base) AssetIndex value for this group
+func (g *AssetGroupDesc) MinAsset() basics.AssetIndex {
+ return g.MinAssetIndex
+}
+
+// MaxAsset returns max AssetIndex value in this group
+func (g *AssetGroupDesc) MaxAsset() basics.AssetIndex {
+ return g.MinAssetIndex + basics.AssetIndex(g.DeltaMaxAssetIndex)
+}
+
+// AssetCount returns number of assets in this group
+func (g *AssetGroupDesc) AssetCount() uint32 {
+ return g.Count
}
-// splitInsert splits the group identified by gi
-// and inserts a new asset into appropriate left or right part of the split.
-func (e *ExtendedAssetHolding) splitInsert(gi int, aidx basics.AssetIndex, holding basics.AssetHolding) {
- g := e.Groups[gi]
- pos := g.Count / 2
+// SetKey sets the id of the DB record containing this group's actual data
+func (g *AssetGroupDesc) SetKey(key int64) {
+ g.AssetGroupKey = key
+}
+
+// Key returns the id of the DB record containing this group's actual data
+func (g *AssetGroupDesc) Key() int64 {
+ return g.AssetGroupKey
+}
+
+// AssetAt returns the asset value at position ai
+func (g *AssetsHoldingGroup) AssetAt(ai int) basics.AssetIndex {
+ asset := g.MinAssetIndex
+ for i := 0; i <= int(ai); i++ {
+ asset += g.groupData.AssetOffsets[i]
+ }
+ return asset
+}
+
+// AssetAt returns the asset value at position ai
+func (g *AssetsParamsGroup) AssetAt(ai int) basics.AssetIndex {
asset := g.MinAssetIndex
- for i := 0; i < int(pos); i++ {
+ for i := 0; i <= int(ai); i++ {
asset += g.groupData.AssetOffsets[i]
}
- rgCount := g.Count - g.Count/2
- rgMinAssetIndex := asset + g.groupData.AssetOffsets[pos]
- rgDeltaMaxIndex := g.MinAssetIndex + basics.AssetIndex(g.DeltaMaxAssetIndex) - rgMinAssetIndex
- lgMinAssetIndex := g.MinAssetIndex
- lgCount := g.Count - rgCount
- lgDeltaMaxIndex := asset - g.MinAssetIndex
+ return asset
+}
+
+// GroupData returns the AbstractAssetGroupData interface for this group's data
+func (g *AssetsHoldingGroup) GroupData() AbstractAssetGroupData {
+ return &g.groupData.AssetsCommonGroupData
+}
+
+// GroupData returns the AbstractAssetGroupData interface for this group's data
+func (g *AssetsParamsGroup) GroupData() AbstractAssetGroupData {
+ return &g.groupData.AssetsCommonGroupData
+}
+
+// Reset clears this group
+func (g *AssetsHoldingGroup) Reset() {
+ *g = AssetsHoldingGroup{}
+}
+
+// Reset clears this group
+func (g *AssetsParamsGroup) Reset() {
+ *g = AssetsParamsGroup{}
+}
+
+// Update sets group data by asset index
+func (g *AssetsHoldingGroup) Update(ai int, data interface{}) {
+ g.update(ai, data.(basics.AssetHolding))
+}
+
+// Update sets group data by asset index
+func (g *AssetsParamsGroup) Update(ai int, data interface{}) {
+ g.update(ai, data.(basics.AssetParams))
+}
+
+// Fetch loads group data using fetcher and fills the provided holdings map
+func (g *AssetsHoldingGroup) Fetch(fetcher func(int64) ([]byte, basics.Round, error), assets interface{}) (basics.Round, error) {
+ var holdings map[basics.AssetIndex]basics.AssetHolding
+ if assets != nil {
+ holdings = assets.(map[basics.AssetIndex]basics.AssetHolding)
+ }
+
+ buf, rnd, err := fetcher(g.AssetGroupKey)
+ if err != nil {
+ return 0, err
+ }
+
+ var groupData AssetsHoldingGroupData
+ err = protocol.Decode(buf, &groupData)
+ if err != nil {
+ return 0, err
+ }
+
+ if holdings != nil {
+ aidx := g.MinAssetIndex
+ for i := 0; i < len(groupData.AssetOffsets); i++ {
+ aidx += groupData.AssetOffsets[i]
+ holdings[aidx] = groupData.GetHolding(i)
+ }
+ }
+ g.Load(groupData)
+ return rnd, nil
+}
+
+// Fetch loads group data using fetcher and fills the provided params map
+func (g *AssetsParamsGroup) Fetch(fetcher func(int64) ([]byte, basics.Round, error), assets interface{}) (basics.Round, error) {
+ var params map[basics.AssetIndex]basics.AssetParams
+ if assets != nil {
+ params = assets.(map[basics.AssetIndex]basics.AssetParams)
+ }
+
+ buf, rnd, err := fetcher(g.AssetGroupKey)
+ if err != nil {
+ return 0, err
+ }
+
+ var groupData AssetsParamsGroupData
+ err = protocol.Decode(buf, &groupData)
+ if err != nil {
+ return 0, err
+ }
+
+ if params != nil {
+ aidx := g.MinAssetIndex
+ for i := 0; i < len(groupData.AssetOffsets); i++ {
+ aidx += groupData.AssetOffsets[i]
+ params[aidx] = groupData.GetParams(i)
+ }
+ }
+ g.Load(groupData)
+ return rnd, nil
+}
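
A hedged, standalone sketch of the Fetch callback contract shared by both variants above: the fetcher maps an AssetGroupKey to the encoded group data plus the round it was read at; the in-memory map here is only a stand-in for the real DB lookup.

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/ledgercore"
	"github.com/algorand/go-algorand/protocol"
)

func main() {
	// group data for asset ids 100 and 105 (invented values)
	gd := ledgercore.AssetsHoldingGroupData{
		AssetsCommonGroupData: ledgercore.AssetsCommonGroupData{
			AssetOffsets: []basics.AssetIndex{0, 5},
		},
		Amounts: []uint64{10, 20},
		Frozens: []bool{false, true},
	}
	store := map[int64][]byte{7: protocol.Encode(&gd)} // stand-in for the accountext table

	group := ledgercore.AssetsHoldingGroup{
		AssetGroupDesc: ledgercore.AssetGroupDesc{
			Count:              2,
			MinAssetIndex:      100,
			DeltaMaxAssetIndex: 5,
			AssetGroupKey:      7,
		},
	}

	fetcher := func(key int64) ([]byte, basics.Round, error) {
		buf, ok := store[key]
		if !ok {
			return nil, 0, fmt.Errorf("group key %d not found", key)
		}
		return buf, basics.Round(1000), nil
	}

	holdings := make(map[basics.AssetIndex]basics.AssetHolding)
	rnd, err := group.Fetch(fetcher, holdings)
	fmt.Println(rnd, err, holdings[100].Amount, holdings[105].Frozen) // 1000 <nil> 10 true
}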
+
+type assetDataGetter interface {
+ get(aidx basics.AssetIndex) interface{}
+}
+
+type assetHoldingGetter struct {
+ assets map[basics.AssetIndex]basics.AssetHolding
+}
+
+func (g assetHoldingGetter) get(aidx basics.AssetIndex) interface{} {
+ return g.assets[aidx]
+}
+
+type assetParamsGetter struct {
+ assets map[basics.AssetIndex]basics.AssetParams
+}
+
+func (g assetParamsGetter) get(aidx basics.AssetIndex) interface{} {
+ return g.assets[aidx]
+}
+
+// Update an asset holding by asset index
+func (e *ExtendedAssetHolding) Update(updated []basics.AssetIndex, assets map[basics.AssetIndex]basics.AssetHolding) error {
+ g := assetHoldingGetter{assets}
+ return update(updated, e, &g)
+}
+
+// Update asset params by asset index
+func (e *ExtendedAssetParams) Update(updated []basics.AssetIndex, assets map[basics.AssetIndex]basics.AssetParams) error {
+ g := assetParamsGetter{assets}
+ return update(updated, e, &g)
+}
+
+func update(updated []basics.AssetIndex, agl AbstractAssetGroupList, assets assetDataGetter) error {
+ sort.SliceStable(updated, func(i, j int) bool { return updated[i] < updated[j] })
+ gi, ai := 0, 0
+ for _, aidx := range updated {
+ gi, ai = findAsset(aidx, gi, agl)
+ if gi == -1 || ai == -1 {
+ return fmt.Errorf("failed to find asset group for updating %d: (%d, %d)", aidx, gi, ai)
+ }
+ agl.Get(gi).Update(ai, assets.get(aidx))
+ }
+ return nil
+}
+
+func deleteAssets(assets []basics.AssetIndex, agl AbstractAssetGroupList) (deleted []int64, err error) {
+ // TODO: possible optimizations:
+ // 1. pad.NumAssetHoldings() == len(deleted)
+ // 2. deletion of entire group
+ sort.SliceStable(assets, func(i, j int) bool { return assets[i] < assets[j] })
+ gi, ai := 0, 0
+ for _, aidx := range assets {
+ gi, ai = findAsset(aidx, gi, agl)
+ if gi == -1 || ai == -1 {
+ err = fmt.Errorf("failed to find asset group for deleting %d: (%d, %d)", aidx, gi, ai)
+ return
+ }
+ // group data is loaded in accountsLoadOld
+ ag := agl.Get(gi)
+ if ag.AssetCount() == 1 {
+ key := ag.Key()
+ agl.ReleaseGroup(gi)
+ deleted = append(deleted, key)
+ } else {
+ agl.deleteByIndex(gi, ai)
+ }
+ }
+ return
+}
+
+// Delete removes asset holdings identified by the asset indexes in the assets list.
+// It returns the list of group keys that need to be removed from the DB
+func (e *ExtendedAssetHolding) Delete(assets []basics.AssetIndex) (deleted []int64, err error) {
+ return deleteAssets(assets, e)
+}
+
+// Delete removes asset params identified by the asset indexes in the assets list.
+// It returns the list of group keys that need to be removed from the DB
+func (e *ExtendedAssetParams) Delete(assets []basics.AssetIndex) (deleted []int64, err error) {
+ return deleteAssets(assets, e)
+}
+
+func (e *ExtendedAssetHolding) dropGroup(gi int) {
+ if gi < len(e.Groups)-1 {
+ copy(e.Groups[gi:], e.Groups[gi+1:])
+ }
+ e.Groups[len(e.Groups)-1] = AssetsHoldingGroup{} // release AssetsHoldingGroup data
+ e.Groups = e.Groups[:len(e.Groups)-1]
+}
+
+func (e *ExtendedAssetParams) dropGroup(gi int) {
+ if gi < len(e.Groups)-1 {
+ copy(e.Groups[gi:], e.Groups[gi+1:])
+ }
+ e.Groups[len(e.Groups)-1] = AssetsParamsGroup{} // release AssetsParamsGroup data
+ e.Groups = e.Groups[:len(e.Groups)-1]
+}
+
+// ReleaseGroup removes all assets in group gi and the group itself
+func (e *ExtendedAssetHolding) ReleaseGroup(gi int) {
+ count := e.Groups[gi].AssetCount()
+ e.dropGroup(gi)
+ e.Count -= count
+}
+
+// ReleaseGroup removes all assets in group gi and the group itself
+func (e *ExtendedAssetParams) ReleaseGroup(gi int) {
+ count := e.Groups[gi].AssetCount()
+ e.dropGroup(gi)
+ e.Count -= count
+}
+
+func (e *ExtendedAssetHolding) deleteByIndex(gi int, ai int) {
+ e.Groups[gi].Delete(ai, &e.Groups[gi])
+ e.Count--
+}
+
+func (e *ExtendedAssetParams) deleteByIndex(gi int, ai int) {
+ e.Groups[gi].Delete(ai, &e.Groups[gi])
+ e.Count--
+}
+
+// split splits Groups[gi] in preparation for inserting asset aidx.
+// It returns the group index where the asset should be inserted.
+func (e *ExtendedAssetHolding) split(gi int, aidx basics.AssetIndex) int {
+ return split(gi, aidx, e)
+}
+
+func (e *ExtendedAssetParams) split(gi int, aidx basics.AssetIndex) int {
+ return split(gi, aidx, e)
+}
+
+func split(gi int, aidx basics.AssetIndex, agl AbstractAssetGroupList) int {
+ g := agl.Get(gi)
+ pos := g.AssetCount() / 2
+ asset := g.AssetAt(int(pos - 1))
+
+ rgCount := g.AssetCount() - pos
+ lgCount := pos
+ rgMinAssetIndex := asset + g.GroupData().AssetDeltaValue(int(pos))
+ lgMinAssetIndex := g.MinAsset()
+ rgDeltaMaxIndex := g.MaxAsset() - rgMinAssetIndex
+ lgDeltaMaxIndex := asset - g.MinAsset()
rgCap := rgCount
if aidx >= lgMinAssetIndex+lgDeltaMaxIndex {
// if new asset goes into right group, reserve space
rgCap++
}
+
+ // make a right group
+ rightGroup := g.groupFromPosition(pos, rgCount, rgCap, rgMinAssetIndex, uint64(rgDeltaMaxIndex))
+
+ // modify left group
+ g.slice(lgCount, uint64(lgDeltaMaxIndex))
+
+ // insert rightGroup after gi
+ // slice reallocation may happen, so the left group needs to be modified before a possible array reallocation in insertAfter
+ agl.insertAfter(gi, rightGroup)
+
+ if aidx < lgMinAssetIndex+lgDeltaMaxIndex {
+ return gi
+ }
+ return gi + 1
+}
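
A standalone sketch (invented values) of the split arithmetic above: a four-asset group is cut at pos = Count/2, and both descriptors are recomputed from the offsets.

package main

import "fmt"

func main() {
	minAssetIndex := 100
	deltaMax := 14               // group covers ids 100..114
	offsets := []int{0, 5, 7, 2} // ids: 100, 105, 112, 114

	pos := len(offsets) / 2 // 2

	// asset at position pos-1: the last asset kept in the left group
	asset := minAssetIndex
	for i := 0; i < pos; i++ {
		asset += offsets[i]
	}

	rgMin := asset + offsets[pos]                    // 112: base of the right group
	rgDeltaMax := (minAssetIndex + deltaMax) - rgMin // 2
	lgMin := minAssetIndex                           // 100
	lgDeltaMax := asset - minAssetIndex              // 5

	fmt.Println(lgMin, lgDeltaMax, rgMin, rgDeltaMax) // 100 5 112 2
}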
+
+func (g *AssetsHoldingGroup) slice(pos uint32, maxDelta uint64) {
+ g.Count = pos
+ g.DeltaMaxAssetIndex = maxDelta
+ g.groupData = AssetsHoldingGroupData{
+ Amounts: g.groupData.Amounts[:pos],
+ Frozens: g.groupData.Frozens[:pos],
+ AssetsCommonGroupData: AssetsCommonGroupData{
+ AssetOffsets: g.groupData.AssetOffsets[:pos],
+ },
+ }
+}
+
+func (g *AssetsParamsGroup) slice(pos uint32, maxDelta uint64) {
+ g.Count = pos
+ g.DeltaMaxAssetIndex = maxDelta
+ g.groupData = AssetsParamsGroupData{
+ Totals: g.groupData.Totals[:pos],
+ Decimals: g.groupData.Decimals[:pos],
+ DefaultFrozens: g.groupData.DefaultFrozens[:pos],
+ UnitNames: g.groupData.UnitNames[:pos],
+ AssetNames: g.groupData.AssetNames[:pos],
+ URLs: g.groupData.URLs[:pos],
+ MetadataHash: g.groupData.MetadataHash[:pos],
+ Managers: g.groupData.Managers[:pos],
+ Reserves: g.groupData.Reserves[:pos],
+ Freezes: g.groupData.Freezes[:pos],
+ Clawbacks: g.groupData.Clawbacks[:pos],
+ AssetsCommonGroupData: AssetsCommonGroupData{
+ AssetOffsets: g.groupData.AssetOffsets[:pos],
+ },
+ }
+}
+
+// sliceRight keeps only the data at position [pos:] and rebases MinAssetIndex and DeltaMaxAssetIndex accordingly
+func (g *AssetsHoldingGroup) sliceRight(pos uint32, delta basics.AssetIndex) {
+ length := len(g.groupData.AssetOffsets)
+
+ g.Count -= uint32(pos)
+ g.groupData.AssetOffsets = g.groupData.AssetOffsets[pos:]
+ delta += g.groupData.AssetOffsets[0]
+ g.groupData.AssetOffsets[0] = 0
+
+ g.groupData.slice(int(pos), length)
+ g.MinAssetIndex += delta
+ g.DeltaMaxAssetIndex -= uint64(delta)
+}
+
+// sliceRight keeps only the data at position [pos:] and rebases MinAssetIndex and DeltaMaxAssetIndex accordingly
+func (g *AssetsParamsGroup) sliceRight(pos uint32, delta basics.AssetIndex) {
+ length := len(g.groupData.AssetOffsets)
+
+ g.Count -= uint32(pos)
+ g.groupData.AssetOffsets = g.groupData.AssetOffsets[pos:]
+ delta += g.groupData.AssetOffsets[0]
+ g.groupData.AssetOffsets[0] = 0
+ g.groupData.slice(int(pos), length)
+ g.MinAssetIndex += delta
+ g.DeltaMaxAssetIndex -= uint64(delta)
+}
+
+// groupFromPosition creates a new group from the data at position [pos:]
+func (g *AssetsHoldingGroup) groupFromPosition(pos uint32, length, capacity uint32, minAssetIndex basics.AssetIndex, maxDelta uint64) interface{} {
rgd := AssetsHoldingGroupData{
- Amounts: make([]uint64, rgCount, rgCap),
- Frozens: make([]bool, rgCount, rgCap),
- AssetOffsets: make([]basics.AssetIndex, rgCount, rgCap),
+ Amounts: make([]uint64, length, capacity),
+ Frozens: make([]bool, length, capacity),
+ AssetsCommonGroupData: AssetsCommonGroupData{AssetOffsets: make([]basics.AssetIndex, length, capacity)},
}
copy(rgd.Amounts, g.groupData.Amounts[pos:])
copy(rgd.Frozens, g.groupData.Frozens[pos:])
copy(rgd.AssetOffsets, g.groupData.AssetOffsets[pos:])
rightGroup := AssetsHoldingGroup{
- Count: rgCount,
- MinAssetIndex: rgMinAssetIndex,
- DeltaMaxAssetIndex: uint64(rgDeltaMaxIndex),
- groupData: rgd,
- loaded: true,
+ AssetGroupDesc: AssetGroupDesc{
+ Count: length,
+ MinAssetIndex: minAssetIndex,
+ DeltaMaxAssetIndex: maxDelta,
+ },
+ groupData: rgd,
+ loaded: true,
}
rightGroup.groupData.AssetOffsets[0] = 0
+ return rightGroup
+}
- e.Groups[gi].Count = lgCount
- e.Groups[gi].DeltaMaxAssetIndex = uint64(lgDeltaMaxIndex)
- e.Groups[gi].groupData = AssetsHoldingGroupData{
- Amounts: g.groupData.Amounts[:pos],
- Frozens: g.groupData.Frozens[:pos],
- AssetOffsets: g.groupData.AssetOffsets[:pos],
+func (g *AssetsParamsGroup) groupFromPosition(pos uint32, length, capacity uint32, minAssetIndex basics.AssetIndex, maxDelta uint64) interface{} {
+ rgd := AssetsParamsGroupData{
+ Totals: make([]uint64, length, capacity),
+ Decimals: make([]uint32, length, capacity),
+ DefaultFrozens: make([]bool, length, capacity),
+ UnitNames: make([]string, length, capacity),
+ AssetNames: make([]string, length, capacity),
+ URLs: make([]string, length, capacity),
+ MetadataHash: make([][32]byte, length, capacity),
+ Managers: make([]basics.Address, length, capacity),
+ Reserves: make([]basics.Address, length, capacity),
+ Freezes: make([]basics.Address, length, capacity),
+ Clawbacks: make([]basics.Address, length, capacity),
+ AssetsCommonGroupData: AssetsCommonGroupData{AssetOffsets: make([]basics.AssetIndex, length, capacity)},
}
- if aidx < lgMinAssetIndex+lgDeltaMaxIndex {
- e.Groups[gi].insert(aidx, holding)
- } else {
- rightGroup.insert(aidx, holding)
+ copy(rgd.Totals, g.groupData.Totals[pos:])
+ copy(rgd.Decimals, g.groupData.Decimals[pos:])
+ copy(rgd.DefaultFrozens, g.groupData.DefaultFrozens[pos:])
+ copy(rgd.UnitNames, g.groupData.UnitNames[pos:])
+ copy(rgd.AssetNames, g.groupData.AssetNames[pos:])
+ copy(rgd.URLs, g.groupData.URLs[pos:])
+ copy(rgd.MetadataHash, g.groupData.MetadataHash[pos:])
+ copy(rgd.Managers, g.groupData.Managers[pos:])
+ copy(rgd.Reserves, g.groupData.Reserves[pos:])
+ copy(rgd.Freezes, g.groupData.Freezes[pos:])
+ copy(rgd.Clawbacks, g.groupData.Clawbacks[pos:])
+ copy(rgd.AssetOffsets, g.groupData.AssetOffsets[pos:])
+ rightGroup := AssetsParamsGroup{
+ AssetGroupDesc: AssetGroupDesc{
+ Count: length,
+ MinAssetIndex: minAssetIndex,
+ DeltaMaxAssetIndex: maxDelta,
+ },
+ groupData: rgd,
+ loaded: true,
}
+ rightGroup.groupData.AssetOffsets[0] = 0
+ return rightGroup
+}
+
+func makeAssetGroup(aidx basics.AssetIndex, data interface{}, b groupBuilder) interface{} {
+ desc := AssetGroupDesc{
+ Count: 1,
+ MinAssetIndex: aidx,
+ DeltaMaxAssetIndex: 0,
+ AssetGroupKey: 0,
+ }
+ return b.newGroup(1).newElement(0, data).build(desc)
+}
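makeAssetGroup shows the intended use of the groupBuilder interface: newGroup allocates the parallel arrays of a fixed size, each newElement call fills one slot, and build seals them together with the group descriptor. Here is a rough, self-contained model of that fluent pattern with simplified stand-in types (not the builders defined above).

// Simplified model of the fluent group-builder pattern (illustrative only).
package main

import "fmt"

type desc struct {
	Count    uint32
	MinAsset uint64
}

type holding struct{ Amount uint64 }

type holdingGroup struct {
	desc    desc
	offsets []uint64
	amounts []uint64
}

type holdingBuilder struct {
	offsets []uint64
	amounts []uint64
	idx     int
}

func (b *holdingBuilder) newGroup(size int) *holdingBuilder {
	b.offsets = make([]uint64, size)
	b.amounts = make([]uint64, size)
	b.idx = 0
	return b
}

func (b *holdingBuilder) newElement(offset uint64, h holding) *holdingBuilder {
	b.offsets[b.idx] = offset
	b.amounts[b.idx] = h.Amount
	b.idx++
	return b
}

func (b *holdingBuilder) build(d desc) holdingGroup {
	return holdingGroup{desc: d, offsets: b.offsets, amounts: b.amounts}
}

func main() {
	// two holdings at asset indexes 100 and 107 -> delta offsets 0 and 7
	b := &holdingBuilder{}
	g := b.newGroup(2).
		newElement(0, holding{Amount: 5}).
		newElement(7, holding{Amount: 9}).
		build(desc{Count: 2, MinAsset: 100})
	fmt.Println(g.desc, g.offsets, g.amounts) // {2 100} [0 7] [5 9]
}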
+func makeAssetHoldingGroup(aidx basics.AssetIndex, data interface{}) AssetsHoldingGroup {
+ g := makeAssetGroup(aidx, data, &assetHoldingGroupBuilder{})
+ return g.(AssetsHoldingGroup)
+}
+
+func makeAssetParamsGroup(aidx basics.AssetIndex, data interface{}) AssetsParamsGroup {
+ g := makeAssetGroup(aidx, data, &assetParamsGroupBuilder{})
+ return g.(AssetsParamsGroup)
+}
+
+func (e *ExtendedAssetHolding) prependNewGroup(aidx basics.AssetIndex, data interface{}) {
+ g := makeAssetHoldingGroup(aidx, data)
+ e.Groups = append([]AssetsHoldingGroup{g}, e.Groups...)
+ e.Count++
+}
+
+func (e *ExtendedAssetHolding) appendNewGroup(aidx basics.AssetIndex, data interface{}) {
+ g := makeAssetHoldingGroup(aidx, data)
+ e.Groups = append(e.Groups, g)
e.Count++
+}
+
+// insertNewGroupAfter creates a new group from (aidx, data) and inserts it after group gi (at the newly allocated position gi+1)
+func (e *ExtendedAssetHolding) insertNewGroupAfter(gi int, aidx basics.AssetIndex, data interface{}) {
+ g := makeAssetHoldingGroup(aidx, data)
+ e.insertAfter(gi, g)
+ e.Count++
+}
+
+func (e *ExtendedAssetHolding) insertAfter(gi int, group interface{}) {
e.Groups = append(e.Groups, AssetsHoldingGroup{})
copy(e.Groups[gi+1:], e.Groups[gi:])
- e.Groups[gi+1] = rightGroup
+ e.Groups[gi+1] = group.(AssetsHoldingGroup)
+}
+
+func (e *ExtendedAssetHolding) insertInto(idx int, aidx basics.AssetIndex, data interface{}) {
+ e.Groups[idx].insert(aidx, data.(basics.AssetHolding))
+ e.Count++
+}
+
+func (e *ExtendedAssetParams) prependNewGroup(aidx basics.AssetIndex, data interface{}) {
+ g := makeAssetParamsGroup(aidx, data)
+ e.Groups = append([]AssetsParamsGroup{g}, e.Groups...)
+ e.Count++
+}
+
+func (e *ExtendedAssetParams) appendNewGroup(aidx basics.AssetIndex, data interface{}) {
+ g := makeAssetParamsGroup(aidx, data)
+ e.Groups = append(e.Groups, g)
+ e.Count++
+}
+
+// insertNewGroupAfter creates a new group from (aidx, data) and inserts it after group gi (at the newly allocated position gi+1)
+func (e *ExtendedAssetParams) insertNewGroupAfter(gi int, aidx basics.AssetIndex, data interface{}) {
+ g := makeAssetParamsGroup(aidx, data)
+ e.insertAfter(gi, g)
+ e.Count++
+}
+
+func (e *ExtendedAssetParams) insertAfter(gi int, group interface{}) {
+ e.Groups = append(e.Groups, AssetsParamsGroup{})
+ copy(e.Groups[gi+1:], e.Groups[gi:])
+ e.Groups[gi+1] = group.(AssetsParamsGroup)
+}
+
+func (e *ExtendedAssetParams) insertInto(idx int, aidx basics.AssetIndex, data interface{}) {
+ e.Groups[idx].insert(aidx, data.(basics.AssetParams))
+ e.Count++
}
// Insert takes an array of asset holdings into ExtendedAssetHolding.
// The input sequence must be sorted.
func (e *ExtendedAssetHolding) Insert(input []basics.AssetIndex, data map[basics.AssetIndex]basics.AssetHolding) {
+ flatten := make([]flattenAsset, len(input), len(input))
+ for i, aidx := range input {
+ flatten[i] = flattenAsset{aidx, data[aidx]}
+ }
+ sort.SliceStable(flatten, func(i, j int) bool { return flatten[i].aidx < flatten[j].aidx })
+ insert(flatten, e)
+}
+
+// Insert inserts an array of asset params into ExtendedAssetParams.
+// The input sequence must be sorted.
+func (e *ExtendedAssetParams) Insert(input []basics.AssetIndex, data map[basics.AssetIndex]basics.AssetParams) {
+ flatten := make([]flattenAsset, len(input), len(input))
+ for i, aidx := range input {
+ flatten[i] = flattenAsset{aidx, data[aidx]}
+ }
+ sort.SliceStable(flatten, func(i, j int) bool { return flatten[i].aidx < flatten[j].aidx })
+ insert(flatten, e)
+}
+
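Both Insert variants perform the same preprocessing before handing off to the shared insert helper: copy the map entries into a slice of (index, data) pairs and stably sort them by asset index so the groups can be scanned left to right. A small stand-alone sketch of that step, using a plain string payload instead of the real holding/params types:

// Illustrative flatten-and-sort step (simplified payload type).
package main

import (
	"fmt"
	"sort"
)

type flatAsset struct {
	aidx uint64
	data string
}

func flatten(input []uint64, data map[uint64]string) []flatAsset {
	out := make([]flatAsset, len(input))
	for i, aidx := range input {
		out[i] = flatAsset{aidx, data[aidx]}
	}
	sort.SliceStable(out, func(i, j int) bool { return out[i].aidx < out[j].aidx })
	return out
}

func main() {
	data := map[uint64]string{7: "c", 3: "a", 5: "b"}
	fmt.Println(flatten([]uint64{7, 3, 5}, data)) // [{3 a} {5 b} {7 c}]
}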
+func insert(assets []flattenAsset, agl AbstractAssetGroupList) {
gi := 0
- for _, aidx := range input {
- result := e.findGroup(aidx, gi)
+ for _, asset := range assets {
+ result := findGroup(asset.aidx, gi, agl)
if result.found {
if result.split {
- e.splitInsert(result.gi, aidx, data[aidx])
+ pos := agl.split(result.gi, asset.aidx)
+ agl.insertInto(pos, asset.aidx, asset.data)
} else {
- e.Groups[result.gi].insert(aidx, data[aidx])
- e.Count++
+ agl.insertInto(result.gi, asset.aidx, asset.data)
}
gi = result.gi // advance group search offset (input is ordered, it is safe to search from the last match)
} else {
insertAfter := result.gi
- holding := data[aidx]
- g := AssetsHoldingGroup{
- Count: 1,
- MinAssetIndex: aidx,
- DeltaMaxAssetIndex: 0,
- AssetGroupKey: 0,
- groupData: AssetsHoldingGroupData{
- AssetOffsets: []basics.AssetIndex{0},
- Amounts: []uint64{holding.Amount},
- Frozens: []bool{holding.Frozen},
- },
- loaded: true,
- }
if insertAfter == -1 {
- // special case, prepend
- e.Groups = append([]AssetsHoldingGroup{g}, e.Groups...)
- } else if insertAfter == len(e.Groups)-1 {
- // save on two copying compare to the default branch below
- e.Groups = append(e.Groups, g)
+ agl.prependNewGroup(asset.aidx, asset.data)
+ } else if insertAfter == agl.Len()-1 {
+ agl.appendNewGroup(asset.aidx, asset.data)
} else {
- // insert after result.gi
- e.Groups = append(e.Groups, AssetsHoldingGroup{})
- copy(e.Groups[result.gi+1:], e.Groups[result.gi:])
- e.Groups[result.gi+1] = g
+ agl.insertNewGroupAfter(result.gi, asset.aidx, asset.data)
}
- e.Count++
gi = result.gi + 1
}
}
@@ -391,7 +1246,7 @@ type fgres struct {
split bool
}
-// findGroup looks up for an appropriate group or position for insertion a new asset holdings entry
+// findGroup looks up an appropriate group, or a position at which to insert a new asset entry
// Examples:
// groups of size 4
// [2, 3, 5], [7, 10, 12, 15]
@@ -412,55 +1267,57 @@ type fgres struct {
// [1, 2, 3, 5], [7, 10, 12, 15]
// aidx = 6 -> new group after 0
-func (e ExtendedAssetHolding) findGroup(aidx basics.AssetIndex, startIdx int) fgres {
- if e.Count == 0 {
+func findGroup(aidx basics.AssetIndex, startIdx int, agl AbstractAssetGroupList) fgres {
+ if agl.Total() == 0 {
return fgres{false, -1, false}
}
- for i, g := range e.Groups[startIdx:] {
+ for i := startIdx; i < agl.Len(); i++ {
+ g := agl.Get(i)
// check exact boundaries
- if aidx >= g.MinAssetIndex && aidx <= g.MinAssetIndex+basics.AssetIndex(g.DeltaMaxAssetIndex) {
+ if aidx >= g.MinAsset() && aidx <= g.MaxAsset() {
// found a group that is a right place for the asset
// if it has space, insert into it
- if g.Count < MaxHoldingGroupSize {
- return fgres{found: true, gi: i + startIdx, split: false}
+ if g.HasSpace() {
+ return fgres{found: true, gi: i, split: false}
}
// otherwise split into two groups
- return fgres{found: true, gi: i + startIdx, split: true}
+ return fgres{found: true, gi: i, split: true}
}
// check upper bound
- if aidx >= g.MinAssetIndex && aidx > g.MinAssetIndex+basics.AssetIndex(g.DeltaMaxAssetIndex) {
+ if aidx >= g.MinAsset() && aidx > g.MaxAsset() {
// the asset still might fit into a group if it has space and does not break groups order
- if g.Count < MaxHoldingGroupSize {
+ if g.HasSpace() {
// ensure next group starts with the asset greater than current one
- if i+startIdx < len(e.Groups)-1 && aidx < e.Groups[i+startIdx+1].MinAssetIndex {
- return fgres{found: true, gi: i + startIdx, split: false}
+ if i < agl.Len()-1 && aidx < agl.Get(i+1).MinAsset() {
+ return fgres{found: true, gi: i, split: false}
}
// the last group, ok to add more
- if i+startIdx == len(e.Groups)-1 {
- return fgres{found: true, gi: i + startIdx, split: false}
+ if i == agl.Len()-1 {
+ return fgres{found: true, gi: i, split: false}
}
}
}
// check bottom bound
- if aidx < g.MinAssetIndex {
+ if aidx < g.MinAsset() {
// found a group that is a right place for the asset
// if it has space, insert into it
- if g.Count < MaxHoldingGroupSize {
- return fgres{found: true, gi: i + startIdx, split: false}
+ if g.HasSpace() {
+ return fgres{found: true, gi: i, split: false}
}
// otherwise insert group before the current one
- return fgres{found: false, gi: i + startIdx - 1, split: false}
+ return fgres{found: false, gi: i - 1, split: false}
}
}
// no matching groups then add a new group at the end
- return fgres{found: false, gi: len(e.Groups) - 1, split: false}
+ return fgres{found: false, gi: agl.Len() - 1, split: false}
}
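The three branches of findGroup (inside a group's range, above it but still able to fit, below it) can be hard to trace through the doc-comment examples, so here is a stripped-down, self-contained version of the same decision rules over [min, max] ranges with capacities. Names and values are illustrative, and it omits the startIdx offset handling.

// Simplified model of the findGroup decision rules (illustrative only).
package main

import "fmt"

type grp struct {
	min, max uint64
	count    int
	capacity int
}

type res struct {
	found bool
	gi    int
	split bool
}

func findGroup(aidx uint64, groups []grp) res {
	if len(groups) == 0 {
		return res{false, -1, false}
	}
	for i, g := range groups {
		hasSpace := g.count < g.capacity
		if aidx >= g.min && aidx <= g.max { // inside the group's range
			return res{true, i, !hasSpace} // split when the group is full
		}
		if aidx > g.max && hasSpace { // above the range but might still fit
			if i == len(groups)-1 || aidx < groups[i+1].min {
				return res{true, i, false}
			}
		}
		if aidx < g.min { // below the range
			if hasSpace {
				return res{true, i, false}
			}
			return res{false, i - 1, false} // new group before this one
		}
	}
	return res{false, len(groups) - 1, false} // new group at the end
}

func main() {
	groups := []grp{{min: 2, max: 5, count: 3, capacity: 4}, {min: 7, max: 15, count: 4, capacity: 4}}
	fmt.Println(findGroup(1, groups))  // {true 0 false}: group 0 has room
	fmt.Println(findGroup(6, groups))  // {true 0 false}: still fits before group 1
	fmt.Println(findGroup(9, groups))  // {true 1 true}: group 1 is full, split it
	fmt.Println(findGroup(20, groups)) // {false 1 false}: new group after the last one
}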
// FindGroup returns a group suitable for asset insertion
func (e ExtendedAssetHolding) FindGroup(aidx basics.AssetIndex, startIdx int) int {
- res := e.findGroup(aidx, startIdx)
+ res := findGroup(aidx, startIdx, &e)
if res.found {
return res.gi
}
@@ -468,56 +1325,216 @@ func (e ExtendedAssetHolding) FindGroup(aidx basics.AssetIndex, startIdx int) in
}
// FindAsset returns group index and asset index if found and (-1, -1) otherwise.
-// If a matching group found but the group is not loaded yet, it returns (gi, -1)
+// If a matching group is found but the group is not loaded yet, it returns (groupIdx, -1)
func (e ExtendedAssetHolding) FindAsset(aidx basics.AssetIndex, startIdx int) (int, int) {
- if e.Count == 0 {
- return -1, -1
+ return findAsset(aidx, startIdx, &e)
+}
+
+// FindGroup returns a group suitable for asset insertion
+func (e ExtendedAssetParams) FindGroup(aidx basics.AssetIndex, startIdx int) int {
+ res := findGroup(aidx, startIdx, &e)
+ if res.found {
+ return res.gi
+ }
+ return -1
+}
+
+// FindAsset returns group index and asset index if found and (-1, -1) otherwise.
+// If a matching group is found but the group is not loaded yet, it returns (groupIdx, -1)
+func (e ExtendedAssetParams) FindAsset(aidx basics.AssetIndex, startIdx int) (int, int) {
+ return findAsset(aidx, startIdx, &e)
+}
+
+// findAsset returns group index and asset index if found and (-1, -1) otherwise.
+// If a matching group is found but the group is not loaded yet, it returns (groupIdx, -1).
+// It is suitable for searching within an AbstractAssetGroupList of either the holding or params type.
+func findAsset(aidx basics.AssetIndex, startIdx int, agl AbstractAssetGroupList) (int, int) {
+ const notFound int = -1
+
+ if agl.Total() == 0 {
+ return notFound, notFound
}
- for gi, g := range e.Groups[startIdx:] {
- if aidx >= g.MinAssetIndex && aidx <= g.MinAssetIndex+basics.AssetIndex(g.DeltaMaxAssetIndex) {
- if !g.loaded {
+ // TODO: binary search
+ for i := startIdx; i < agl.Len(); i++ {
+ g := agl.Get(i)
+ if aidx >= g.MinAsset() && aidx <= g.MaxAsset() {
+ if !g.Loaded() {
// groupData not loaded, but the group boundaries match
// return group match and -1 as asset index indicating loading is need
- return gi + startIdx, -1
+ return i, notFound
}
- // linear search because AssetOffsets is delta-encoded, not values
- cur := g.MinAssetIndex
- for ai, d := range g.groupData.AssetOffsets {
- cur = d + cur
- if aidx == cur {
- return gi + startIdx, ai
- }
+ if ai := g.GroupData().Find(aidx, g.MinAsset()); ai != -1 {
+ return i, ai
}
// the group is loaded and the asset not found
- return -1, -1
+ return notFound, notFound
}
}
+ return notFound, notFound
+}
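A compact sketch of the two-step lookup findAsset performs: match the group descriptor's boundaries first, short-circuit with (gi, -1) when the group data is not loaded, and otherwise re-accumulate the delta offsets until the target index is hit. The types and values below are simplified stand-ins.

// Illustrative two-step asset lookup over delta-encoded groups.
package main

import "fmt"

type group struct {
	min, max uint64
	loaded   bool
	offsets  []uint64 // delta-encoded, offsets[0] == 0
}

// findAsset returns (group index, slot index); (-1, -1) when absent and
// (gi, -1) when the matching group's data still has to be loaded.
func findAsset(aidx uint64, groups []group) (int, int) {
	for gi, g := range groups {
		if aidx < g.min || aidx > g.max {
			continue
		}
		if !g.loaded {
			return gi, -1
		}
		cur := g.min
		for ai, d := range g.offsets {
			cur += d
			if cur == aidx {
				return gi, ai
			}
		}
		return -1, -1 // boundaries matched but the asset is not in the group
	}
	return -1, -1
}

func main() {
	groups := []group{
		{min: 100, max: 110, loaded: true, offsets: []uint64{0, 2, 3, 5}}, // 100, 102, 105, 110
		{min: 200, max: 260, loaded: false},
	}
	fmt.Println(findAsset(105, groups)) // 0 2
	fmt.Println(findAsset(103, groups)) // -1 -1
	fmt.Println(findAsset(210, groups)) // 1 -1 (group data needs loading)
}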
- return -1, -1
+type assetHoldingGroupBuilder struct {
+ gd AssetsHoldingGroupData
+ idx int
}
-// ConvertToGroups converts asset holdings map to groups/group data
-func (e *ExtendedAssetHolding) ConvertToGroups(assets map[basics.AssetIndex]basics.AssetHolding) {
- if len(assets) == 0 {
- return
+func (b *assetHoldingGroupBuilder) newGroup(size int) groupBuilder {
+ b.gd = AssetsHoldingGroupData{
+ AssetsCommonGroupData: AssetsCommonGroupData{AssetOffsets: make([]basics.AssetIndex, size, size)},
+ Amounts: make([]uint64, size, size),
+ Frozens: make([]bool, size, size),
+ }
+ b.idx = 0
+ return b
+}
+
+func (b *assetHoldingGroupBuilder) build(desc AssetGroupDesc) interface{} {
+ defer func() {
+ b.gd = AssetsHoldingGroupData{}
+ b.idx = 0
+ }()
+
+ return AssetsHoldingGroup{
+ AssetGroupDesc: desc,
+ groupData: b.gd,
+ loaded: true,
+ }
+}
+
+func (b *assetHoldingGroupBuilder) newElement(offset basics.AssetIndex, data interface{}) groupBuilder {
+ b.gd.AssetOffsets[b.idx] = offset
+ holding := data.(basics.AssetHolding)
+ b.gd.Amounts[b.idx] = holding.Amount
+ b.gd.Frozens[b.idx] = holding.Frozen
+ b.idx++
+ return b
+}
+
+type assetParamsGroupBuilder struct {
+ gd AssetsParamsGroupData
+ idx int
+}
+
+func (b *assetParamsGroupBuilder) newGroup(size int) groupBuilder {
+ b.gd = AssetsParamsGroupData{
+ AssetsCommonGroupData: AssetsCommonGroupData{AssetOffsets: make([]basics.AssetIndex, size, size)},
+ Totals: make([]uint64, size, size),
+ Decimals: make([]uint32, size, size),
+ DefaultFrozens: make([]bool, size, size),
+ UnitNames: make([]string, size, size),
+ AssetNames: make([]string, size, size),
+ URLs: make([]string, size, size),
+ MetadataHash: make([][32]byte, size, size),
+ Managers: make([]basics.Address, size, size),
+ Reserves: make([]basics.Address, size, size),
+ Freezes: make([]basics.Address, size, size),
+ Clawbacks: make([]basics.Address, size, size),
}
+ b.idx = 0
+ return b
+}
+
+func (b *assetParamsGroupBuilder) build(desc AssetGroupDesc) interface{} {
+ defer func() {
+ b.gd = AssetsParamsGroupData{}
+ b.idx = 0
+ }()
- type asset struct {
- aidx basics.AssetIndex
- holdings basics.AssetHolding
+ return AssetsParamsGroup{
+ AssetGroupDesc: desc,
+ groupData: b.gd,
+ loaded: true,
}
- flatten := make([]asset, len(assets), len(assets))
+}
+
+func (b *assetParamsGroupBuilder) newElement(offset basics.AssetIndex, data interface{}) groupBuilder {
+ b.gd.AssetOffsets[b.idx] = offset
+ params := data.(basics.AssetParams)
+ b.gd.Totals[b.idx] = params.Total
+ b.gd.Decimals[b.idx] = params.Decimals
+ b.gd.DefaultFrozens[b.idx] = params.DefaultFrozen
+ b.gd.UnitNames[b.idx] = params.UnitName
+ b.gd.AssetNames[b.idx] = params.AssetName
+ b.gd.URLs[b.idx] = params.URL
+
+ copy(b.gd.MetadataHash[b.idx][:], params.MetadataHash[:])
+ copy(b.gd.Managers[b.idx][:], params.Manager[:])
+ copy(b.gd.Reserves[b.idx][:], params.Reserve[:])
+ copy(b.gd.Freezes[b.idx][:], params.Freeze[:])
+ copy(b.gd.Clawbacks[b.idx][:], params.Clawback[:])
+ b.idx++
+
+ return b
+}
+
+type assetFlattener struct {
+ assets []flattenAsset
+}
+
+type flattenAsset struct {
+ aidx basics.AssetIndex
+ data interface{}
+}
+
+func newAssetHoldingFlattener(assets map[basics.AssetIndex]basics.AssetHolding) *assetFlattener {
+ flatten := make([]flattenAsset, len(assets), len(assets))
i := 0
for k, v := range assets {
- flatten[i] = asset{k, v}
+ flatten[i] = flattenAsset{k, v}
i++
}
sort.SliceStable(flatten, func(i, j int) bool { return flatten[i].aidx < flatten[j].aidx })
+ return &assetFlattener{flatten}
+}
+
+func newAssetParamsFlattener(assets map[basics.AssetIndex]basics.AssetParams) *assetFlattener {
+ flatten := make([]flattenAsset, len(assets), len(assets))
+ i := 0
+ for k, v := range assets {
+ flatten[i] = flattenAsset{k, v}
+ i++
+ }
+ sort.SliceStable(flatten, func(i, j int) bool { return flatten[i].aidx < flatten[j].aidx })
+ return &assetFlattener{flatten}
+}
+
+func (f *assetFlattener) Count() uint32 {
+ return uint32(len(f.assets))
+}
+
+func (f *assetFlattener) AssetIndex(idx int) basics.AssetIndex {
+ return f.assets[idx].aidx
+}
+
+func (f *assetFlattener) Data(idx int) interface{} {
+ return f.assets[idx].data
+}
+
+// ConvertToGroups converts a map of basics.AssetHolding into asset holding groups
+func (e *ExtendedAssetHolding) ConvertToGroups(assets map[basics.AssetIndex]basics.AssetHolding) {
+ if len(assets) == 0 {
+ return
+ }
+ b := assetHoldingGroupBuilder{}
+ flt := newAssetHoldingFlattener(assets)
+ convertToGroups(e, flt, &b, MaxHoldingGroupSize)
+}
+
+// ConvertToGroups converts a map of basics.AssetParams into asset params groups
+func (e *ExtendedAssetParams) ConvertToGroups(assets map[basics.AssetIndex]basics.AssetParams) {
+ if len(assets) == 0 {
+ return
+ }
+ b := assetParamsGroupBuilder{}
+ flt := newAssetParamsFlattener(assets)
+ convertToGroups(e, flt, &b, MaxParamsGroupSize)
+}
- numGroups := (len(assets) + MaxHoldingGroupSize - 1) / MaxHoldingGroupSize
+// convertToGroups converts data from a flattener into groups produced by a groupBuilder and assigns them into the AbstractAssetGroupList
+func convertToGroups(agl AbstractAssetGroupList, flt flattener, builder groupBuilder, maxGroupSize int) {
min := func(a, b int) int {
if a < b {
return a
@@ -525,33 +1542,29 @@ func (e *ExtendedAssetHolding) ConvertToGroups(assets map[basics.AssetIndex]basi
return b
}
- e.Count = uint32(len(assets))
- e.Groups = make([]AssetsHoldingGroup, numGroups)
+ numGroups := (int(flt.Count()) + maxGroupSize - 1) / maxGroupSize
+ agl.Reset(flt.Count(), numGroups)
for i := 0; i < numGroups; i++ {
- start := i * MaxHoldingGroupSize
- end := min((i+1)*MaxHoldingGroupSize, len(assets))
+ start := i * maxGroupSize
+ end := min((i+1)*maxGroupSize, int(flt.Count()))
size := end - start
- gd := AssetsHoldingGroupData{
- AssetOffsets: make([]basics.AssetIndex, size, size),
- Amounts: make([]uint64, size, size),
- Frozens: make([]bool, size, size),
- }
- first := flatten[start].aidx
+ builder.newGroup(size)
+
+ first := flt.AssetIndex(start)
prev := first
for j, di := start, 0; j < end; j, di = j+1, di+1 {
- gd.AssetOffsets[di] = flatten[j].aidx - prev
- gd.Amounts[di] = flatten[j].holdings.Amount
- gd.Frozens[di] = flatten[j].holdings.Frozen
- prev = flatten[j].aidx
+ offset := flt.AssetIndex(j) - prev
+ builder.newElement(offset, flt.Data(j))
+ prev = flt.AssetIndex(j)
}
- e.Groups[i] = AssetsHoldingGroup{
+
+ desc := AssetGroupDesc{
Count: uint32(size),
MinAssetIndex: first,
DeltaMaxAssetIndex: uint64(prev - first),
- groupData: gd,
- loaded: true,
}
+ agl.Assign(i, builder.build(desc))
}
}
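Concretely, convertToGroups chops the sorted asset list into chunks of at most maxGroupSize and, per chunk, records the first asset, the delta offsets and the distance to the last asset. Here is a self-contained sketch of that chunking with a toy input; the types are simplified and not the real group structs.

// Illustrative chunking of a sorted asset list into delta-encoded groups.
package main

import "fmt"

type groupDesc struct {
	count    int
	minAsset uint64
	deltaMax uint64
	offsets  []uint64
}

func toGroups(sorted []uint64, maxGroupSize int) []groupDesc {
	numGroups := (len(sorted) + maxGroupSize - 1) / maxGroupSize
	groups := make([]groupDesc, 0, numGroups)
	for start := 0; start < len(sorted); start += maxGroupSize {
		end := start + maxGroupSize
		if end > len(sorted) {
			end = len(sorted)
		}
		first, prev := sorted[start], sorted[start]
		offsets := make([]uint64, 0, end-start)
		for _, aidx := range sorted[start:end] {
			offsets = append(offsets, aidx-prev)
			prev = aidx
		}
		groups = append(groups, groupDesc{count: end - start, minAsset: first, deltaMax: prev - first, offsets: offsets})
	}
	return groups
}

func main() {
	sorted := []uint64{10, 12, 15, 20, 31, 40}
	for _, g := range toGroups(sorted, 4) {
		fmt.Printf("%+v\n", g)
	}
	// {count:4 minAsset:10 deltaMax:10 offsets:[0 2 3 5]}
	// {count:2 minAsset:31 deltaMax:9 offsets:[0 9]}
}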
@@ -562,11 +1575,12 @@ type continuosRange struct {
count int // total holdings
}
-func (e ExtendedAssetHolding) findLoadedSiblings() (loaded []int, crs []continuosRange) {
+func findLoadedSiblings(agl AbstractAssetGroupList) (loaded []int, crs []continuosRange) {
// find candidates for merging
- loaded = make([]int, 0, len(e.Groups))
- for i := 0; i < len(e.Groups); i++ {
- if !e.Groups[i].Loaded() {
+ loaded = make([]int, 0, agl.Len())
+ for i := 0; i < agl.Len(); i++ {
+ g := agl.Get(i)
+ if !g.Loaded() {
continue
}
if len(loaded) > 0 && loaded[len(loaded)-1] == i-1 {
@@ -576,12 +1590,13 @@ func (e ExtendedAssetHolding) findLoadedSiblings() (loaded []int, crs []continuo
last := &crs[len(crs)-1]
if last.start+last.size == i {
last.size++
- last.count += int(e.Groups[i].Count)
+ last.count += int(g.AssetCount())
exists = true
}
}
if !exists {
- count := int(e.Groups[i-1].Count + e.Groups[i].Count)
+ pg := agl.Get(i - 1)
+ count := int(pg.AssetCount() + g.AssetCount())
crs = append(crs, continuosRange{i - 1, 2, count})
}
}
@@ -594,56 +1609,35 @@ func (e ExtendedAssetHolding) findLoadedSiblings() (loaded []int, crs []continuo
return
}
-// merge merges groups [start, start+size) and returns keys of deleted group data entries
-func (e *ExtendedAssetHolding) merge(start int, size int, hint int) (deleted []int64) {
+// mergeInternal merges groups [start, start+size) and returns keys of deleted group data entries
+func mergeInternal(agl AbstractAssetGroupList, start int, size int, hint int, assetThreshold uint32) (deleted []int64) {
deleted = make([]int64, 0, hint)
// process i and i + 1 groups at once => size-1 iterations
i := 0
for i < size-1 {
li := start + i // left group index, destination
ri := start + i + 1 // right group index, source
- lg := &e.Groups[li]
- rg := &e.Groups[ri]
+ lg := agl.Get(li)
+ rg := agl.Get(ri)
- num := int(MaxHoldingGroupSize - lg.Count)
+ num := assetThreshold - lg.AssetCount()
if num == 0 { // group is full, skip
i++
continue
}
- if num > int(rg.Count) { // source group is shorter than dest capacity, adjust
- num = int(rg.Count)
+ if num > rg.AssetCount() { // source group is shorter than dest capacity, adjust
+ num = rg.AssetCount()
}
- groupDelta := rg.MinAssetIndex - (lg.MinAssetIndex + basics.AssetIndex(lg.DeltaMaxAssetIndex))
- delta := basics.AssetIndex(0)
- lg.groupData.AssetOffsets = append(lg.groupData.AssetOffsets, rg.groupData.AssetOffsets[0]+groupDelta)
- for j := 1; j < num; j++ {
- lg.groupData.AssetOffsets = append(lg.groupData.AssetOffsets, rg.groupData.AssetOffsets[j])
- delta += rg.groupData.AssetOffsets[j]
- }
- lg.DeltaMaxAssetIndex += uint64(delta + groupDelta)
- lg.groupData.Amounts = append(lg.groupData.Amounts, rg.groupData.Amounts[:num]...)
- lg.groupData.Frozens = append(lg.groupData.Frozens, rg.groupData.Frozens[:num]...)
- lg.Count += uint32(num)
- if num != int(rg.Count) {
+
+ delta := lg.mergeIn(rg, num)
+ if num != rg.AssetCount() {
// src group survived, update it and repeat
- rg.Count -= uint32(num)
- rg.groupData.AssetOffsets = rg.groupData.AssetOffsets[num:]
- delta += rg.groupData.AssetOffsets[0]
- rg.groupData.AssetOffsets[0] = 0
- rg.groupData.Amounts = rg.groupData.Amounts[num:]
- rg.groupData.Frozens = rg.groupData.Frozens[num:]
- rg.MinAssetIndex += delta
- rg.DeltaMaxAssetIndex -= uint64(delta)
+ rg.sliceRight(num, delta)
i++
} else {
// entire src group gone: save the key and delete from Groups
- deleted = append(deleted, e.Groups[ri].AssetGroupKey)
- if ri == len(e.Groups) {
- // last group, cut and exit
- e.Groups = e.Groups[:len(e.Groups)-1]
- return
- }
- e.Groups = append(e.Groups[:ri], e.Groups[ri+1:]...)
+ deleted = append(deleted, rg.Key())
+ agl.dropGroup(ri)
// restart merging with the same index but decrease size
size--
}
@@ -656,20 +1650,32 @@ func (e *ExtendedAssetHolding) merge(start int, size int, hint int) (deleted []i
// - loaded list group indices that are loaded and needs to flushed
// - deleted list of group data keys that needs to be deleted
func (e *ExtendedAssetHolding) Merge() (loaded []int, deleted []int64) {
- loaded, crs := e.findLoadedSiblings()
+ return merge(e, MaxHoldingGroupSize)
+}
+
+// Merge attempts to re-merge loaded groups by squashing small loaded sibling groups together
+// Returns:
+// - loaded: indices of groups that are loaded and need to be flushed
+// - deleted: group data keys that need to be deleted
+func (e *ExtendedAssetParams) Merge() (loaded []int, deleted []int64) {
+ return merge(e, MaxParamsGroupSize)
+}
+
+func merge(agl AbstractAssetGroupList, assetThreshold uint32) (loaded []int, deleted []int64) {
+ loaded, crs := findLoadedSiblings(agl)
if len(crs) == 0 {
return
}
someGroupDeleted := false
- offset := 0 // difference in group indexes that happens after deleteion some groups from e.Groups array
+	offset := 0 // difference in group indexes caused by deleting some groups from the group list
for _, cr := range crs {
- minGroupsRequired := (cr.count + MaxHoldingGroupSize - 1) / MaxHoldingGroupSize
+ minGroupsRequired := (cr.count + int(assetThreshold) - 1) / int(assetThreshold)
if minGroupsRequired == cr.size {
// no gain in merging, skip
continue
}
- del := e.merge(cr.start-offset, cr.size, cr.size-minGroupsRequired)
+ del := mergeInternal(agl, cr.start-offset, cr.size, cr.size-minGroupsRequired, assetThreshold)
offset += len(del)
for _, key := range del {
someGroupDeleted = true
@@ -682,8 +1688,9 @@ func (e *ExtendedAssetHolding) Merge() (loaded []int, deleted []int64) {
if someGroupDeleted {
// rebuild loaded list since indices changed after merging
loaded = make([]int, 0, len(loaded)-len(deleted))
- for i := 0; i < len(e.Groups); i++ {
- if e.Groups[i].Loaded() {
+ for i := 0; i < agl.Len(); i++ {
+ g := agl.Get(i)
+ if g.Loaded() {
loaded = append(loaded, i)
}
}
@@ -691,6 +1698,68 @@ func (e *ExtendedAssetHolding) Merge() (loaded []int, deleted []int64) {
return
}
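The sibling scan plus the ceil(count / maxGroupSize) test is what decides whether a run of loaded groups is worth squashing. Below is a stand-alone sketch of that part of the logic only; it models run detection and the worth-merging check, not the actual data movement, and all names and values are illustrative.

// Illustrative detection of mergeable runs of loaded sibling groups.
package main

import "fmt"

type groupInfo struct {
	loaded bool
	count  int // assets currently stored in the group
}

type run struct{ start, size, count int }

func loadedRuns(groups []groupInfo) []run {
	var runs []run
	for i := 1; i < len(groups); i++ {
		if !groups[i-1].loaded || !groups[i].loaded {
			continue
		}
		if len(runs) > 0 && runs[len(runs)-1].start+runs[len(runs)-1].size == i {
			runs[len(runs)-1].size++
			runs[len(runs)-1].count += groups[i].count
		} else {
			runs = append(runs, run{i - 1, 2, groups[i-1].count + groups[i].count})
		}
	}
	return runs
}

func main() {
	const maxGroupSize = 256
	groups := []groupInfo{
		{loaded: true, count: 100}, {loaded: true, count: 60}, {loaded: true, count: 80},
		{loaded: false, count: 256},
		{loaded: true, count: 256}, {loaded: true, count: 256},
	}
	for _, r := range loadedRuns(groups) {
		minGroups := (r.count + maxGroupSize - 1) / maxGroupSize
		fmt.Printf("run %+v -> worth merging: %v\n", r, minGroups < r.size)
	}
	// run {start:0 size:3 count:240} -> worth merging: true
	// run {start:4 size:2 count:512} -> worth merging: false
}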
+// Get returns AbstractAssetGroup interface by group index
+func (e *ExtendedAssetHolding) Get(gi int) AbstractAssetGroup {
+ return &(e.Groups[gi])
+}
+
+// Len returns number of groups
+func (e *ExtendedAssetHolding) Len() int {
+ return len(e.Groups)
+}
+
+// Total returns the number of assets
+func (e *ExtendedAssetHolding) Total() uint32 {
+ return e.Count
+}
+
+// Reset sets count to a new value and re-allocates groups
+func (e *ExtendedAssetHolding) Reset(count uint32, length int) {
+ e.Count = count
+ e.Groups = make([]AssetsHoldingGroup, length)
+}
+
+// Assign sets group at group index position
+func (e *ExtendedAssetHolding) Assign(gi int, group interface{}) {
+ e.Groups[gi] = group.(AssetsHoldingGroup)
+}
+
+// Get returns AbstractAssetGroup interface by group index
+func (e *ExtendedAssetParams) Get(gi int) AbstractAssetGroup {
+ return &(e.Groups[gi])
+}
+
+// Len returns number of groups
+func (e *ExtendedAssetParams) Len() int {
+ return len(e.Groups)
+}
+
+// Total returns the number of assets
+func (e *ExtendedAssetParams) Total() uint32 {
+ return e.Count
+}
+
+// Reset sets count to a new value and re-allocates groups
+func (e *ExtendedAssetParams) Reset(count uint32, length int) {
+ e.Count = count
+ e.Groups = make([]AssetsParamsGroup, length)
+}
+
+// Assign sets group at group index position
+func (e *ExtendedAssetParams) Assign(gi int, group interface{}) {
+ e.Groups[gi] = group.(AssetsParamsGroup)
+}
+
+// TestGetGroupData returns group data. Used in tests only
+func (g AssetsHoldingGroup) TestGetGroupData() AssetsHoldingGroupData {
+ return g.groupData
+}
+
+// TestGetGroupData returns group data. Used in tests only
+func (g AssetsParamsGroup) TestGetGroupData() AssetsParamsGroupData {
+ return g.groupData
+}
+
// TestClearGroupData removes all the groups, used in tests only
func (e *ExtendedAssetHolding) TestClearGroupData() {
for i := 0; i < len(e.Groups); i++ {
diff --git a/ledger/ledgercore/persistedacctdata_test.go b/ledger/ledgercore/persistedacctdata_test.go
index 00364efe0..ff9240423 100644
--- a/ledger/ledgercore/persistedacctdata_test.go
+++ b/ledger/ledgercore/persistedacctdata_test.go
@@ -159,12 +159,93 @@ type groupSpec struct {
count int
}
-func genExtendedHolding(t *testing.T, spec []groupSpec) (e ExtendedAssetHolding) {
+func assetHoldingTestGroupMaker(desc AssetGroupDesc, ao []basics.AssetIndex, am []uint64) interface{} {
+ g := AssetsHoldingGroup{
+ AssetGroupDesc: desc,
+ groupData: AssetsHoldingGroupData{
+ AssetsCommonGroupData: AssetsCommonGroupData{AssetOffsets: ao},
+ Amounts: am,
+ Frozens: make([]bool, len(ao)),
+ },
+ loaded: true,
+ }
+ return g
+}
+
+func assetParamsTestGroupMaker(desc AssetGroupDesc, ao []basics.AssetIndex, am []uint64) interface{} {
+ assetNames := make([]string, len(ao))
+ for i, total := range am {
+ assetNames[i] = fmt.Sprintf("a%d", total)
+ }
+ g := AssetsParamsGroup{
+ AssetGroupDesc: desc,
+ groupData: AssetsParamsGroupData{
+ AssetsCommonGroupData: AssetsCommonGroupData{AssetOffsets: ao},
+ Totals: am,
+ Decimals: make([]uint32, len(ao)),
+ DefaultFrozens: make([]bool, len(ao)),
+ UnitNames: make([]string, len(ao)),
+ AssetNames: assetNames,
+ URLs: make([]string, len(ao)),
+ MetadataHash: make([][32]byte, len(ao)),
+ Managers: make([]basics.Address, len(ao)),
+ Reserves: make([]basics.Address, len(ao)),
+ Freezes: make([]basics.Address, len(ao)),
+ Clawbacks: make([]basics.Address, len(ao)),
+ },
+ loaded: true,
+ }
+ return g
+}
+
+func genExtendedHolding(t testing.TB, spec []groupSpec) (e ExtendedAssetHolding) {
e.Groups = make([]AssetsHoldingGroup, len(spec))
+ count := genExtendedAsset(spec, &e, assetHoldingTestGroupMaker)
+ e.Count = count
+
+ a := require.New(t)
+ for _, s := range spec {
+ gi, ai := e.FindAsset(s.start, 0)
+ a.NotEqual(-1, gi)
+ a.NotEqual(-1, ai)
+ a.Equal(uint64(s.start), e.Groups[gi].groupData.Amounts[ai])
+ gi, ai = e.FindAsset(s.end, 0)
+ a.NotEqual(-1, gi)
+ a.NotEqual(-1, ai)
+ a.Equal(uint64(s.end), e.Groups[gi].groupData.Amounts[ai])
+ }
+
+ return e
+}
+
+func genExtendedParams(t testing.TB, spec []groupSpec) (e ExtendedAssetParams) {
+ e.Groups = make([]AssetsParamsGroup, len(spec))
+ count := genExtendedAsset(spec, &e, assetParamsTestGroupMaker)
+ e.Count = count
+
+ a := require.New(t)
+ for _, s := range spec {
+ gi, ai := e.FindAsset(s.start, 0)
+ a.NotEqual(-1, gi)
+ a.NotEqual(-1, ai)
+ a.Equal(uint64(s.start), e.Groups[gi].groupData.Totals[ai])
+ a.Equal(fmt.Sprintf("a%d", e.Groups[gi].groupData.Totals[ai]), e.Groups[gi].groupData.AssetNames[ai])
+ gi, ai = e.FindAsset(s.end, 0)
+ a.NotEqual(-1, gi)
+ a.NotEqual(-1, ai)
+ a.Equal(uint64(s.end), e.Groups[gi].groupData.Totals[ai])
+ }
+
+ return e
+}
+
+func genExtendedAsset(spec []groupSpec, agl AbstractAssetGroupList, maker func(AssetGroupDesc, []basics.AssetIndex, []uint64) interface{}) (count uint32) {
for i, s := range spec {
- e.Groups[i].Count = uint32(s.count)
- e.Groups[i].MinAssetIndex = s.start
- e.Groups[i].DeltaMaxAssetIndex = uint64(s.end - s.start)
+ desc := AssetGroupDesc{
+ Count: uint32(s.count),
+ MinAssetIndex: s.start,
+ DeltaMaxAssetIndex: uint64(s.end - s.start),
+ }
ao := make([]basics.AssetIndex, s.count)
am := make([]uint64, s.count)
ao[0] = 0
@@ -180,23 +261,11 @@ func genExtendedHolding(t *testing.T, spec []groupSpec) (e ExtendedAssetHolding)
ao[s.count-1] = s.end - aidx + gap
am[s.count-1] = uint64(s.end)
}
- e.Groups[i].groupData = AssetsHoldingGroupData{AssetOffsets: ao, Amounts: am, Frozens: make([]bool, len(ao))}
- e.Groups[i].loaded = true
- e.Count += uint32(s.count)
+ group := maker(desc, ao, am)
+ agl.Assign(i, group)
+ count += uint32(s.count)
}
- a := require.New(t)
- for _, s := range spec {
- gi, ai := e.FindAsset(s.start, 0)
- a.NotEqual(-1, gi)
- a.NotEqual(-1, ai)
- a.Equal(uint64(s.start), e.Groups[gi].groupData.Amounts[ai])
- gi, ai = e.FindAsset(s.end, 0)
- a.NotEqual(-1, gi)
- a.NotEqual(-1, ai)
- a.Equal(uint64(s.end), e.Groups[gi].groupData.Amounts[ai])
- }
-
- return e
+ return count
}
// test for AssetsHoldingGroup.insert
@@ -208,6 +277,7 @@ func TestAssetHoldingGroupInsert(t *testing.T) {
}
e := genExtendedHolding(t, spec)
+ e2 := genExtendedParams(t, spec)
oldCount := e.Count
oldDeltaMaxAssetIndex := e.Groups[0].DeltaMaxAssetIndex
oldAssetOffsets := make([]basics.AssetIndex, spec[0].count)
@@ -250,9 +320,23 @@ func TestAssetHoldingGroupInsert(t *testing.T) {
a.Equal(oldAssetOffsets[1:], e.Groups[0].groupData.AssetOffsets[2:])
checkAssetMap(aidx, e.Groups[0])
+ e2.Groups[0].insert(aidx, basics.AssetParams{Total: uint64(aidx)})
+ a.Equal(oldCount+1, e2.Groups[0].Count)
+ a.Equal(aidx, e2.Groups[0].MinAssetIndex)
+ a.Equal(oldDeltaMaxAssetIndex+uint64((spec[0].start-aidx)), e2.Groups[0].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.AssetOffsets))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Totals))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Decimals))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.DefaultFrozens))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Managers))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.MetadataHash))
+ a.Equal(basics.AssetIndex(0), e2.Groups[0].groupData.AssetOffsets[0])
+ a.Equal(spec[0].start-aidx, e2.Groups[0].groupData.AssetOffsets[1])
+ a.Equal(oldAssetOffsets[1:], e2.Groups[0].groupData.AssetOffsets[2:])
+
// append
- e = genExtendedHolding(t, spec)
aidx = spec[0].end + 10
+ e = genExtendedHolding(t, spec)
e.Groups[0].insert(aidx, basics.AssetHolding{Amount: uint64(aidx)})
a.Equal(oldCount+1, e.Groups[0].Count)
a.Equal(spec[0].start, e.Groups[0].MinAssetIndex)
@@ -261,14 +345,28 @@ func TestAssetHoldingGroupInsert(t *testing.T) {
a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Amounts))
a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Frozens))
a.Equal(basics.AssetIndex(0), e.Groups[0].groupData.AssetOffsets[0])
- a.Equal(oldAssetOffsets, e.Groups[0].groupData.AssetOffsets[:e.Groups[0].Count-1])
- a.Equal(aidx-spec[0].end, e.Groups[0].groupData.AssetOffsets[e.Groups[0].Count-1])
+	a.Equal(oldAssetOffsets, e.Groups[0].groupData.AssetOffsets[:e.Groups[0].Count-1])
+	a.Equal(aidx-spec[0].end, e.Groups[0].groupData.AssetOffsets[e.Groups[0].Count-1])
checkAssetMap(aidx, e.Groups[0])
+ e2 = genExtendedParams(t, spec)
+ e2.Groups[0].insert(aidx, basics.AssetParams{Total: uint64(aidx)})
+ a.Equal(oldCount+1, e2.Groups[0].Count)
+ a.Equal(spec[0].start, e2.Groups[0].MinAssetIndex)
+ a.Equal(uint64(aidx-spec[0].start), e2.Groups[0].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Totals))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Decimals))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.DefaultFrozens))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Managers))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.MetadataHash))
+ a.Equal(basics.AssetIndex(0), e2.Groups[0].groupData.AssetOffsets[0])
+ a.Equal(oldAssetOffsets, e2.Groups[0].groupData.AssetOffsets[:e2.Groups[0].Count-1])
+ a.Equal(aidx-spec[0].end, e2.Groups[0].groupData.AssetOffsets[e2.Groups[0].Count-1])
+
// insert in the middle
- e = genExtendedHolding(t, spec)
aidx = spec[0].end - 1
delta := spec[0].end - aidx
+ e = genExtendedHolding(t, spec)
e.Groups[0].insert(aidx, basics.AssetHolding{Amount: uint64(aidx)})
a.Equal(oldCount+1, e.Groups[0].Count)
a.Equal(spec[0].start, e.Groups[0].MinAssetIndex)
@@ -281,6 +379,33 @@ func TestAssetHoldingGroupInsert(t *testing.T) {
a.Equal(oldAssetOffsets[len(oldAssetOffsets)-1]-delta, e.Groups[0].groupData.AssetOffsets[e.Groups[0].Count-2])
a.Equal(delta, e.Groups[0].groupData.AssetOffsets[e.Groups[0].Count-1])
checkAssetMap(aidx, e.Groups[0])
+
+ e2 = genExtendedParams(t, spec)
+ e2.Groups[0].insert(aidx, basics.AssetParams{Total: uint64(aidx)})
+ a.Equal(oldCount+1, e2.Groups[0].Count)
+ a.Equal(spec[0].start, e2.Groups[0].MinAssetIndex)
+ a.Equal(uint64(spec[0].end-spec[0].start), e2.Groups[0].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.AssetOffsets))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Totals))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Decimals))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.DefaultFrozens))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.Managers))
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.MetadataHash))
+ a.Equal(basics.AssetIndex(0), e2.Groups[0].groupData.AssetOffsets[0])
+ a.Equal(oldAssetOffsets[:len(oldAssetOffsets)-1], e2.Groups[0].groupData.AssetOffsets[:e2.Groups[0].Count-2])
+ a.Equal(oldAssetOffsets[len(oldAssetOffsets)-1]-delta, e2.Groups[0].groupData.AssetOffsets[e2.Groups[0].Count-2])
+ a.Equal(delta, e2.Groups[0].groupData.AssetOffsets[e2.Groups[0].Count-1])
+
+}
+
+func checkGroup(t *testing.T, group interface{}) {
+ if g, ok := group.(*AssetsHoldingGroup); ok {
+ checkHoldings(t, *g)
+ } else if g, ok := group.(*AssetsParamsGroup); ok {
+ checkParams(t, *g)
+ } else {
+		t.Fatalf("%T is not %T nor %T", group, &AssetsHoldingGroup{}, &AssetsParamsGroup{})
+ }
}
func checkHoldings(t *testing.T, g AssetsHoldingGroup) {
@@ -292,8 +417,18 @@ func checkHoldings(t *testing.T, g AssetsHoldingGroup) {
}
}
-// test for AssetsHoldingGroup.splitInsert
-func TestAssetHoldingSplitInsertGroup(t *testing.T) {
+func checkParams(t *testing.T, g AssetsParamsGroup) {
+ a := require.New(t)
+ aidx := g.MinAssetIndex
+ for i := 0; i < int(g.Count); i++ {
+ aidx += g.groupData.AssetOffsets[i]
+ a.Equal(uint64(aidx), g.groupData.Totals[i])
+ a.Equal(fmt.Sprintf("a%d", g.groupData.Totals[i]), g.groupData.AssetNames[i])
+ }
+}
+
+// test for AssetsHoldingGroup.split + insertAfter
+func TestAssetSplitInsertAfter(t *testing.T) {
a := require.New(t)
spec1 := []groupSpec{
@@ -317,6 +452,7 @@ func TestAssetHoldingSplitInsertGroup(t *testing.T) {
rsize := test.split[1]
t.Run(fmt.Sprintf("size=%d", spec[0].count), func(t *testing.T) {
e := genExtendedHolding(t, spec)
+ e2 := genExtendedParams(t, spec)
// save original data for later comparison
oldCount := e.Count
@@ -334,66 +470,153 @@ func TestAssetHoldingSplitInsertGroup(t *testing.T) {
// split the group and insert left
aidx := spec[0].start + 1
- e.splitInsert(0, aidx, basics.AssetHolding{Amount: uint64(aidx)})
+ pos := e.split(0, aidx)
+ a.Equal(0, pos)
+ e.insertInto(pos, aidx, basics.AssetHolding{Amount: uint64(aidx)})
a.Equal(oldCount+1, e.Count)
a.Equal(2, len(e.Groups))
a.Equal(e.Count, e.Groups[0].Count+e.Groups[1].Count)
+ pos = e2.split(0, aidx)
+ a.Equal(0, pos)
+ e2.insertInto(pos, aidx, basics.AssetParams{Total: uint64(aidx), AssetName: fmt.Sprintf("a%d", aidx)})
+ a.Equal(oldCount+1, e2.Count)
+ a.Equal(2, len(e2.Groups))
+ a.Equal(e2.Count, e2.Groups[0].Count+e2.Groups[1].Count)
+
a.Equal(spec[0].start, e.Groups[0].MinAssetIndex)
a.Equal(uint32(lsize+1), e.Groups[0].Count)
a.Equal(uint64((lsize-1)*gap), e.Groups[0].DeltaMaxAssetIndex)
a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.AssetOffsets))
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Amounts))
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Frozens))
+
+ a.Equal(spec[0].start, e2.Groups[0].MinAssetIndex)
+ a.Equal(uint32(lsize+1), e2.Groups[0].Count)
+ a.Equal(uint64((lsize-1)*gap), e2.Groups[0].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.AssetOffsets))
+
+ checkGroupDataArrays(a, int(e.Groups[0].Count), &e.Groups[0])
+ checkGroupDataArrays(a, int(e2.Groups[0].Count), &e2.Groups[0])
+
a.Equal(oldAssetOffsets1[0], e.Groups[0].groupData.AssetOffsets[0])
a.Equal(basics.AssetIndex(1), e.Groups[0].groupData.AssetOffsets[1])
a.Equal(basics.AssetIndex(1), e.Groups[0].groupData.AssetOffsets[2])
a.Equal(oldAssetOffsets1[2:], e.Groups[0].groupData.AssetOffsets[3:])
+
+ a.Equal(oldAssetOffsets1[0], e2.Groups[0].groupData.AssetOffsets[0])
+ a.Equal(basics.AssetIndex(1), e2.Groups[0].groupData.AssetOffsets[1])
+ a.Equal(basics.AssetIndex(1), e2.Groups[0].groupData.AssetOffsets[2])
+ a.Equal(oldAssetOffsets1[2:], e2.Groups[0].groupData.AssetOffsets[3:])
+
checkHoldings(t, e.Groups[0])
+ checkParams(t, e2.Groups[0])
a.Equal(spec[0].start+basics.AssetIndex(e.Groups[0].DeltaMaxAssetIndex+uint64(gap)), e.Groups[1].MinAssetIndex)
a.Equal(uint32(rsize), e.Groups[1].Count)
a.Equal(uint64(spec[0].end-e.Groups[1].MinAssetIndex), e.Groups[1].DeltaMaxAssetIndex)
a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.AssetOffsets))
- a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.Amounts))
- a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.Frozens))
a.Equal(basics.AssetIndex(0), e.Groups[1].groupData.AssetOffsets[0])
a.Equal(oldAssetOffsets2[1:], e.Groups[1].groupData.AssetOffsets[1:])
+ checkGroupDataArrays(a, int(e.Groups[1].Count), &e.Groups[1])
checkHoldings(t, e.Groups[1])
+ a.Equal(spec[0].start+basics.AssetIndex(e2.Groups[0].DeltaMaxAssetIndex+uint64(gap)), e2.Groups[1].MinAssetIndex)
+ a.Equal(uint32(rsize), e2.Groups[1].Count)
+ a.Equal(uint64(spec[0].end-e2.Groups[1].MinAssetIndex), e2.Groups[1].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[1].Count), len(e2.Groups[1].groupData.AssetOffsets))
+ a.Equal(basics.AssetIndex(0), e2.Groups[1].groupData.AssetOffsets[0])
+ a.Equal(oldAssetOffsets2[1:], e2.Groups[1].groupData.AssetOffsets[1:])
+ checkGroupDataArrays(a, int(e2.Groups[1].Count), &e2.Groups[1])
+ checkParams(t, e2.Groups[1])
+
e = genExtendedHolding(t, spec)
+ e2 = genExtendedParams(t, spec)
// split the group and insert right
aidx = spec[0].end - 1
- e.splitInsert(0, aidx, basics.AssetHolding{Amount: uint64(aidx)})
+ pos = e.split(0, aidx)
+ a.Equal(1, pos)
+ e.insertInto(pos, aidx, basics.AssetHolding{Amount: uint64(aidx)})
a.Equal(oldCount+1, e.Count)
a.Equal(2, len(e.Groups))
a.Equal(e.Count, e.Groups[0].Count+e.Groups[1].Count)
+ pos = e2.split(0, aidx)
+ a.Equal(1, pos)
+ e2.insertInto(pos, aidx, basics.AssetParams{Total: uint64(aidx), AssetName: fmt.Sprintf("a%d", aidx)})
+ a.Equal(oldCount+1, e2.Count)
+ a.Equal(2, len(e2.Groups))
+ a.Equal(e2.Count, e2.Groups[0].Count+e2.Groups[1].Count)
+
a.Equal(spec[0].start, e.Groups[0].MinAssetIndex)
a.Equal(uint32(lsize), e.Groups[0].Count)
a.Equal(uint64((lsize-1)*gap), e.Groups[0].DeltaMaxAssetIndex)
a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.AssetOffsets))
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Amounts))
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Frozens))
a.Equal(oldAssetOffsets1, e.Groups[0].groupData.AssetOffsets)
+ checkGroupDataArrays(a, int(e.Groups[0].Count), &e.Groups[0])
checkHoldings(t, e.Groups[0])
+ a.Equal(spec[0].start, e2.Groups[0].MinAssetIndex)
+ a.Equal(uint32(lsize), e2.Groups[0].Count)
+ a.Equal(uint64((lsize-1)*gap), e2.Groups[0].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[0].Count), len(e2.Groups[0].groupData.AssetOffsets))
+ a.Equal(oldAssetOffsets1, e2.Groups[0].groupData.AssetOffsets)
+ checkGroupDataArrays(a, int(e2.Groups[0].Count), &e2.Groups[0])
+ checkParams(t, e2.Groups[0])
+
a.Equal(spec[0].start+basics.AssetIndex(e.Groups[0].DeltaMaxAssetIndex+uint64(gap)), e.Groups[1].MinAssetIndex)
a.Equal(uint32(rsize+1), e.Groups[1].Count)
a.Equal(uint64(spec[0].end-e.Groups[1].MinAssetIndex), e.Groups[1].DeltaMaxAssetIndex)
a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.AssetOffsets))
- a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.Amounts))
- a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.Frozens))
+
+ a.Equal(spec[0].start+basics.AssetIndex(e2.Groups[0].DeltaMaxAssetIndex+uint64(gap)), e2.Groups[1].MinAssetIndex)
+ a.Equal(uint32(rsize+1), e2.Groups[1].Count)
+ a.Equal(uint64(spec[0].end-e2.Groups[1].MinAssetIndex), e2.Groups[1].DeltaMaxAssetIndex)
+ a.Equal(int(e2.Groups[1].Count), len(e2.Groups[1].groupData.AssetOffsets))
+
+ checkGroupDataArrays(a, int(e.Groups[1].Count), &e.Groups[1])
+ checkGroupDataArrays(a, int(e2.Groups[1].Count), &e2.Groups[1])
+
a.Equal(basics.AssetIndex(0), e.Groups[1].groupData.AssetOffsets[0])
a.Equal(oldAssetOffsets2[1:len(oldAssetOffsets2)-1], e.Groups[1].groupData.AssetOffsets[1:e.Groups[1].Count-2])
a.Equal(oldAssetOffsets2[len(oldAssetOffsets2)-1]-1, e.Groups[1].groupData.AssetOffsets[e.Groups[1].Count-2])
a.Equal(basics.AssetIndex(1), e.Groups[1].groupData.AssetOffsets[e.Groups[1].Count-1])
+
+ a.Equal(basics.AssetIndex(0), e2.Groups[1].groupData.AssetOffsets[0])
+ a.Equal(oldAssetOffsets2[1:len(oldAssetOffsets2)-1], e2.Groups[1].groupData.AssetOffsets[1:e2.Groups[1].Count-2])
+ a.Equal(oldAssetOffsets2[len(oldAssetOffsets2)-1]-1, e2.Groups[1].groupData.AssetOffsets[e2.Groups[1].Count-2])
+ a.Equal(basics.AssetIndex(1), e2.Groups[1].groupData.AssetOffsets[e2.Groups[1].Count-1])
+
checkHoldings(t, e.Groups[1])
+ checkParams(t, e2.Groups[1])
})
}
}
+func checkGroupDataArrays(a *require.Assertions, count int, group interface{}) {
+ if g, ok := group.(*AssetsHoldingGroup); ok {
+ gd := g.groupData
+ a.Equal(count, len(gd.AssetOffsets))
+ a.Equal(count, len(gd.Amounts))
+ a.Equal(count, len(gd.Frozens))
+ } else if g, ok := group.(*AssetsParamsGroup); ok {
+ gd := g.groupData
+ a.Equal(count, len(gd.AssetOffsets))
+ a.Equal(count, len(gd.Totals))
+ a.Equal(count, len(gd.Decimals))
+ a.Equal(count, len(gd.DefaultFrozens))
+ a.Equal(count, len(gd.UnitNames))
+ a.Equal(count, len(gd.AssetNames))
+ a.Equal(count, len(gd.URLs))
+ a.Equal(count, len(gd.MetadataHash))
+ a.Equal(count, len(gd.Managers))
+ a.Equal(count, len(gd.Reserves))
+ a.Equal(count, len(gd.Freezes))
+ a.Equal(count, len(gd.Clawbacks))
+ } else {
+ a.Fail(fmt.Sprintf("%T is not %T nor %T", group, &AssetsHoldingGroup{}, &AssetsParamsGroup{}))
+ }
+}
+
// test for ExtendedAssetHolding.insert and findGroup
func TestAssetHoldingInsertGroup(t *testing.T) {
a := require.New(t)
@@ -405,69 +628,91 @@ func TestAssetHoldingInsertGroup(t *testing.T) {
{4001, 5000, MaxHoldingGroupSize},
}
- e := genExtendedHolding(t, spec1)
-
- // new group at the beginning
- aidx := basics.AssetIndex(1)
- res := e.findGroup(aidx, 0)
- a.False(res.found)
- a.False(res.split)
- a.Equal(-1, res.gi)
-
- // split group 0
- aidx = basics.AssetIndex(spec1[0].start + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.True(res.split)
- a.Equal(0, res.gi)
-
- // insert into group 1 if skipping 0
- res = e.findGroup(aidx, 1)
- a.True(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
-
- // prepend into group 1
- aidx = basics.AssetIndex(spec1[0].end + 10)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
-
- // append into group 1
- aidx = basics.AssetIndex(spec1[1].end + 10)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
-
- // insert into group 1
- aidx = basics.AssetIndex(spec1[1].start + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
-
- // split group 2
- aidx = basics.AssetIndex(spec1[2].start + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.True(res.split)
- a.Equal(2, res.gi)
-
- // new group after group 2
- aidx = basics.AssetIndex(spec1[2].end + 100)
- res = e.findGroup(aidx, 0)
- a.False(res.found)
- a.False(res.split)
- a.Equal(2, res.gi)
-
- // new group after group 3
- aidx = basics.AssetIndex(spec1[3].end + 100)
- res = e.findGroup(aidx, 0)
- a.False(res.found)
- a.False(res.split)
- a.Equal(3, res.gi)
+ spec1_1 := []groupSpec{
+ {10, 700, MaxParamsGroupSize},
+ {1001, 1060, 3},
+ {2001, 3000, MaxParamsGroupSize},
+ {4001, 5000, MaxParamsGroupSize},
+ }
+
+ e1 := genExtendedHolding(t, spec1)
+ e2 := genExtendedParams(t, spec1_1)
+ var tests = []struct {
+ e AbstractAssetGroupList
+ spec []groupSpec
+ }{
+ {&e1, spec1},
+ {&e2, spec1_1},
+ }
+
+ for _, test := range tests {
+ e := test.e
+ spec := test.spec
+ t.Run(fmt.Sprintf("%T", e), func(t *testing.T) {
+
+ // new group at the beginning
+ aidx := basics.AssetIndex(1)
+ res := findGroup(aidx, 0, e)
+ a.False(res.found)
+ a.False(res.split)
+ a.Equal(-1, res.gi)
+
+ // split group 0
+ aidx = basics.AssetIndex(spec[0].start + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.True(res.split)
+ a.Equal(0, res.gi)
+
+ // insert into group 1 if skipping 0
+ res = findGroup(aidx, 1, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+
+ // prepend into group 1
+ aidx = basics.AssetIndex(spec[0].end + 10)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+
+ // append into group 1
+ aidx = basics.AssetIndex(spec[1].end + 10)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+
+ // insert into group 1
+ aidx = basics.AssetIndex(spec[1].start + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+
+ // split group 2
+ aidx = basics.AssetIndex(spec[2].start + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.True(res.split)
+ a.Equal(2, res.gi)
+
+ // new group after group 2
+ aidx = basics.AssetIndex(spec[2].end + 100)
+ res = findGroup(aidx, 0, e)
+ a.False(res.found)
+ a.False(res.split)
+ a.Equal(2, res.gi)
+
+ // new group after group 3
+ aidx = basics.AssetIndex(spec[3].end + 100)
+ res = findGroup(aidx, 0, e)
+ a.False(res.found)
+ a.False(res.split)
+ a.Equal(3, res.gi)
+ })
+ }
// check insertion
assets := []basics.AssetIndex{
@@ -482,148 +727,218 @@ func TestAssetHoldingInsertGroup(t *testing.T) {
for _, aidx := range assets {
holdings[aidx] = basics.AssetHolding{Amount: uint64(aidx)}
}
- oldCount := e.Count
-
- e.Insert(assets, holdings)
+ params := make(map[basics.AssetIndex]basics.AssetParams, len(assets))
+ for _, aidx := range assets {
+ params[aidx] = basics.AssetParams{Total: uint64(aidx), AssetName: fmt.Sprintf("a%d", aidx)}
+ }
+ oldCount1 := e1.Count
+ oldCount2 := e2.Count
- a.Equal(oldCount+uint32(len(assets)), e.Count)
- a.Equal(4+len(spec1), len(e.Groups))
+ e1.Insert(assets, holdings)
+ e2.Insert(assets, params)
- a.Equal(uint32(1), e.Groups[0].Count)
- a.Equal(assets[0], e.Groups[0].MinAssetIndex)
- a.Equal(uint64(0), e.Groups[0].DeltaMaxAssetIndex)
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.AssetOffsets))
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Amounts))
- a.Equal(int(e.Groups[0].Count), len(e.Groups[0].groupData.Frozens))
- a.Equal(basics.AssetIndex(0), e.Groups[0].groupData.AssetOffsets[0])
- checkHoldings(t, e.Groups[0])
-
- // two cases below checked in splitInsert test
- a.Equal(uint32(spec1[0].count/2+1), e.Groups[1].Count)
- a.Equal(int(e.Groups[1].Count), len(e.Groups[1].groupData.AssetOffsets))
- checkHoldings(t, e.Groups[1])
-
- a.Equal(uint32(spec1[0].count/2+1), e.Groups[2].Count)
- a.Equal(int(e.Groups[2].Count), len(e.Groups[2].groupData.AssetOffsets))
- checkHoldings(t, e.Groups[2])
-
- a.Equal(uint32(spec1[1].count+1), e.Groups[3].Count)
- a.Equal(spec1[1].start, e.Groups[3].MinAssetIndex)
- a.Equal(uint64(spec1[1].end-spec1[1].start), e.Groups[3].DeltaMaxAssetIndex)
- a.Equal(int(e.Groups[3].Count), len(e.Groups[3].groupData.AssetOffsets))
- a.Equal(int(e.Groups[3].Count), len(e.Groups[3].groupData.Amounts))
- a.Equal(int(e.Groups[3].Count), len(e.Groups[3].groupData.Frozens))
- checkHoldings(t, e.Groups[3])
-
- // checked in group insert test
- a.Equal(uint32(spec1[2].count), e.Groups[4].Count)
- a.Equal(int(e.Groups[4].Count), len(e.Groups[4].groupData.AssetOffsets))
- checkHoldings(t, e.Groups[4])
-
- a.Equal(uint32(1), e.Groups[5].Count)
- a.Equal(assets[4], e.Groups[5].MinAssetIndex)
- a.Equal(uint64(0), e.Groups[5].DeltaMaxAssetIndex)
- a.Equal(int(e.Groups[5].Count), len(e.Groups[5].groupData.AssetOffsets))
- a.Equal(int(e.Groups[5].Count), len(e.Groups[5].groupData.Amounts))
- a.Equal(int(e.Groups[5].Count), len(e.Groups[5].groupData.Frozens))
- a.Equal(basics.AssetIndex(0), e.Groups[5].groupData.AssetOffsets[0])
- checkHoldings(t, e.Groups[5])
-
- a.Equal(uint32(1), e.Groups[7].Count)
- a.Equal(assets[5], e.Groups[7].MinAssetIndex)
- a.Equal(uint64(0), e.Groups[7].DeltaMaxAssetIndex)
- a.Equal(int(e.Groups[7].Count), len(e.Groups[7].groupData.AssetOffsets))
- a.Equal(int(e.Groups[7].Count), len(e.Groups[7].groupData.Amounts))
- a.Equal(int(e.Groups[7].Count), len(e.Groups[7].groupData.Frozens))
- a.Equal(basics.AssetIndex(0), e.Groups[7].groupData.AssetOffsets[0])
- checkHoldings(t, e.Groups[7])
+ var tests2 = []struct {
+ e AbstractAssetGroupList
+ count uint32
+ spec []groupSpec
+ }{
+ {&e1, oldCount1, spec1},
+ {&e2, oldCount2, spec1_1},
+ }
+ for _, test := range tests2 {
+ e := test.e
+ oldCount := test.count
+ spec := test.spec
+ t.Run(fmt.Sprintf("%T", e), func(t *testing.T) {
+
+ a.Equal(oldCount+uint32(len(assets)), e.Total())
+ a.Equal(4+len(spec), e.Len())
+
+ a.Equal(uint32(1), e.Get(0).AssetCount())
+ a.Equal(assets[0], e.Get(0).MinAsset())
+ a.Equal(e.Get(0).MinAsset()+0, e.Get(0).MaxAsset()) // MaxAsset returns min asset + delta, 0 emphasizes expected delta value
+ checkGroupDataArrays(a, int(e.Get(0).AssetCount()), e.Get(0))
+ a.Equal(basics.AssetIndex(0), e.Get(0).GroupData().AssetDeltaValue(0))
+ checkGroup(t, e.Get(0))
+
+ // two cases below checked in split + insertAfter test
+ a.Equal(uint32(spec[0].count/2+1), e.Get(1).AssetCount())
+ checkGroupDataArrays(a, int(e.Get(1).AssetCount()), e.Get(1))
+ checkGroup(t, e.Get(1))
+
+ a.Equal(uint32(spec[0].count/2+1), e.Get(2).AssetCount())
+ checkGroupDataArrays(a, int(e.Get(2).AssetCount()), e.Get(2))
+ checkGroup(t, e.Get(2))
+
+ a.Equal(uint32(spec[1].count+1), e.Get(3).AssetCount())
+ a.Equal(spec[1].start, e.Get(3).MinAsset())
+ a.Equal(e.Get(3).MinAsset()+spec[1].end-spec[1].start, e.Get(3).MaxAsset())
+ checkGroupDataArrays(a, int(e.Get(3).AssetCount()), e.Get(3))
+ checkGroup(t, e.Get(3))
+
+ // checked in group insert test
+ a.Equal(uint32(spec[2].count), e.Get(4).AssetCount())
+ checkGroupDataArrays(a, int(e.Get(4).AssetCount()), e.Get(4))
+ checkGroup(t, e.Get(4))
+
+ a.Equal(uint32(1), e.Get(5).AssetCount())
+ a.Equal(assets[4], e.Get(5).MinAsset())
+ a.Equal(e.Get(5).MinAsset()+0, e.Get(5).MaxAsset())
+ checkGroupDataArrays(a, int(e.Get(5).AssetCount()), e.Get(5))
+ a.Equal(basics.AssetIndex(0), e.Get(5).GroupData().AssetDeltaValue(0))
+ checkGroup(t, e.Get(5))
+
+ a.Equal(uint32(1), e.Get(7).AssetCount())
+ a.Equal(assets[5], e.Get(7).MinAsset())
+ a.Equal(e.Get(7).MinAsset()+0, e.Get(7).MaxAsset())
+ checkGroupDataArrays(a, int(e.Get(7).AssetCount()), e.Get(7))
+ checkGroup(t, e.Get(7))
+ })
+ }
spec2 := []groupSpec{
{1001, 1060, 20},
{2001, 3000, MaxHoldingGroupSize},
}
- e = genExtendedHolding(t, spec2)
-
- // insert into group 0
- aidx = basics.AssetIndex(1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(0, res.gi)
-
- // insert into group 0
- aidx = basics.AssetIndex(spec2[0].start + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(0, res.gi)
-
- // insert into group 0
- aidx = basics.AssetIndex(spec2[0].end + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(0, res.gi)
-
- // split group 1
- aidx = basics.AssetIndex(spec2[1].start + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.True(res.split)
- a.Equal(1, res.gi)
-
- // new group after group 1
- aidx = basics.AssetIndex(spec2[1].end + 1)
- res = e.findGroup(aidx, 0)
- a.False(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
+ spec2_1 := []groupSpec{
+ {1001, 1060, 3},
+ {2001, 3000, MaxParamsGroupSize},
+ }
+
+ e1 = genExtendedHolding(t, spec2)
+ e2 = genExtendedParams(t, spec2_1)
+ tests = []struct {
+ e AbstractAssetGroupList
+ spec []groupSpec
+ }{
+ {&e1, spec2},
+ {&e2, spec2_1},
+ }
+
+ for _, test := range tests {
+ e := test.e
+ spec := test.spec
+ t.Run(fmt.Sprintf("%T", e), func(t *testing.T) {
+
+ // insert into group 0
+ aidx := basics.AssetIndex(1)
+ res := findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(0, res.gi)
+
+ // insert into group 0
+ aidx = basics.AssetIndex(spec[0].start + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(0, res.gi)
+
+ // insert into group 0
+ aidx = basics.AssetIndex(spec[0].end + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(0, res.gi)
+
+ // split group 1
+ aidx = basics.AssetIndex(spec[1].start + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.True(res.split)
+ a.Equal(1, res.gi)
+
+ // new group after group 1
+ aidx = basics.AssetIndex(spec[1].end + 1)
+ res = findGroup(aidx, 0, e)
+ a.False(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+ })
+ }
spec3 := []groupSpec{
{2001, 3000, MaxHoldingGroupSize},
{3002, 3062, 20},
}
- e = genExtendedHolding(t, spec3)
-
- // split group 0
- aidx = basics.AssetIndex(spec3[0].start + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.True(res.split)
- a.Equal(0, res.gi)
-
- // insert into group 1
- aidx = basics.AssetIndex(spec3[1].start - 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
+ spec3_1 := []groupSpec{
+ {2001, 3000, MaxParamsGroupSize},
+ {3002, 3062, 3},
+ }
- // insert into group 1
- aidx = basics.AssetIndex(spec3[1].end + 1)
- res = e.findGroup(aidx, 0)
- a.True(res.found)
- a.False(res.split)
- a.Equal(1, res.gi)
+ e1 = genExtendedHolding(t, spec3)
+ e2 = genExtendedParams(t, spec3_1)
+ tests = []struct {
+ e AbstractAssetGroupList
+ spec []groupSpec
+ }{
+ {&e1, spec3},
+ {&e2, spec3_1},
+ }
+ for _, test := range tests {
+ e := test.e
+ spec := test.spec
+ t.Run(fmt.Sprintf("%T", e), func(t *testing.T) {
+
+ // split group 0
+ aidx := basics.AssetIndex(spec[0].start + 1)
+ res := findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.True(res.split)
+ a.Equal(0, res.gi)
+
+ // insert into group 1
+ aidx = basics.AssetIndex(spec[1].start - 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+
+ // insert into group 1
+ aidx = basics.AssetIndex(spec[1].end + 1)
+ res = findGroup(aidx, 0, e)
+ a.True(res.found)
+ a.False(res.split)
+ a.Equal(1, res.gi)
+ })
+ }
spec4 := []groupSpec{
{2001, 3000, MaxHoldingGroupSize},
{3002, 4000, MaxHoldingGroupSize},
}
- e = genExtendedHolding(t, spec4)
+ spec4_1 := []groupSpec{
+ {2001, 3000, MaxParamsGroupSize},
+ {3002, 4000, MaxParamsGroupSize},
+ }
- // new group after 0
- aidx = basics.AssetIndex(spec4[0].end + 1)
- res = e.findGroup(aidx, 0)
- a.False(res.found)
- a.False(res.split)
- a.Equal(0, res.gi)
+ e1 = genExtendedHolding(t, spec4)
+ e2 = genExtendedParams(t, spec4_1)
+ tests = []struct {
+ e AbstractAssetGroupList
+ spec []groupSpec
+ }{
+ {&e1, spec4},
+ {&e2, spec4_1},
+ }
+ for _, test := range tests {
+ e := test.e
+ spec := test.spec
+ t.Run(fmt.Sprintf("%T", e), func(t *testing.T) {
+ // new group after 0
+ aidx := basics.AssetIndex(spec[0].end + 1)
+ res := findGroup(aidx, 0, e)
+ a.False(res.found)
+ a.False(res.split)
+ a.Equal(0, res.gi)
+ })
+ }
}
-func TestAssetHoldingDelete(t *testing.T) {
+func TestAssetDelete(t *testing.T) {
a := require.New(t)
spec := []groupSpec{
@@ -633,86 +948,122 @@ func TestAssetHoldingDelete(t *testing.T) {
}
e := genExtendedHolding(t, spec)
+ e2 := genExtendedParams(t, spec)
oldCount := e.Count
a.Equal(uint32(spec[0].count+spec[1].count+spec[2].count), e.Count)
+ a.Equal(e.Count, e2.Count)
a.Equal(uint32(spec[1].count), e.Groups[1].Count)
+ a.Equal(e.Groups[1].Count, e2.Groups[1].Count)
a.Equal(spec[1].start, e.Groups[1].MinAssetIndex)
+ a.Equal(e.Groups[1].MinAssetIndex, e2.Groups[1].MinAssetIndex)
a.Equal(uint64(0), e.Groups[1].DeltaMaxAssetIndex)
+ a.Equal(e.Groups[1].DeltaMaxAssetIndex, e2.Groups[1].DeltaMaxAssetIndex)
a.Equal(basics.AssetIndex(0), e.Groups[1].groupData.AssetOffsets[0])
+ a.Equal(e.Groups[1].groupData.AssetOffsets[0], e2.Groups[1].groupData.AssetOffsets[0])
- oldAssets := make(map[basics.AssetIndex]basics.AssetHolding, spec[0].count)
+ oldAssetHoldings := make(map[basics.AssetIndex]basics.AssetHolding, spec[0].count)
aidx := e.Groups[0].MinAssetIndex
for i := 0; i < spec[0].count; i++ {
aidx += e.Groups[0].groupData.AssetOffsets[i]
- oldAssets[aidx] = basics.AssetHolding{Amount: e.Groups[0].groupData.Amounts[i]}
+ oldAssetHoldings[aidx] = basics.AssetHolding{Amount: e.Groups[0].groupData.Amounts[i]}
}
- checkAssetMap := func(delAsset basics.AssetIndex, g AssetsHoldingGroup) {
+ oldAssetParams := make(map[basics.AssetIndex]basics.AssetParams, spec[0].count)
+ aidx = e2.Groups[0].MinAssetIndex
+ for i := 0; i < spec[0].count; i++ {
+ aidx += e2.Groups[0].groupData.AssetOffsets[i]
+ oldAssetParams[aidx] = basics.AssetParams{Total: e2.Groups[0].groupData.Totals[i], AssetName: e2.Groups[0].groupData.AssetNames[i]}
+ }
+
+ checkAssetHoldingsMap := func(delAsset basics.AssetIndex, g AssetsHoldingGroup) {
newAssets := make(map[basics.AssetIndex]basics.AssetHolding, g.Count)
aidx := g.MinAssetIndex
for i := 0; i < int(g.Count); i++ {
aidx += g.groupData.AssetOffsets[i]
- newAssets[aidx] = basics.AssetHolding{Amount: e.Groups[0].groupData.Amounts[i]}
+ newAssets[aidx] = basics.AssetHolding{Amount: g.groupData.Amounts[i]}
a.Equal(uint64(aidx), g.groupData.Amounts[i])
}
a.Equal(int(g.Count), len(newAssets))
- a.Contains(oldAssets, delAsset)
+ a.Contains(oldAssetHoldings, delAsset)
- oldAssetsCopy := make(map[basics.AssetIndex]basics.AssetHolding, len(oldAssets))
- for k, v := range oldAssets {
- oldAssetsCopy[k] = v
+ oldAssetHoldingsCopy := make(map[basics.AssetIndex]basics.AssetHolding, len(oldAssetHoldings))
+ for k, v := range oldAssetHoldings {
+ oldAssetHoldingsCopy[k] = v
}
- delete(oldAssetsCopy, delAsset)
- a.Equal(oldAssetsCopy, newAssets)
+ delete(oldAssetHoldingsCopy, delAsset)
+ a.Equal(oldAssetHoldingsCopy, newAssets)
}
- assetByIndex := func(gi, ai int, e ExtendedAssetHolding) basics.AssetIndex {
- aidx := e.Groups[gi].MinAssetIndex
- for i := 0; i <= ai; i++ {
- aidx += e.Groups[gi].groupData.AssetOffsets[i]
+ checkAssetParamsMap := func(delAsset basics.AssetIndex, g AssetsParamsGroup) {
+ newAssets := make(map[basics.AssetIndex]basics.AssetParams, g.Count)
+ aidx := g.MinAssetIndex
+ for i := 0; i < int(g.Count); i++ {
+ aidx += g.groupData.AssetOffsets[i]
+ newAssets[aidx] = basics.AssetParams{Total: g.groupData.Totals[i], AssetName: g.groupData.AssetNames[i]}
+ a.Equal(uint64(aidx), g.groupData.Totals[i])
+ }
+ a.Equal(int(g.Count), len(newAssets))
+ a.Contains(oldAssetParams, delAsset)
+
+ oldAssetParamsCopy := make(map[basics.AssetIndex]basics.AssetParams, len(oldAssetParams))
+ for k, v := range oldAssetParams {
+ oldAssetParamsCopy[k] = v
}
- return aidx
+ delete(oldAssetParamsCopy, delAsset)
+ a.Equal(oldAssetParamsCopy, newAssets)
}
// delete a group with only one item
- e.Delete(1, 0)
+ e.Delete([]basics.AssetIndex{spec[1].start})
a.Equal(oldCount-1, e.Count)
a.Equal(len(spec)-1, len(e.Groups))
+ e2.Delete([]basics.AssetIndex{spec[1].start})
+ a.Equal(oldCount-1, e2.Count)
+ a.Equal(len(spec)-1, len(e2.Groups))
+
gap := int(spec[0].end-spec[0].start) / spec[0].count
- // delete first entry in a group
- e = genExtendedHolding(t, spec)
- aidx = assetByIndex(0, 0, e)
- e.Delete(0, 0)
- a.Equal(oldCount-1, e.Count)
- a.Equal(len(spec), len(e.Groups))
- a.Equal(spec[0].start+basics.AssetIndex(gap), e.Groups[0].MinAssetIndex)
- a.Equal(uint64(spec[0].end-spec[0].start-basics.AssetIndex(gap)), e.Groups[0].DeltaMaxAssetIndex)
- checkAssetMap(aidx, e.Groups[0])
+ tests := []struct {
+ gi int
+ ai int
+ minAsset basics.AssetIndex
+ maxDelta uint64
+ }{
+ // delete first entry in a group
+ {0, 0, spec[0].start + basics.AssetIndex(gap), uint64(spec[0].end - spec[0].start - basics.AssetIndex(gap))},
+ // delete last entry in a group
+ // assets are 10, 12, 14, ..., 700
+ // the second last is 2 * (spec[0].count-2) + 10
+		// so the delta = (spec[0].count-2)*gap + 10 - 10
+ {0, spec[0].count - 1, spec[0].start, uint64((spec[0].count - 2) * gap)},
+ // delete some middle entry
+ {0, 1, spec[0].start, uint64(spec[0].end - spec[0].start)},
+ }
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("holding_%d", i+1), func(t *testing.T) {
+ e := genExtendedHolding(t, spec)
+ aidx := e.Get(test.gi).AssetAt(test.ai)
+ e.deleteByIndex(test.gi, test.ai)
+ a.Equal(oldCount-1, e.Count)
+ a.Equal(len(spec), len(e.Groups))
+ a.Equal(test.minAsset, e.Groups[0].MinAssetIndex)
+ a.Equal(test.maxDelta, e.Groups[0].DeltaMaxAssetIndex)
+ checkAssetHoldingsMap(aidx, e.Groups[0])
+ })
- // delete last entry in a group
- e = genExtendedHolding(t, spec)
- aidx = assetByIndex(0, spec[0].count-1, e)
- e.Delete(0, spec[0].count-1)
- a.Equal(oldCount-1, e.Count)
- a.Equal(len(spec), len(e.Groups))
- a.Equal(spec[0].start, e.Groups[0].MinAssetIndex)
- // assets are 10, 12, 14, ..., 700
- // the second last is 2 * (spec[0].count-2) + 10
- // so the delta = (spec[0].count-2)*gap + 10 -10
- a.Equal(uint64((spec[0].count-2)*gap), e.Groups[0].DeltaMaxAssetIndex)
- checkAssetMap(aidx, e.Groups[0])
+ t.Run(fmt.Sprintf("params_%d", i+1), func(t *testing.T) {
+ e := genExtendedParams(t, spec)
+ aidx = e.Get(test.gi).AssetAt(test.ai)
+ e.deleteByIndex(test.gi, test.ai)
+ a.Equal(oldCount-1, e.Count)
+ a.Equal(len(spec), len(e.Groups))
+ a.Equal(test.minAsset, e.Groups[0].MinAssetIndex)
+ a.Equal(test.maxDelta, e.Groups[0].DeltaMaxAssetIndex)
+ checkAssetParamsMap(aidx, e.Groups[0])
- // delete some middle entry
- e = genExtendedHolding(t, spec)
- aidx = assetByIndex(0, 1, e)
- e.Delete(0, 1)
- a.Equal(oldCount-1, e.Count)
- a.Equal(len(spec), len(e.Groups))
- a.Equal(spec[0].start, e.Groups[0].MinAssetIndex)
- a.Equal(uint64(spec[0].end-spec[0].start), e.Groups[0].DeltaMaxAssetIndex)
- checkAssetMap(aidx, e.Groups[0])
+ })
+ }
}
func TestAssetHoldingDeleteRepeat(t *testing.T) {
@@ -738,11 +1089,26 @@ func TestAssetHoldingDeleteRepeat(t *testing.T) {
maxReps := rand.Intn(30)
for c := 0; c < maxReps; c++ {
maxIdx := rand.Intn(MaxHoldingGroupSize)
- delOrder := rand.Perm(maxIdx)
- for _, i := range delOrder {
- if i < int(e.Groups[0].Count) {
- e.Delete(0, i)
+ if c%2 == 0 {
+ delOrder := rand.Perm(maxIdx)
+ for _, i := range delOrder {
+ if i < int(e.Groups[0].Count) {
+ e.deleteByIndex(0, i)
+ }
+ }
+ } else {
+ delOrder := make([]basics.AssetIndex, 0, maxIdx)
+ for i := 1; i <= maxIdx; i++ {
+ if i >= int(e.Groups[0].Count) {
+ break
+ }
+ gi, ai := e.FindAsset(basics.AssetIndex(i), 0)
+ if gi != -1 && ai != -1 {
+ delOrder = append(delOrder, basics.AssetIndex(i))
+ }
}
+ _, err := e.Delete(delOrder)
+ a.NoError(err)
}
// validate the group after deletion
@@ -770,7 +1136,9 @@ func genExtendedHoldingGroups(spec []groupLayout) (e ExtendedAssetHolding) {
}
e.Groups = make([]AssetsHoldingGroup, len(spec), len(spec))
for i, s := range spec {
- e.Groups[i] = AssetsHoldingGroup{Count: uint32(s.count), loaded: s.loaded}
+ e.Groups[i] = AssetsHoldingGroup{
+ AssetGroupDesc: AssetGroupDesc{Count: uint32(s.count)},
+ loaded: s.loaded}
}
return
}
@@ -860,14 +1228,19 @@ func TestFindLoadedSiblings(t *testing.T) {
rt := getRandTest(seed)
tests = append(tests, rt)
}
+ for seed := int64(1); seed < 1000; seed++ {
+ rt := getRandTest(seed)
+ tests = append(tests, rt)
+ }
rt := getRandTest(time.Now().Unix())
tests = append(tests, rt)
+ printSeed := false
for i, test := range tests {
t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
e := genExtendedHoldingGroups(test.i)
- l, c := e.findLoadedSiblings()
- if test.seed != 0 {
+ l, c := findLoadedSiblings(&e)
+ if test.seed != 0 && printSeed {
fmt.Printf("seed = %d\n", test.seed)
}
require.Equal(t, test.r.loaded, l)
@@ -876,9 +1249,7 @@ func TestFindLoadedSiblings(t *testing.T) {
}
}
-// generate groups from sizes. group N increments each asset id by N
-// i.e. Group[0] = [aidx, aidx+1, aidx+2,...]
-func genExtendedHoldingGroupsFromSizes(t *testing.T, sizes []int, aidx basics.AssetIndex) (e ExtendedAssetHolding) {
+func specFromSizes(t testing.TB, sizes []int, aidx basics.AssetIndex) []groupSpec {
spec := make([]groupSpec, 0, len(sizes))
for i, size := range sizes {
increment := i + 1
@@ -887,7 +1258,13 @@ func genExtendedHoldingGroupsFromSizes(t *testing.T, sizes []int, aidx basics.As
spec = append(spec, s)
aidx = end + 1
}
+ return spec
+}
+// generate groups from sizes. group N increments each asset id by N
+// i.e. Group[0] = [aidx, aidx+1, aidx+2,...]
+func genExtendedHoldingGroupsFromSizes(t testing.TB, sizes []int, aidx basics.AssetIndex) (e ExtendedAssetHolding) {
+ spec := specFromSizes(t, sizes, aidx)
e = genExtendedHolding(t, spec)
for i := 0; i < len(e.Groups); i++ {
e.Groups[i].AssetGroupKey = int64(i + 1)
@@ -896,6 +1273,16 @@ func genExtendedHoldingGroupsFromSizes(t *testing.T, sizes []int, aidx basics.As
return
}
+func genExtendedParamsGroupsFromSizes(t testing.TB, sizes []int, aidx basics.AssetIndex) (e ExtendedAssetParams) {
+ spec := specFromSizes(t, sizes, aidx)
+ e = genExtendedParams(t, spec)
+ for i := 0; i < len(e.Groups); i++ {
+ e.Groups[i].AssetGroupKey = int64(i + 1)
+ }
+
+ return
+}
+
func getAllHoldings(e ExtendedAssetHolding) map[basics.AssetIndex]basics.AssetHolding {
holdings := make(map[basics.AssetIndex]basics.AssetHolding, int(e.Count))
for _, g := range e.Groups {
@@ -908,62 +1295,98 @@ func getAllHoldings(e ExtendedAssetHolding) map[basics.AssetIndex]basics.AssetHo
return holdings
}
+func getAllParams(e ExtendedAssetParams) map[basics.AssetIndex]basics.AssetParams {
+ params := make(map[basics.AssetIndex]basics.AssetParams, int(e.Count))
+ for _, g := range e.Groups {
+ aidx := g.MinAssetIndex
+ for ai, offset := range g.groupData.AssetOffsets {
+ aidx += offset
+ params[aidx] = g.GetParams(ai)
+ }
+ }
+ return params
+}
+
func TestGroupMergeInternal(t *testing.T) {
- estimate := func(sizes []int) (int, int, int) {
+ estimate := func(sizes []int, assetThreshold int) (int, int, int) {
sum := 0
for _, size := range sizes {
sum += size
}
- groupsNeeded := (sum + MaxHoldingGroupSize - 1) / MaxHoldingGroupSize
+ groupsNeeded := (sum + assetThreshold - 1) / assetThreshold
groupsToDelete := len(sizes) - groupsNeeded
return groupsNeeded, groupsToDelete, sum
}
type test struct {
- sizes []int
+ sizes []int
+ maxSize int
+ seed int64
}
tests := []test{
- {[]int{1, 2}},
- {[]int{1, 2, 3}},
- {[]int{1, 255, 3}},
- {[]int{1, 253, 1}},
- {[]int{256, 2, 3}},
- {[]int{256, 1, 256}},
- {[]int{254, 1, 1}},
- {[]int{256, 255, 1}},
- {[]int{256, 256, 1}},
- {[]int{256, 256, 256}},
- {[]int{128, 179, 128, 142, 128, 164, 128, 156, 147}},
- {[]int{128, 168, 242, 128, 144, 255, 232}},
- }
-
- // random test
- n := rand.Intn(100)
- sizes := make([]int, n, n)
- for i := 0; i < n; i++ {
- sizes[i] = rand.Intn(MaxHoldingGroupSize-1) + 1 // no zeroes please
- }
- tests = append(tests, test{sizes})
+ {[]int{1, 2}, MaxHoldingGroupSize, 0},
+ {[]int{1, 2, 3}, MaxHoldingGroupSize, 0},
+ {[]int{1, 255, 3}, MaxHoldingGroupSize, 0},
+ {[]int{1, 253, 1}, MaxHoldingGroupSize, 0},
+ {[]int{256, 2, 3}, MaxHoldingGroupSize, 0},
+ {[]int{256, 1, 256}, MaxHoldingGroupSize, 0},
+ {[]int{254, 1, 1}, MaxHoldingGroupSize, 0},
+ {[]int{256, 255, 1}, MaxHoldingGroupSize, 0},
+ {[]int{256, 256, 1}, MaxHoldingGroupSize, 0},
+ {[]int{256, 256, 256}, MaxHoldingGroupSize, 0},
+ {[]int{128, 179, 128, 142, 128, 164, 128, 156, 147}, MaxHoldingGroupSize, 0},
+ {[]int{128, 168, 242, 128, 144, 255, 232}, MaxHoldingGroupSize, 0},
+ {[]int{1, 2}, MaxParamsGroupSize, 0},
+ {[]int{1, 2, 3}, MaxParamsGroupSize, 0},
+ {[]int{1, 13, 3}, MaxParamsGroupSize, 0},
+ {[]int{1, 12, 1}, MaxParamsGroupSize, 0},
+ {[]int{14, 2, 3}, MaxParamsGroupSize, 0},
+ {[]int{14, 1, 14}, MaxParamsGroupSize, 0},
+ {[]int{12, 1, 1}, MaxParamsGroupSize, 0},
+ }
+
+ // random tests
+ addRandomTest := func(maxSize int, seed int64) {
+ rand.Seed(seed)
+ n := rand.Intn(1000)
+ sizes := make([]int, n, n)
+ for i := 0; i < n; i++ {
+ sizes[i] = rand.Intn(maxSize-1) + 1 // no zeroes please
+ }
+ tests = append(tests, test{sizes, maxSize, seed})
+ }
+ for s := int64(1); s < 100; s++ {
+ addRandomTest(MaxHoldingGroupSize, s)
+ addRandomTest(MaxParamsGroupSize, s)
+ }
+
+ printSeed := false
for n, test := range tests {
- t.Run(fmt.Sprintf("%d", n), func(t *testing.T) {
+ size := uint32(test.maxSize)
+ t.Run(fmt.Sprintf("%d_%d", n, size), func(t *testing.T) {
a := require.New(t)
sizes := test.sizes
- groupsNeeded, groupsToDelete, totalAssets := estimate(sizes)
+ groupsNeeded, groupsToDelete, totalAssets := estimate(sizes, int(size))
a.Equal(len(sizes), groupsNeeded+groupsToDelete)
e := genExtendedHoldingGroupsFromSizes(t, sizes, basics.AssetIndex(1))
oldCount := e.Count
+ if test.seed != 0 && printSeed {
+ fmt.Printf("seed = %d\n", test.seed)
+ }
oldHoldings := getAllHoldings(e)
- deleted := e.merge(0, len(sizes), groupsToDelete)
+ deleted := mergeInternal(&e, 0, len(sizes), groupsToDelete, size)
a.Equal(groupsToDelete, len(deleted))
a.Equal(groupsNeeded, len(e.Groups))
a.Equal(oldCount, e.Count)
for i := 0; i < groupsNeeded-1; i++ {
- a.Equal(uint32(MaxHoldingGroupSize), e.Groups[i].Count)
+ a.Equal(uint32(size), e.Groups[i].Count)
+ }
+ if groupsNeeded > 0 {
+ a.Equal(uint32(totalAssets-(groupsNeeded-1)*int(size)), e.Groups[groupsNeeded-1].Count)
}
- a.Equal(uint32(totalAssets-(groupsNeeded-1)*MaxHoldingGroupSize), e.Groups[groupsNeeded-1].Count)
newHoldings := getAllHoldings(e)
a.Equal(oldHoldings, newHoldings)
@@ -972,7 +1395,20 @@ func TestGroupMergeInternal(t *testing.T) {
}
func TestGroupMerge(t *testing.T) {
- delgroup := func(e ExtendedAssetHolding, d []int) ExtendedAssetHolding {
+ hdelgroup := func(e ExtendedAssetHolding, d []int) ExtendedAssetHolding {
+ offset := 0
+ for _, gi := range d {
+ e.Count -= e.Groups[gi+offset].Count
+ if gi == len(e.Groups)-1 {
+ e.Groups = e.Groups[:len(e.Groups)-1]
+ } else {
+ e.Groups = append(e.Groups[:gi], e.Groups[gi+1:]...)
+ }
+ }
+ return e
+ }
+
+ pdelgroup := func(e ExtendedAssetParams, d []int) ExtendedAssetParams {
offset := 0
for _, gi := range d {
e.Count -= e.Groups[gi+offset].Count
@@ -1015,14 +1451,14 @@ func TestGroupMerge(t *testing.T) {
}
for n, test := range tests {
- t.Run(fmt.Sprintf("%d", n), func(t *testing.T) {
+ t.Run(fmt.Sprintf("holding_%d", n), func(t *testing.T) {
a := require.New(t)
sizes := test.sizes
e := genExtendedHoldingGroupsFromSizes(t, sizes, basics.AssetIndex(1))
for _, gi := range test.unload {
e.Groups[gi].loaded = false
}
- e = delgroup(e, test.del)
+ e = hdelgroup(e, test.del)
oldCount := e.Count
oldHoldings := getAllHoldings(e)
@@ -1032,6 +1468,42 @@ func TestGroupMerge(t *testing.T) {
a.Equal(oldCount, e.Count)
newHoldings := getAllHoldings(e)
a.Equal(oldHoldings, newHoldings)
+ var count uint32
+ for _, g := range e.Groups {
+ count += g.Count
+ }
+ a.Equal(count, e.Count)
+ })
+
+ t.Run(fmt.Sprintf("params_%d", n), func(t *testing.T) {
+ a := require.New(t)
+ sizes := make([]int, len(test.sizes))
+ copy(sizes, test.sizes)
+ // fixup sizes
+ for i, size := range sizes {
+ if size > MaxParamsGroupSize {
+ sizes[i] = MaxParamsGroupSize - (MaxHoldingGroupSize - size)
+ }
+ }
+ e := genExtendedParamsGroupsFromSizes(t, sizes, basics.AssetIndex(1))
+ for _, gi := range test.unload {
+ e.Groups[gi].loaded = false
+ }
+ e = pdelgroup(e, test.del)
+ oldCount := e.Count
+ oldParams := getAllParams(e)
+
+ loaded, deleted := e.Merge()
+ a.Equal(test.r.l, loaded)
+ a.Equal(test.r.d, deleted)
+ a.Equal(oldCount, e.Count)
+ newParams := getAllParams(e)
+ a.Equal(oldParams, newParams)
+ var count uint32
+ for _, g := range e.Groups {
+ count += g.Count
+ }
+ a.Equal(count, e.Count)
})
}
@@ -1049,3 +1521,38 @@ func TestGroupMerge(t *testing.T) {
a.Equal(oldHoldings, newHoldings)
}
+
+func viaInterface(agl AbstractAssetGroupList) (total int64) {
+ for i := 0; i < agl.Len(); i++ {
+ total += int64(len(agl.Get(i).Encode()))
+ agl.Get(i).SetKey(total)
+ }
+ return total
+}
+
+func viaType(a *ExtendedAssetHolding) (total int64) {
+ for i := 0; i < len(a.Groups); i++ {
+ total += int64(len(a.Groups[i].Encode()))
+ a.Groups[i].SetKey(total)
+ }
+ return total
+}
+
+var result int64
+
+func BenchmarkSliceVsInterface(b *testing.B) {
+ tests := []bool{false, true}
+ sizes := []int{128, 179, 128, 142, 128, 164, 128, 156, 147}
+ e := genExtendedHoldingGroupsFromSizes(b, sizes, basics.AssetIndex(1))
+ for _, isSlice := range tests {
+ b.Run(fmt.Sprintf("slice=%v", isSlice), func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ if isSlice {
+ result += viaType(&e)
+ } else {
+ result += viaInterface(&e)
+ }
+ }
+ })
+ }
+}
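The estimate closure in TestGroupMergeInternal above reduces to ceiling division over the summed group sizes. A minimal standalone sketch of that arithmetic, with illustrative names that are not part of the patch:

package main

import "fmt"

// groupsAfterMerge returns how many groups of at most maxSize assets are
// needed to hold the given per-group sizes, and how many of the existing
// groups a merge would therefore delete.
func groupsAfterMerge(sizes []int, maxSize int) (needed, deleted int) {
	sum := 0
	for _, s := range sizes {
		sum += s
	}
	needed = (sum + maxSize - 1) / maxSize // ceiling division
	deleted = len(sizes) - needed
	return
}

func main() {
	// {256, 255, 1} with maxSize 256: 512 assets fit into 2 groups, so 1 group is deleted.
	fmt.Println(groupsAfterMerge([]int{256, 255, 1}, 256))
}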
diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go
index 9ba392e98..9fe8f1bdc 100644
--- a/ledger/ledgercore/statedelta.go
+++ b/ledger/ledgercore/statedelta.go
@@ -17,6 +17,8 @@
package ledgercore
import (
+ "fmt"
+
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -82,25 +84,40 @@ type StateDelta struct {
initialTransactionsCount int
}
-// HoldingAction is an enum of actions on holdings
-//msgp:ignore HoldingAction
-type HoldingAction uint64
+// EntityAction is an enum of actions on asset/app holdings and params
+//msgp:ignore EntityAction
+type EntityAction uint64
const (
- // ActionCreate is for asset holding creation
- ActionCreate HoldingAction = 1 + iota
- // ActionDelete is for asset holding creation
- ActionDelete
+ // ActionHoldingCreate is for asset holding creation
+ ActionHoldingCreate EntityAction = 1 + iota
+	// ActionHoldingDelete is for asset holding deletion
+ ActionHoldingDelete
+	// ActionParamsCreate is for asset params creation
+ ActionParamsCreate
+	// ActionParamsDelete is for asset params deletion
+ ActionParamsDelete
)
+// EntityDelta holds asset/app actions
+//msgp:ignore EntityDelta
+type EntityDelta map[basics.CreatableIndex]EntityAction
+
+// AccountEntityDelta holds asset/app actions per account
+//msgp:ignore AccountEntityDelta
+type AccountEntityDelta map[basics.Address]EntityDelta
+
// AccountDeltas stores ordered accounts and allows fast lookup by address
+//msgp:ignore AccountDeltas
type AccountDeltas struct {
// actual data
accts []PersistedBalanceRecord
// cache for addr to deltas index resolution
acctsCache map[basics.Address]int
- // holdings keeps track of created and deleted holdings per address
- holdings map[basics.Address]map[basics.AssetIndex]HoldingAction
+	// entityHoldings keeps track of created and deleted asset holdings and app local states per address
+ entityHoldings AccountEntityDelta
+ // entityParams keeps track of created and deleted asset and app params per address
+ entityParams AccountEntityDelta
}
// PersistedBalanceRecord is similar to BalanceRecord but contains PersistedAccountData
@@ -116,9 +133,10 @@ type PersistedBalanceRecord struct {
func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, compactCertNext basics.Round) StateDelta {
return StateDelta{
Accts: AccountDeltas{
- accts: make([]PersistedBalanceRecord, 0, hint*2),
- acctsCache: make(map[basics.Address]int, hint*2),
- holdings: make(map[basics.Address]map[basics.AssetIndex]HoldingAction),
+ accts: make([]PersistedBalanceRecord, 0, hint*2),
+ acctsCache: make(map[basics.Address]int, hint*2),
+ entityHoldings: make(AccountEntityDelta),
+ entityParams: make(AccountEntityDelta),
},
Txids: make(map[transactions.Txid]basics.Round, hint),
Txleases: make(map[Txlease]basics.Round, hint),
@@ -188,27 +206,61 @@ func (ad *AccountDeltas) upsert(pbr PersistedBalanceRecord) {
ad.acctsCache[addr] = last
}
-// SetHoldingDelta saves creation/deletion info about asset holding
+// SetEntityDelta saves creation/deletion info about asset/app holdings and params
// Creation is not really important since the holding is already in ad.accts,
// but saving deletion info is the only way to know if the asset is gone
-func (ad *AccountDeltas) SetHoldingDelta(addr basics.Address, aidx basics.AssetIndex, action HoldingAction) {
- hmap, ok := ad.holdings[addr]
+func (ad *AccountDeltas) SetEntityDelta(addr basics.Address, cidx basics.CreatableIndex, action EntityAction) {
+ var entityDelta EntityDelta
+ ok := false
+
+ if action == ActionHoldingCreate || action == ActionHoldingDelete {
+ entityDelta, ok = ad.entityHoldings[addr]
+ } else if action == ActionParamsCreate || action == ActionParamsDelete {
+ entityDelta, ok = ad.entityParams[addr]
+ } else {
+ panic(fmt.Sprintf("SetEntityDelta: unknown action %d", action))
+ }
+
if !ok {
// in most cases there will be only one asset modification per account
- hmap = map[basics.AssetIndex]HoldingAction{aidx: action}
+ entityDelta = EntityDelta{cidx: action}
} else {
- hmap[aidx] = action
+ entityDelta[cidx] = action
}
- if ad.holdings == nil {
- ad.holdings = make(map[basics.Address]map[basics.AssetIndex]HoldingAction)
+ if action == ActionHoldingCreate || action == ActionHoldingDelete {
+ if ad.entityHoldings == nil {
+ ad.entityHoldings = make(AccountEntityDelta)
+ }
+ ad.entityHoldings[addr] = entityDelta
+ } else if action == ActionParamsCreate || action == ActionParamsDelete {
+ if ad.entityParams == nil {
+ ad.entityParams = make(AccountEntityDelta)
+ }
+ ad.entityParams[addr] = entityDelta
}
- ad.holdings[addr] = hmap
}
-// GetHoldingDeltas return map of created/deleted asset holdings
-func (ad AccountDeltas) GetHoldingDeltas(addr basics.Address) map[basics.AssetIndex]HoldingAction {
- return ad.holdings[addr]
+// GetEntityParamsDeltas returns a map of created/deleted asset/app params
+func (ad AccountDeltas) GetEntityParamsDeltas(addr basics.Address) EntityDelta {
+ return ad.entityParams[addr]
+}
+
+// GetEntityHoldingDeltas returns a map of created/deleted asset/app holdings
+func (ad AccountDeltas) GetEntityHoldingDeltas(addr basics.Address) EntityDelta {
+ return ad.entityHoldings[addr]
+}
+
+// Update merges the entries from other over the entries in e and returns the result as a new object
+func (e EntityDelta) Update(other EntityDelta) (result EntityDelta) {
+ result = make(EntityDelta, len(e)+len(other))
+ for cidx, action := range e {
+ result[cidx] = action
+ }
+ for cidx, action := range other {
+ result[cidx] = action
+ }
+ return
}
// OptimizeAllocatedMemory by reallocating maps to needed capacity
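As a rough orientation for the delta API introduced above, the following minimal usage sketch assumes the package paths from the diff headers and the symbol names added in this hunk; it is illustrative only, not code from the patch:

package main

import (
	"fmt"

	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/ledger/ledgercore"
)

func main() {
	var addr basics.Address // zero address, purely for illustration
	var ad ledgercore.AccountDeltas

	// Holding and params changes are routed into separate per-account maps.
	ad.SetEntityDelta(addr, basics.CreatableIndex(5), ledgercore.ActionHoldingDelete)
	ad.SetEntityDelta(addr, basics.CreatableIndex(7), ledgercore.ActionParamsCreate)

	fmt.Println(ad.GetEntityHoldingDeltas(addr)) // map[5:2], i.e. ActionHoldingDelete
	fmt.Println(ad.GetEntityParamsDeltas(addr))  // map[7:3], i.e. ActionParamsCreate
}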
diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go
index ab16e1f96..d4c1e068c 100644
--- a/ledger/ledgercore/statedelta_test.go
+++ b/ledger/ledgercore/statedelta_test.go
@@ -149,3 +149,44 @@ func BenchmarkTxLeases(b *testing.B) {
}
}
}
+
+func TestEntityDeltaUpdate(t *testing.T) {
+ a := require.New(t)
+
+ o := EntityDelta{
+ 1: ActionHoldingCreate,
+ 2: ActionHoldingCreate,
+ 3: ActionHoldingCreate,
+ }
+
+ n := EntityDelta{
+ 1: ActionHoldingDelete,
+ 4: ActionHoldingDelete,
+ }
+
+ r := o.Update(n)
+
+ a.Equal(
+ EntityDelta{
+ 1: ActionHoldingCreate,
+ 2: ActionHoldingCreate,
+ 3: ActionHoldingCreate,
+ },
+ o)
+
+ a.Equal(
+ EntityDelta{
+ 1: ActionHoldingDelete,
+ 4: ActionHoldingDelete,
+ },
+ n)
+
+ a.Equal(
+ EntityDelta{
+ 1: ActionHoldingDelete,
+ 2: ActionHoldingCreate,
+ 3: ActionHoldingCreate,
+ 4: ActionHoldingDelete,
+ },
+ r)
+}
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index 4068bf03f..23f7ef28d 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -183,6 +183,89 @@ func TestAssetValidRounds(t *testing.T) {
a.Equal(basics.Round(cparams.MaxTxnLife+1), tx.LastValid)
}
+// createTestAssets creates numAssets assets on account0 and waits for the transactions to confirm
+func createTestAssets(a *require.Assertions, fixture *fixtures.RestClientFixture, numAssets int, account0 string, manager string, reserve string, freeze string, clawback string, assetURL string, assetMetadataHash []byte, params config.ConsensusParams) {
+ txids := make(map[string]string)
+ client := fixture.LibGoalClient
+ min := func(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+ }
+
+ i := 1
+ for i <= numAssets {
+ // re-generate wh, since this test takes a while and sometimes
+ // the wallet handle expires.
+ wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ groupSize := min(params.MaxTxGroupSize, numAssets+1-i)
+ if groupSize == 1 {
+ tx, err := client.MakeUnsignedAssetCreateTx(uint64(i), false, manager, reserve, freeze, clawback, fmt.Sprintf("test%d", i), fmt.Sprintf("testname%d", i), assetURL, assetMetadataHash, 0)
+ a.NoError(err)
+ txid, err := helperFillSignBroadcast(client, wh, account0, tx, err)
+ a.NoError(err)
+ txids[txid] = account0
+ } else {
+ txns := make([]transactions.Transaction, 0, groupSize)
+ stxns := make([]transactions.SignedTxn, 0, groupSize)
+ for j := 0; j < groupSize; j++ {
+ tx, err := client.MakeUnsignedAssetCreateTx(uint64(i+j), false, manager, reserve, freeze, clawback, fmt.Sprintf("test%d", i+j), fmt.Sprintf("testname%d", i+j), assetURL, assetMetadataHash, 0)
+ a.NoError(err)
+ tx, err = client.FillUnsignedTxTemplate(account0, 0, 0, 1000000, tx)
+ a.NoError(err)
+ txns = append(txns, tx)
+ }
+ gid, err := client.GroupID(txns)
+ a.NoError(err)
+ for j := 0; j < groupSize; j++ {
+ txns[j].Group = gid
+ stxn, err := client.SignTransactionWithWallet(wh, nil, txns[j])
+ a.NoError(err)
+ stxns = append(stxns, stxn)
+ txids[stxn.ID().String()] = account0
+ }
+ err = client.BroadcastTransactionGroup(stxns)
+ a.NoError(err)
+ }
+ // Travis is slow, so help it along by waiting every once in a while
+ // for these transactions to commit..
+ if (i % 50) == 0 {
+ _, curRound := fixture.GetBalanceAndRound(account0)
+ confirmed := fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
+ a.True(confirmed)
+ txids = make(map[string]string)
+ }
+ i += groupSize
+ }
+
+ _, curRound := fixture.GetBalanceAndRound(account0)
+ confirmed := fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
+ a.True(confirmed, "creating max number of assets")
+
+ return
+}
+
+func checkTestAssets(a *require.Assertions, client *libgoal.Client, count int, account0 string, manager string, reserve string, freeze string, clawback string, assetURL string, assetMetadataHash []byte) (assets []assetIDParams) {
+ info, err := client.AccountInformation(account0)
+ a.NoError(err)
+ a.Equal(count, len(info.AssetParams))
+ for idx, cp := range info.AssetParams {
+ assets = append(assets, assetIDParams{idx, cp})
+ a.Equal(cp.UnitName, fmt.Sprintf("test%d", cp.Total))
+ a.Equal(cp.AssetName, fmt.Sprintf("testname%d", cp.Total))
+ a.Equal(cp.ManagerAddr, manager)
+ a.Equal(cp.ReserveAddr, reserve)
+ a.Equal(cp.FreezeAddr, freeze)
+ a.Equal(cp.ClawbackAddr, clawback)
+ a.Equal(cp.MetadataHash, assetMetadataHash)
+ a.Equal(cp.URL, assetURL)
+ }
+ return
+}
+
func TestAssetConfig(t *testing.T) {
if testing.Short() {
t.Skip()
@@ -191,7 +274,7 @@ func TestAssetConfig(t *testing.T) {
a := require.New(fixtures.SynchronizedTest(t))
var fixture fixtures.RestClientFixture
- fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV27.json"))
defer fixture.Shutdown()
client := fixture.LibGoalClient
@@ -226,31 +309,8 @@ func TestAssetConfig(t *testing.T) {
a.Equal(len(info.AssetParams), 0)
// Create max number of assets
- txids := make(map[string]string)
- for i := 0; i < config.Consensus[protocol.ConsensusFuture].MaxAssetsPerAccount; i++ {
- // re-generate wh, since this test takes a while and sometimes
- // the wallet handle expires.
- wh, err = client.GetUnencryptedWalletHandle()
- a.NoError(err)
-
- tx, err := client.MakeUnsignedAssetCreateTx(1+uint64(i), false, manager, reserve, freeze, clawback, fmt.Sprintf("test%d", i), fmt.Sprintf("testname%d", i), assetURL, assetMetadataHash, 0)
- txid, err := helperFillSignBroadcast(client, wh, account0, tx, err)
- a.NoError(err)
- txids[txid] = account0
-
- // Travis is slow, so help it along by waiting every once in a while
- // for these transactions to commit..
- if (i % 50) == 0 {
- _, curRound := fixture.GetBalanceAndRound(account0)
- confirmed := fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
- a.True(confirmed)
- txids = make(map[string]string)
- }
- }
-
- _, curRound := fixture.GetBalanceAndRound(account0)
- confirmed := fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
- a.True(confirmed, "creating max number of assets")
+ numAssets := config.Consensus[protocol.ConsensusV27].MaxAssetsPerAccount
+ createTestAssets(a, &fixture, numAssets, account0, manager, reserve, freeze, clawback, assetURL, assetMetadataHash, config.Consensus[protocol.ConsensusV27])
// re-generate wh, since this test takes a while and sometimes
// the wallet handle expires.
@@ -259,26 +319,13 @@ func TestAssetConfig(t *testing.T) {
// Creating more assets should return an error
tx, err := client.MakeUnsignedAssetCreateTx(1, false, manager, reserve, freeze, clawback, fmt.Sprintf("toomany"), fmt.Sprintf("toomany"), assetURL, assetMetadataHash, 0)
+ a.NoError(err)
_, err = helperFillSignBroadcast(client, wh, account0, tx, err)
a.Error(err)
a.True(strings.Contains(err.Error(), "too many assets in account:"))
// Check that assets are visible
- info, err = client.AccountInformation(account0)
- a.NoError(err)
- a.Equal(len(info.AssetParams), config.Consensus[protocol.ConsensusFuture].MaxAssetsPerAccount)
- var assets []assetIDParams
- for idx, cp := range info.AssetParams {
- assets = append(assets, assetIDParams{idx, cp})
- a.Equal(cp.UnitName, fmt.Sprintf("test%d", cp.Total-1))
- a.Equal(cp.AssetName, fmt.Sprintf("testname%d", cp.Total-1))
- a.Equal(cp.ManagerAddr, manager)
- a.Equal(cp.ReserveAddr, reserve)
- a.Equal(cp.FreezeAddr, freeze)
- a.Equal(cp.ClawbackAddr, clawback)
- a.Equal(cp.MetadataHash, assetMetadataHash)
- a.Equal(cp.URL, assetURL)
- }
+ assets := checkTestAssets(a, &client, config.Consensus[protocol.ConsensusV27].MaxAssetsPerAccount, account0, manager, reserve, freeze, clawback, assetURL, assetMetadataHash)
// re-generate wh, since this test takes a while and sometimes
// the wallet handle expires.
@@ -287,7 +334,7 @@ func TestAssetConfig(t *testing.T) {
// Test changing various keys
var empty string
- txids = make(map[string]string)
+ txids := make(map[string]string)
tx, err = client.MakeUnsignedAssetConfigTx(account0, assets[0].idx, &account0, nil, nil, nil)
txid, err := helperFillSignBroadcast(client, wh, manager, tx, err)
@@ -324,16 +371,16 @@ func TestAssetConfig(t *testing.T) {
a.NoError(err)
txids[txid] = manager
- _, curRound = fixture.GetBalanceAndRound(account0)
- confirmed = fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
+ _, curRound := fixture.GetBalanceAndRound(account0)
+ confirmed := fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
a.True(confirmed, "changing keys")
info, err = client.AccountInformation(account0)
a.NoError(err)
- a.Equal(len(info.AssetParams), config.Consensus[protocol.ConsensusFuture].MaxAssetsPerAccount)
+ a.Equal(len(info.AssetParams), config.Consensus[protocol.ConsensusV27].MaxAssetsPerAccount)
for idx, cp := range info.AssetParams {
- a.Equal(cp.UnitName, fmt.Sprintf("test%d", cp.Total-1))
- a.Equal(cp.AssetName, fmt.Sprintf("testname%d", cp.Total-1))
+ a.Equal(cp.UnitName, fmt.Sprintf("test%d", cp.Total))
+ a.Equal(cp.AssetName, fmt.Sprintf("testname%d", cp.Total))
if idx == assets[0].idx {
a.Equal(cp.ManagerAddr, account0)
@@ -378,30 +425,77 @@ func TestAssetConfig(t *testing.T) {
a.True(strings.Contains(err.Error(), "outstanding assets"))
// Destroy assets
- txids = make(map[string]string)
- for idx := range info.AssetParams {
+ txids = make(map[string]string, len(info.AssetParams))
+ params := config.Consensus[protocol.ConsensusV27]
+ min := func(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+ }
+
+ // flatten in order to send in groups
+ type flatten struct {
+ idx uint64
+ params v1.AssetParams
+ }
+ assetParams := make([]flatten, 0, len(info.AssetParams))
+ for idx, params := range info.AssetParams {
+ assetParams = append(assetParams, flatten{idx, params})
+ }
+ i := 0
+ for i < len(assetParams) {
// re-generate wh, since this test takes a while and sometimes
// the wallet handle expires.
wh, err = client.GetUnencryptedWalletHandle()
a.NoError(err)
- tx, err := client.MakeUnsignedAssetDestroyTx(idx)
- sender := manager
- if idx == assets[0].idx {
- sender = account0
+ groupSize := min(params.MaxTxGroupSize, len(assetParams)-i)
+ if groupSize == 1 {
+ tx, err := client.MakeUnsignedAssetDestroyTx(assetParams[i].idx)
+ sender := manager
+ if assetParams[i].idx == assets[0].idx {
+ sender = account0
+ }
+ txid, err := helperFillSignBroadcast(client, wh, sender, tx, err)
+ a.NoError(err)
+ txids[txid] = sender
+ } else {
+ txns := make([]transactions.Transaction, 0, groupSize)
+ stxns := make([]transactions.SignedTxn, 0, groupSize)
+ for j := 0; j < groupSize; j++ {
+ tx, err := client.MakeUnsignedAssetDestroyTx(assetParams[i+j].idx)
+ a.NoError(err)
+ sender := manager
+ if assetParams[i+j].idx == assets[0].idx {
+ sender = account0
+ }
+ tx, err = client.FillUnsignedTxTemplate(sender, 0, 0, 1000000, tx)
+ a.NoError(err)
+ txns = append(txns, tx)
+ }
+ gid, err := client.GroupID(txns)
+ a.NoError(err)
+ for j := 0; j < groupSize; j++ {
+ txns[j].Group = gid
+ stxn, err := client.SignTransactionWithWallet(wh, nil, txns[j])
+ a.NoError(err)
+ stxns = append(stxns, stxn)
+ txids[stxn.ID().String()] = stxn.Txn.Sender.String()
+ }
+ err = client.BroadcastTransactionGroup(stxns)
+ a.NoError(err)
}
- txid, err := helperFillSignBroadcast(client, wh, sender, tx, err)
- a.NoError(err)
- txids[txid] = sender
// Travis is slow, so help it along by waiting every once in a while
// for these transactions to commit..
- if (idx % 50) == 0 {
+ if (i % 50) == 0 {
_, curRound = fixture.GetBalanceAndRound(account0)
confirmed = fixture.WaitForAllTxnsToConfirm(curRound+20, txids)
a.True(confirmed)
txids = make(map[string]string)
}
+ i += groupSize
}
_, curRound = fixture.GetBalanceAndRound(account0)
@@ -418,6 +512,73 @@ func TestAssetConfig(t *testing.T) {
a.NoError(err)
}
+// TestAssetConfigUnlimited is similar to TestAssetConfig
+// and checks that creating MaxAssetsPerAccount+1 assets succeeds
+func TestAssetConfigUnlimited(t *testing.T) {
+ if testing.Short() {
+ t.Skip()
+ }
+ t.Parallel()
+ a := require.New(fixtures.SynchronizedTest(t))
+
+ var fixture fixtures.RestClientFixture
+ fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json"))
+ defer fixture.Shutdown()
+
+ client := fixture.LibGoalClient
+ accountList, err := fixture.GetWalletsSortedByBalance()
+ a.NoError(err)
+ account0 := accountList[0].Address
+ wh, err := client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ manager, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ reserve, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ freeze, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ clawback, err := client.GenerateAddress(wh)
+ a.NoError(err)
+
+ assetURL := "foo://bar"
+ assetMetadataHash := []byte("ISTHISTHEREALLIFEISTHISJUSTFANTA")
+
+ // Fund the manager, so it can issue transactions later on
+ _, err = client.SendPaymentFromUnencryptedWallet(account0, manager, 0, 10000000000, nil)
+ a.NoError(err)
+
+ // There should be no assets to start with
+ info, err := client.AccountInformation(account0)
+ a.NoError(err)
+ a.Equal(len(info.AssetParams), 0)
+
+ // Create max number of assets
+ numAssets := config.Consensus[protocol.ConsensusFuture].MaxAssetsPerAccount
+ createTestAssets(a, &fixture, numAssets, account0, manager, reserve, freeze, clawback, assetURL, assetMetadataHash, config.Consensus[protocol.ConsensusFuture])
+
+ // re-generate wh, since this test takes a while and sometimes
+ // the wallet handle expires.
+ wh, err = client.GetUnencryptedWalletHandle()
+ a.NoError(err)
+
+ // Creating more assets should not return an error
+ tx, err := client.MakeUnsignedAssetCreateTx(uint64(numAssets+1), false, manager, reserve, freeze, clawback, fmt.Sprintf("test%d", numAssets+1), fmt.Sprintf("testname%d", numAssets+1), assetURL, assetMetadataHash, 0)
+ a.NoError(err)
+ txid, err := helperFillSignBroadcast(client, wh, account0, tx, err)
+ a.NoError(err)
+
+ _, curRound := fixture.GetBalanceAndRound(account0)
+ confirmed := fixture.WaitForAllTxnsToConfirm(curRound+5, map[string]string{txid: account0})
+ a.True(confirmed)
+
+ // Check that assets are visible
+ checkTestAssets(a, &client, numAssets+1, account0, manager, reserve, freeze, clawback, assetURL, assetMetadataHash)
+}
+
func TestAssetInformation(t *testing.T) {
t.Parallel()
a := require.New(fixtures.SynchronizedTest(t))
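Both the asset-creation and asset-destruction loops added to asset_test.go above advance by min(MaxTxGroupSize, remaining) items per iteration. A tiny standalone sketch of that batching pattern (names are illustrative, not from the patch):

package main

import "fmt"

// batchRanges walks total items in chunks of at most maxGroup, the same way
// the e2e test forms transaction groups.
func batchRanges(total, maxGroup int) [][2]int {
	var batches [][2]int
	for i := 0; i < total; {
		size := maxGroup
		if remaining := total - i; remaining < size {
			size = remaining
		}
		batches = append(batches, [2]int{i, i + size})
		i += size
	}
	return batches
}

func main() {
	fmt.Println(batchRanges(10, 4)) // [[0 4] [4 8] [8 10]]
}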
diff --git a/test/testdata/nettemplates/TwoNodes50EachV27.json b/test/testdata/nettemplates/TwoNodes50EachV27.json
new file mode 100644
index 000000000..e881d4f02
--- /dev/null
+++ b/test/testdata/nettemplates/TwoNodes50EachV27.json
@@ -0,0 +1,29 @@
+{
+ "Genesis": {
+ "NetworkName": "tbd",
+ "ConsensusProtocol": "https://github.com/algorandfoundation/specs/tree/d050b3cade6d5c664df8bd729bf219f179812595",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 50,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 50,
+ "Online": true
+ }
+ ]
+ },
+ "Nodes": [
+ {
+ "Name": "Primary",
+ "IsRelay": true,
+ "Wallets": [{ "Name": "Wallet1", "ParticipationOnly": false }]
+ },
+ {
+ "Name": "Node",
+ "Wallets": [{ "Name": "Wallet2", "ParticipationOnly": false }]
+ }
+ ]
+}