summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJohn Lee <64482439+algojohnlee@users.noreply.github.com>2020-08-14 00:31:57 -0400
committerGitHub <noreply@github.com>2020-08-14 00:31:57 -0400
commit2fc191356106c0edaf09191669c0b555b7b196c9 (patch)
tree74d625746b1e7877f74cbda22901b4a6ce297a8f
parent48422dd79d60676a689d0b296d0440e7b65c1e09 (diff)
parent0bba4c9b0a0829ef7c6bfc3c2598ce0e97a3b6df (diff)
Merge pull request #1379 from onetechnical/onetechnical/relbeta2.1.2v2.1.2-beta
Onetechnical/relbeta2.1.2
-rw-r--r--.travis.yml2
-rw-r--r--Makefile2
-rw-r--r--README.md2
-rw-r--r--agreement/gossip/network_test.go4
-rw-r--r--buildnumber.dat2
-rw-r--r--cmd/tealdbg/README.md9
-rw-r--r--cmd/tealdbg/local.go6
-rw-r--r--cmd/tealdbg/localLedger.go107
-rw-r--r--cmd/tealdbg/local_test.go134
-rw-r--r--cmd/tealdbg/main.go6
-rw-r--r--cmd/tealdbg/server.go2
-rw-r--r--components/mocks/mockNetwork.go6
-rw-r--r--config/config.go6
-rw-r--r--config/local_defaults.go3
-rw-r--r--crypto/merkletrie/cache.go72
-rw-r--r--crypto/merkletrie/cache_test.go162
-rw-r--r--data/pools/ewma.go53
-rw-r--r--data/pools/ewma_test.go90
-rw-r--r--data/pools/feeTracker.go114
-rw-r--r--data/pools/feeTracker_test.go84
-rw-r--r--data/pools/transactionPool.go25
-rw-r--r--data/pools/transactionPool_test.go51
-rw-r--r--go.mod7
-rw-r--r--go.sum46
-rw-r--r--installer/config.json.example3
-rw-r--r--ledger/accountdb.go110
-rw-r--r--ledger/accountdb_test.go5
-rw-r--r--ledger/acctupdates.go195
-rw-r--r--ledger/acctupdates_test.go175
-rw-r--r--ledger/blockdb.go5
-rw-r--r--ledger/catchpointwriter.go28
-rw-r--r--ledger/catchpointwriter_test.go39
-rw-r--r--ledger/catchupaccessor.go57
-rw-r--r--logging/telemetryspec/event.go18
-rw-r--r--network/requestTracker.go117
-rw-r--r--network/requestTracker_test.go2
-rw-r--r--network/wsNetwork.go15
-rw-r--r--node/node.go11
-rw-r--r--package-deploy.yaml74
-rw-r--r--package-sign.yaml74
-rw-r--r--package-test.yaml51
-rw-r--r--package.yaml72
-rw-r--r--rpcs/ledgerService.go41
-rwxr-xr-xscripts/release/mule/package/rpm/package.sh8
-rw-r--r--test/README.md51
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-bootloader.sh2
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-delete-clear.sh40
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-real-assets-round.sh2
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-simple.sh2
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-stateful-global.sh2
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-stateful-local.sh2
-rwxr-xr-xtest/scripts/e2e_subs/e2e-app-x-app-reads.sh2
-rwxr-xr-xtest/scripts/e2e_subs/rekey.sh26
-rwxr-xr-xtest/scripts/e2e_subs/rest.sh68
-rw-r--r--test/testdata/configs/config-v10.json76
-rw-r--r--util/db/dbutil.go61
56 files changed, 1646 insertions, 783 deletions
diff --git a/.travis.yml b/.travis.yml
index 8ad739124..20e273ac1 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,4 @@
dist: bionic
-go:
- - "1.12"
go_import_path: github.com/algorand/go-algorand
language: go
diff --git a/Makefile b/Makefile
index 7939df229..5293d7c97 100644
--- a/Makefile
+++ b/Makefile
@@ -133,7 +133,7 @@ KMD_API_SWAGGER_INJECT := daemon/kmd/lib/kmdapi/bundledSpecInject.go
$(KMD_API_SWAGGER_SPEC): $(KMD_API_FILES) crypto/libs/$(OS_TYPE)/$(ARCH)/lib/libsodium.a
cd daemon/kmd/lib/kmdapi && \
- python genSwaggerWrappers.py $(KMD_API_SWAGGER_WRAPPER)
+ python3 genSwaggerWrappers.py $(KMD_API_SWAGGER_WRAPPER)
cd daemon/kmd && \
PATH=$(GOPATH1)/bin:$$PATH \
go generate ./...
diff --git a/README.md b/README.md
index 8849ca0b9..edeb522d5 100644
--- a/README.md
+++ b/README.md
@@ -181,7 +181,7 @@ A number of packages provide utilities for the various components:
- `util` contains a variety of utilities, including a codec, a sqlite wrapper,
a goroutine pool, a timer interface, node metrics, and more.
-`test` contains end-to-end tests for the above components.
+`test` ([README](test/README.md)) contains end-to-end tests and utilities for the above components.
## License
diff --git a/agreement/gossip/network_test.go b/agreement/gossip/network_test.go
index 19a45a8be..ac503ecc0 100644
--- a/agreement/gossip/network_test.go
+++ b/agreement/gossip/network_test.go
@@ -18,6 +18,7 @@ package gossip
import (
"context"
+ "net"
"net/http"
"sync"
"sync/atomic"
@@ -153,6 +154,9 @@ func (w *whiteholeNetwork) GetPeers(options ...network.PeerOption) []network.Pee
}
func (w *whiteholeNetwork) RegisterHTTPHandler(path string, handler http.Handler) {
}
+func (w *whiteholeNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) {
+ return nil
+}
func (w *whiteholeNetwork) Start() {
w.quit = make(chan struct{})
diff --git a/buildnumber.dat b/buildnumber.dat
index d00491fd7..0cfbf0888 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-1
+2
diff --git a/cmd/tealdbg/README.md b/cmd/tealdbg/README.md
index b1fc841fc..a5619cd86 100644
--- a/cmd/tealdbg/README.md
+++ b/cmd/tealdbg/README.md
@@ -152,6 +152,15 @@ If default/empty local and global state are OK for the application, the use `--p
to automatically create necessary balance records for the application(s) so that `app_` opcodes
do not fail due to absent data in ledger.
+### Indexer Support
+
+You can also supply balance records through an indexer https://github.com/algorand/indexer.
+Specify the indexer api endpoint, round number at which to fetch balance records, and an api token if necessary.
+
+```
+$ tealdbg debug myprog.teal --round roundnumber -i apiendpoint --indexer-token token
+```
+
### Execution mode
Execution mode, either **signature** or **application** matches to **Algod**'s evaluation mode
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index 240e7dd53..063e50b92 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -349,7 +349,7 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) {
ledger, states, err = makeAppLedger(
balances, r.txnGroup, dp.GroupIndex,
r.proto, dp.Round, dp.LatestTimestamp, appIdx,
- dp.Painless,
+ dp.Painless, dp.IndexerURL, dp.IndexerToken,
)
if err != nil {
return
@@ -387,7 +387,7 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) {
ledger, states, err = makeAppLedger(
balances, r.txnGroup, gi,
r.proto, dp.Round, dp.LatestTimestamp,
- appIdx, dp.Painless,
+ appIdx, dp.Painless, dp.IndexerURL, dp.IndexerToken,
)
if err != nil {
return
@@ -421,7 +421,7 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) {
ledger, states, err = makeAppLedger(
balances, r.txnGroup, gi,
r.proto, dp.Round, dp.LatestTimestamp,
- appIdx, dp.Painless,
+ appIdx, dp.Painless, dp.IndexerURL, dp.IndexerToken,
)
if err != nil {
return
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index ff7103bc6..b1d23ed34 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -17,17 +17,44 @@
package main
import (
+ "encoding/json"
"fmt"
+ "io/ioutil"
"math/rand"
+ "net/http"
"github.com/algorand/go-algorand/config"
"github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/ledger"
)
+// AccountIndexerResponse represents the Account Response object from querying indexer
+type AccountIndexerResponse struct {
+ // Account information at a given round.
+ //
+ // Definition:
+ // data/basics/userBalance.go : AccountData
+ Account generated.Account `json:"account"`
+
+ // Round at which the results were computed.
+ CurrentRound uint64 `json:"current-round"`
+}
+
+// ApplicationIndexerResponse represents the Application Response object from querying indexer
+type ApplicationIndexerResponse struct {
+
+ // Application index and its parameters
+ Application generated.Application `json:"application,omitempty"`
+
+ // Round at which the results were computed.
+ CurrentRound uint64 `json:"current-round"`
+}
+
type balancesAdapter struct {
balances map[basics.Address]basics.AccountData
txnGroup []transactions.SignedTxn
@@ -39,7 +66,7 @@ type balancesAdapter struct {
func makeAppLedger(
balances map[basics.Address]basics.AccountData, txnGroup []transactions.SignedTxn,
groupIndex int, proto config.ConsensusParams, round uint64, latestTimestamp int64,
- appIdx basics.AppIndex, painless bool,
+ appIdx basics.AppIndex, painless bool, indexerURL string, indexerToken string,
) (logic.LedgerForLogic, appState, error) {
if groupIndex >= len(txnGroup) {
@@ -53,6 +80,30 @@ func makeAppLedger(
apps := []basics.AppIndex{appIdx}
apps = append(apps, txn.Txn.ForeignApps...)
+ // populate balances from the indexer if not already
+ if indexerURL != "" {
+ for _, acc := range accounts {
+ // only populate from indexer if balance record not specified
+ if _, ok := balances[acc]; !ok {
+ var err error
+ balances[acc], err = getBalanceFromIndexer(indexerURL, indexerToken, acc, round)
+ if err != nil {
+ return nil, appState{}, err
+ }
+ }
+ }
+ for _, app := range apps {
+ creator, err := getAppCreatorFromIndexer(indexerURL, indexerToken, app)
+ if err != nil {
+ return nil, appState{}, err
+ }
+ balances[creator], err = getBalanceFromIndexer(indexerURL, indexerToken, creator, round)
+ if err != nil {
+ return nil, appState{}, err
+ }
+ }
+ }
+
ba := &balancesAdapter{
balances: balances,
txnGroup: txnGroup,
@@ -130,6 +181,60 @@ func makeAppLedger(
return ledger, states, err
}
+func getAppCreatorFromIndexer(indexerURL string, indexerToken string, app basics.AppIndex) (basics.Address, error) {
+ queryString := fmt.Sprintf("%s/v2/applications/%d", indexerURL, app)
+ client := &http.Client{}
+ request, err := http.NewRequest("GET", queryString, nil)
+ request.Header.Set("X-Indexer-API-Token", indexerToken)
+ resp, err := client.Do(request)
+ if err != nil {
+ return basics.Address{}, fmt.Errorf("application request error: %s", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ msg, _ := ioutil.ReadAll(resp.Body)
+ return basics.Address{}, fmt.Errorf("application response error: %s, status code: %d, request: %s", string(msg), resp.StatusCode, queryString)
+ }
+ var appResp ApplicationIndexerResponse
+ err = json.NewDecoder(resp.Body).Decode(&appResp)
+ if err != nil {
+ return basics.Address{}, fmt.Errorf("application response decode error: %s", err)
+ }
+
+ creator, err := basics.UnmarshalChecksumAddress(appResp.Application.Params.Creator)
+
+ if err != nil {
+ return basics.Address{}, fmt.Errorf("UnmarshalChecksumAddress error: %s", err)
+ }
+ return creator, nil
+}
+
+func getBalanceFromIndexer(indexerURL string, indexerToken string, account basics.Address, round uint64) (basics.AccountData, error) {
+ queryString := fmt.Sprintf("%s/v2/accounts/%s?round=%d", indexerURL, account, round)
+ client := &http.Client{}
+ request, err := http.NewRequest("GET", queryString, nil)
+ request.Header.Set("X-Indexer-API-Token", indexerToken)
+ resp, err := client.Do(request)
+ if err != nil {
+ return basics.AccountData{}, fmt.Errorf("account request error: %s", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ msg, _ := ioutil.ReadAll(resp.Body)
+ return basics.AccountData{}, fmt.Errorf("account response error: %s, status code: %d, request: %s", string(msg), resp.StatusCode, queryString)
+ }
+ var accountResp AccountIndexerResponse
+ err = json.NewDecoder(resp.Body).Decode(&accountResp)
+ if err != nil {
+ return basics.AccountData{}, fmt.Errorf("account response decode error: %s", err)
+ }
+ balance, err := v2.AccountToAccountData(&accountResp.Account)
+ if err != nil {
+ return basics.AccountData{}, fmt.Errorf("AccountToAccountData error: %s", err)
+ }
+ return balance, nil
+}
+
func makeSchemas() basics.StateSchemas {
return basics.StateSchemas{
LocalStateSchema: makeLocalSchema(),
diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go
index b81d554f1..2f12876c7 100644
--- a/cmd/tealdbg/local_test.go
+++ b/cmd/tealdbg/local_test.go
@@ -17,10 +17,15 @@
package main
import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
"reflect"
+ "strconv"
"strings"
"testing"
+ "github.com/algorand/go-algorand/daemon/algod/api/server/v2"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -158,6 +163,11 @@ func makeSampleBalanceRecord(addr basics.Address, assetIdx basics.AssetIndex, ap
Total: 100,
UnitName: "tok",
AssetName: "asset",
+ Manager: addr,
+ Reserve: addr,
+ Freeze: addr,
+ Clawback: addr,
+ URL: "http://127.0.0.1/8000",
},
}
br.Assets = map[basics.AssetIndex]basics.AssetHolding{
@@ -923,3 +933,127 @@ func TestLocalLedger(t *testing.T) {
tkv, err = ledger.AppLocalState(payTxn.Txn.Receiver, appIdx)
a.Error(err)
}
+
+func TestLocalLedgerIndexer(t *testing.T) {
+ a := require.New(t)
+
+ sender, err := basics.UnmarshalChecksumAddress("47YPQTIGQEO7T4Y4RWDYWEKV6RTR2UNBQXBABEEGM72ESWDQNCQ52OPASU")
+ a.NoError(err)
+ // make balance records
+ appIdx := basics.AppIndex(100)
+ assetIdx := basics.AssetIndex(50)
+ brs := makeSampleBalanceRecord(sender, assetIdx, appIdx)
+ //balanceBlob := protocol.EncodeMsgp(&brs)
+ srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ accountPath := "/v2/accounts/"
+ applicationPath := "/v2/applications/"
+ switch {
+ case strings.HasPrefix(r.URL.Path, accountPath):
+ w.WriteHeader(200)
+ if r.URL.Path[len(accountPath):] == brs.Addr.String() {
+ account, err := v2.AccountDataToAccount(brs.Addr.String(), &brs.AccountData, map[basics.AssetIndex]string{}, 100, basics.MicroAlgos{Raw: 0})
+ a.NoError(err)
+ accountResponse := AccountIndexerResponse{Account: account, CurrentRound: 100}
+ response, err := json.Marshal(accountResponse)
+ a.NoError(err)
+ w.Write(response)
+ }
+ case strings.HasPrefix(r.URL.Path, applicationPath):
+ w.WriteHeader(200)
+ if r.URL.Path[len(applicationPath):] == strconv.FormatUint(uint64(appIdx), 10) {
+ appParams := brs.AppParams[appIdx]
+ app := v2.AppParamsToApplication(sender.String(), appIdx, &appParams)
+ a.NoError(err)
+ applicationResponse := ApplicationIndexerResponse{Application: app, CurrentRound: 100}
+ response, err := json.Marshal(applicationResponse)
+ a.NoError(err)
+ w.Write(response)
+ }
+ default:
+ w.WriteHeader(404)
+ }
+ }))
+ defer srv.Close()
+
+ // make transaction group: app call + sample payment
+ appTxn := transactions.SignedTxn{
+ Txn: transactions.Transaction{
+ Header: transactions.Header{
+ Sender: sender,
+ },
+ ApplicationCallTxnFields: transactions.ApplicationCallTxnFields{
+ ApplicationID: appIdx,
+ },
+ },
+ }
+
+ var payTxn transactions.SignedTxn
+ err = protocol.DecodeJSON([]byte(txnSample), &payTxn)
+ a.NoError(err)
+
+ txnBlob := protocol.EncodeMsgp(&appTxn)
+ txnBlob = append(txnBlob, protocol.EncodeMsgp(&payTxn)...)
+
+ l := LocalRunner{}
+ dp := DebugParams{
+ ProgramNames: []string{"test"},
+ ProgramBlobs: [][]byte{{1}},
+ TxnBlob: txnBlob,
+ IndexerURL: srv.URL,
+ RunMode: "application",
+ GroupIndex: 0,
+ Round: 100,
+ LatestTimestamp: 333,
+ }
+
+ err = l.Setup(&dp)
+ a.NoError(err)
+ a.Equal(2, len(l.txnGroup))
+ a.Equal(1, len(l.runs))
+ a.Equal(0, l.runs[0].groupIndex)
+ a.NotNil(l.runs[0].eval)
+ a.Equal([]byte{1}, l.runs[0].program)
+ a.NotNil(l.runs[0].ledger)
+ a.NotEqual(
+ reflect.ValueOf(logic.Eval).Pointer(),
+ reflect.ValueOf(l.runs[0].eval).Pointer(),
+ )
+ ledger := l.runs[0].ledger
+ a.Equal(basics.Round(100), ledger.Round())
+ a.Equal(int64(333), ledger.LatestTimestamp())
+
+ balance, err := ledger.Balance(sender)
+ a.NoError(err)
+ a.Equal(basics.MicroAlgos{Raw: 500000000}, balance)
+
+ holdings, err := ledger.AssetHolding(sender, assetIdx)
+ a.NoError(err)
+ a.Equal(basics.AssetHolding{Amount: 10, Frozen: false}, holdings)
+ holdings, err = ledger.AssetHolding(sender, assetIdx+1)
+ a.Error(err)
+
+ params, err := ledger.AssetParams(assetIdx)
+ a.NoError(err)
+ a.Equal(uint64(100), params.Total)
+ a.Equal("tok", params.UnitName)
+
+ tkv, err := ledger.AppGlobalState(0)
+ a.NoError(err)
+ a.Equal(uint64(2), tkv["gkeyint"].Uint)
+ tkv, err = ledger.AppGlobalState(appIdx)
+ a.NoError(err)
+ a.Equal("global", tkv["gkeybyte"].Bytes)
+ tkv, err = ledger.AppGlobalState(appIdx + 1)
+ a.Error(err)
+
+ tkv, err = ledger.AppLocalState(sender, 0)
+ a.NoError(err)
+ a.Equal(uint64(1), tkv["lkeyint"].Uint)
+ tkv, err = ledger.AppLocalState(sender, appIdx)
+ a.NoError(err)
+ a.Equal("local", tkv["lkeybyte"].Bytes)
+ tkv, err = ledger.AppLocalState(sender, appIdx+1)
+ a.Error(err)
+ tkv, err = ledger.AppLocalState(payTxn.Txn.Receiver, appIdx)
+ a.Error(err)
+}
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index b43cf96b1..1715c80af 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -129,6 +129,8 @@ var txnFile string
var groupIndex int
var balanceFile string
var ddrFile string
+var indexerURL string
+var indexerToken string
var roundNumber uint64
var timestamp int64
var runMode runModeValue = runModeValue{makeCobraStringValue("auto", []string{"signature", "application"})}
@@ -156,6 +158,8 @@ func init() {
debugCmd.Flags().StringVarP(&balanceFile, "balance", "b", "", "Balance records to evaluate stateful TEAL on in form of json or msgpack file")
debugCmd.Flags().StringVarP(&ddrFile, "dryrun-req", "d", "", "Program(s) and state(s) in dryrun REST request format")
debugCmd.Flags().Uint64VarP(&appID, "app-id", "a", 1380011588, "Application ID for stateful TEAL if not set in transaction(s)")
+ debugCmd.Flags().StringVarP(&indexerURL, "indexer-url", "i", "", "URL for indexer to fetch Balance records from to evaluate stateful TEAL")
+ debugCmd.Flags().StringVarP(&indexerToken, "indexer-token", "", "", "API token for indexer to fetch Balance records from to evaluate stateful TEAL")
debugCmd.Flags().Uint64VarP(&roundNumber, "round", "r", 0, "Ledger round number to evaluate stateful TEAL on")
debugCmd.Flags().Int64VarP(&timestamp, "latest-timestamp", "l", 0, "Latest confirmed timestamp to evaluate stateful TEAL on")
debugCmd.Flags().VarP(&runMode, "mode", "m", "TEAL evaluation mode: "+runMode.AllowedString())
@@ -248,6 +252,8 @@ func debugLocal(args []string) {
GroupIndex: groupIndex,
BalanceBlob: balanceBlob,
DdrBlob: ddrBlob,
+ IndexerURL: indexerURL,
+ IndexerToken: indexerToken,
Round: uint64(roundNumber),
LatestTimestamp: timestamp,
RunMode: runMode.String(),
diff --git a/cmd/tealdbg/server.go b/cmd/tealdbg/server.go
index 723060b9f..1bf1753c6 100644
--- a/cmd/tealdbg/server.go
+++ b/cmd/tealdbg/server.go
@@ -64,6 +64,8 @@ type DebugParams struct {
GroupIndex int
BalanceBlob []byte
DdrBlob []byte
+ IndexerURL string
+ IndexerToken string
Round uint64
LatestTimestamp int64
RunMode string
diff --git a/components/mocks/mockNetwork.go b/components/mocks/mockNetwork.go
index 3076693be..d6f72e102 100644
--- a/components/mocks/mockNetwork.go
+++ b/components/mocks/mockNetwork.go
@@ -18,6 +18,7 @@ package mocks
import (
"context"
+ "net"
"net/http"
"github.com/algorand/go-algorand/network"
@@ -99,3 +100,8 @@ func (network *MockNetwork) RegisterHTTPHandler(path string, handler http.Handle
// OnNetworkAdvance - empty implementation
func (network *MockNetwork) OnNetworkAdvance() {}
+
+// GetHTTPRequestConnection - empty implementation
+func (network *MockNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) {
+ return nil
+}
diff --git a/config/config.go b/config/config.go
index 6161adda1..19d8b6043 100644
--- a/config/config.go
+++ b/config/config.go
@@ -63,7 +63,7 @@ type Local struct {
// Version tracks the current version of the defaults so we can migrate old -> new
// This is specifically important whenever we decide to change the default value
// for an existing parameter. This field tag must be updated any time we add a new version.
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9"`
+ Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10"`
// environmental (may be overridden)
// When enabled, stores blocks indefinitally, otherwise, only the most recents blocks
@@ -324,6 +324,10 @@ type Local struct {
// EnableDeveloperAPI enables teal/compile, teal/dryrun API endpoints.
// This functionlity is disabled by default.
EnableDeveloperAPI bool `version[9]:"false"`
+
+ // OptimizeAccountsDatabaseOnStartup controls whether the accounts database would be optimized
+ // on algod startup.
+ OptimizeAccountsDatabaseOnStartup bool `version[10]:"false"`
}
// Filenames of config files within the configdir (e.g. ~/.algorand)
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 9dc909a8f..07a4647e4 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -20,7 +20,7 @@
package config
var defaultLocal = Local{
- Version: 9,
+ Version: 10,
AnnounceParticipationKey: true,
Archival: false,
BaseLoggerDebugLevel: 4,
@@ -71,6 +71,7 @@ var defaultLocal = Local{
NetworkProtocolVersion: "",
NodeExporterListenAddress: ":9100",
NodeExporterPath: "./node_exporter",
+ OptimizeAccountsDatabaseOnStartup: false,
OutgoingMessageFilterBucketCount: 3,
OutgoingMessageFilterBucketSize: 128,
PeerConnectionsUpdateInterval: 3600,
diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go
index 3af7bc8b4..1f53edfef 100644
--- a/crypto/merkletrie/cache.go
+++ b/crypto/merkletrie/cache.go
@@ -67,8 +67,8 @@ type merkleTrieCache struct {
// pendingCreatedNID contains a list of the node ids that has been created since the last commit and need to be stored.
pendingCreatedNID map[storedNodeIdentifier]bool
- // pendingDeletionNID contains a list of the node ids that has been deleted since the last commit and need to be removed.
- pendingDeletionNID map[storedNodeIdentifier]bool
+ // pendingDeletionPage contains a map of pages to delete once committed.
+ pendingDeletionPages map[uint64]bool
// a list of the pages priorities. The item in the front has higher priority and would not get evicted as quickly as the item on the back
pagesPrioritizationList *list.List
@@ -86,7 +86,7 @@ func (mtc *merkleTrieCache) initialize(mt *Trie, committer Committer, cachedNode
mtc.committer = committer
mtc.cachedNodeCount = 0
mtc.pendingCreatedNID = make(map[storedNodeIdentifier]bool)
- mtc.pendingDeletionNID = make(map[storedNodeIdentifier]bool)
+ mtc.pendingDeletionPages = make(map[uint64]bool)
mtc.pagesPrioritizationList = list.New()
mtc.pagesPrioritizationMap = make(map[uint64]*list.Element)
mtc.cachedNodeCountTarget = cachedNodeCountTarget
@@ -199,7 +199,7 @@ func (mtc *merkleTrieCache) deleteNode(nid storedNodeIdentifier) {
page := uint64(nid) / uint64(mtc.nodesPerPage)
delete(mtc.pageToNIDsPtr[page], nid)
if len(mtc.pageToNIDsPtr[page]) == 0 {
- mtc.pageToNIDsPtr[page] = nil
+ delete(mtc.pageToNIDsPtr, page)
}
mtc.cachedNodeCount--
} else {
@@ -226,17 +226,20 @@ func (mtc *merkleTrieCache) commitTransaction() {
// delete the ones that we don't want from the list.
for nodeID := range mtc.txDeletedNodeIDs {
+ page := uint64(nodeID) / uint64(mtc.nodesPerPage)
if mtc.pendingCreatedNID[nodeID] {
// it was never flushed.
delete(mtc.pendingCreatedNID, nodeID)
+ delete(mtc.pageToNIDsPtr[page], nodeID)
+ // if the page is empty, and it's not on the pendingDeletionPages, it means that we have no further references to it,
+ // so we can delete it right away.
+ if len(mtc.pageToNIDsPtr[page]) == 0 && mtc.pendingDeletionPages[page] == false {
+ delete(mtc.pageToNIDsPtr, page)
+ }
} else {
- mtc.pendingDeletionNID[nodeID] = true
- }
-
- page := uint64(nodeID) / uint64(mtc.nodesPerPage)
- delete(mtc.pageToNIDsPtr[page], nodeID)
- if len(mtc.pageToNIDsPtr[page]) == 0 {
- mtc.pageToNIDsPtr[page] = nil
+ mtc.pendingDeletionPages[page] = true
+ delete(mtc.pageToNIDsPtr[page], nodeID)
+ // no need to clear out the mtc.pageToNIDsPtr page, since it will be taken care by the commit() function.
}
}
mtc.cachedNodeCount -= len(mtc.txDeletedNodeIDs)
@@ -252,7 +255,7 @@ func (mtc *merkleTrieCache) rollbackTransaction() {
page := uint64(nodeID) / uint64(mtc.nodesPerPage)
delete(mtc.pageToNIDsPtr[page], nodeID)
if len(mtc.pageToNIDsPtr[page]) == 0 {
- mtc.pageToNIDsPtr[page] = nil
+ delete(mtc.pageToNIDsPtr, page)
}
}
mtc.cachedNodeCount -= len(mtc.txCreatedNodeIDs)
@@ -261,16 +264,16 @@ func (mtc *merkleTrieCache) rollbackTransaction() {
mtc.txNextNodeID = storedNodeIdentifierNull
}
-// Int64Slice attaches the methods of Interface to []uint64, sorting in increasing order.
-type Int64Slice []int64
+// Uint64Slice attaches the methods of Interface to []uint64, sorting in increasing order.
+type Uint64Slice []uint64
-func (p Int64Slice) Len() int { return len(p) }
-func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p Uint64Slice) Len() int { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-// SortInt64 sorts a slice of uint64s in increasing order.
-func SortInt64(a []int64) {
- sort.Sort(Int64Slice(a))
+// SortUint64 sorts a slice of uint64s in increasing order.
+func SortUint64(a []uint64) {
+ sort.Sort(Uint64Slice(a))
}
// commit - used as part of the Trie Commit functionality
@@ -284,26 +287,26 @@ func (mtc *merkleTrieCache) commit() error {
mtc.deferedPageLoad = storedNodeIdentifierNull
}
- createdPages := make(map[int64]map[storedNodeIdentifier]*node)
+ createdPages := make(map[uint64]map[storedNodeIdentifier]*node)
// create a list of all the pages that need to be created/updated
for nodeID := range mtc.pendingCreatedNID {
- nodePage := int64(nodeID) / mtc.nodesPerPage
+ nodePage := uint64(nodeID) / uint64(mtc.nodesPerPage)
if nil == createdPages[nodePage] {
createdPages[nodePage] = mtc.pageToNIDsPtr[uint64(nodePage)]
}
}
// create a sorted list of created pages
- sortedCreatedPages := make([]int64, 0, len(createdPages))
+ sortedCreatedPages := make([]uint64, 0, len(createdPages))
for page := range createdPages {
sortedCreatedPages = append(sortedCreatedPages, page)
}
- SortInt64(sortedCreatedPages)
+ SortUint64(sortedCreatedPages)
// updated the hashes of these pages. this works correctly
// since all trie modification are done with ids that are bottom-up
for _, page := range sortedCreatedPages {
- err := mtc.calculatePageHashes(page)
+ err := mtc.calculatePageHashes(int64(page))
if err != nil {
return err
}
@@ -319,19 +322,12 @@ func (mtc *merkleTrieCache) commit() error {
}
// pages that contains elemets that were removed.
- toRemovePages := make(map[int64]map[storedNodeIdentifier]*node)
- toUpdatePages := make(map[int64]map[storedNodeIdentifier]*node)
- for nodeID := range mtc.pendingDeletionNID {
- nodePage := int64(nodeID) / mtc.nodesPerPage
- if toRemovePages[nodePage] == nil {
- toRemovePages[nodePage] = make(map[storedNodeIdentifier]*node, mtc.nodesPerPage)
- }
- toRemovePages[nodePage][nodeID] = nil
- }
+ toRemovePages := mtc.pendingDeletionPages
+ toUpdatePages := make(map[uint64]map[storedNodeIdentifier]*node)
// iterate over the existing list and ensure we don't delete any page that has active elements
for pageRemovalCandidate := range toRemovePages {
- if mtc.pageToNIDsPtr[uint64(pageRemovalCandidate)] == nil {
+ if len(mtc.pageToNIDsPtr[uint64(pageRemovalCandidate)]) == 0 {
// we have no nodes associated with this page, so
// it means that we can remove this page safely.
continue
@@ -353,9 +349,9 @@ func (mtc *merkleTrieCache) commit() error {
if element != nil {
mtc.pagesPrioritizationList.Remove(element)
delete(mtc.pagesPrioritizationMap, uint64(page))
- mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[uint64(page)])
- delete(mtc.pageToNIDsPtr, uint64(page))
}
+ mtc.cachedNodeCount -= len(mtc.pageToNIDsPtr[uint64(page)])
+ delete(mtc.pageToNIDsPtr, uint64(page))
}
// updated pages
@@ -371,7 +367,7 @@ func (mtc *merkleTrieCache) commit() error {
}
mtc.pendingCreatedNID = make(map[storedNodeIdentifier]bool)
- mtc.pendingDeletionNID = make(map[storedNodeIdentifier]bool)
+ mtc.pendingDeletionPages = make(map[uint64]bool)
mtc.modified = false
return nil
}
diff --git a/crypto/merkletrie/cache_test.go b/crypto/merkletrie/cache_test.go
index 924063fdf..6f0fbae58 100644
--- a/crypto/merkletrie/cache_test.go
+++ b/crypto/merkletrie/cache_test.go
@@ -35,17 +35,10 @@ func verifyCacheNodeCount(t *testing.T, trie *Trie) {
// make sure that the pagesPrioritizationMap aligns with pagesPrioritizationList
require.Equal(t, len(trie.cache.pagesPrioritizationMap), trie.cache.pagesPrioritizationList.Len())
- // if we're not within a transaction, the following should also hold true:
- if !trie.cache.modified {
- require.Equal(t, len(trie.cache.pageToNIDsPtr), trie.cache.pagesPrioritizationList.Len())
- }
-
for e := trie.cache.pagesPrioritizationList.Back(); e != nil; e = e.Next() {
page := e.Value.(uint64)
_, has := trie.cache.pagesPrioritizationMap[page]
require.True(t, has)
- _, has = trie.cache.pageToNIDsPtr[page]
- require.True(t, has)
}
}
@@ -222,3 +215,158 @@ func TestCacheEvictionFuzzer2(t *testing.T) {
})
}
}
+
+// TestCacheMidTransactionPageDeletion ensures that if we need to
+// delete a in-memory page during merkleTrieCache.commitTransaction(),
+// it's being deleted correctly.
+func TestCacheMidTransactionPageDeletion(t *testing.T) {
+ var memoryCommitter smallPageMemoryCommitter
+ memoryCommitter.pageSize = 2
+ mt1, _ := MakeTrie(&memoryCommitter, defaultTestEvictSize)
+
+ // create 10000 hashes.
+ leafsCount := 10000
+ hashes := make([]crypto.Digest, leafsCount)
+ for i := 0; i < len(hashes); i++ {
+ hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
+ }
+
+ for i := 0; i < len(hashes); i++ {
+ added, err := mt1.Add(hashes[i][:])
+ require.NoError(t, err)
+ require.True(t, added)
+ }
+ for i := 0; i < len(hashes)/4; i++ {
+ deleted, err := mt1.Delete(hashes[i][:])
+ require.NoError(t, err)
+ require.True(t, deleted)
+ }
+ mt1.Commit()
+
+ // compare committed pages to the in-memory pages.
+
+ for page, pageContent := range memoryCommitter.memStore {
+ if page == storedNodeIdentifierNull {
+ continue
+ }
+
+ decodedPage, err := decodePage(pageContent)
+ require.NoError(t, err)
+
+ // stored page should have more than a single node.
+ require.Greater(t, len(decodedPage), 0)
+ }
+
+ for page, pageContent := range mt1.cache.pageToNIDsPtr {
+
+ // memory page should have more than a single node.
+ require.NotZerof(t, len(pageContent), "Memory page %d has zero nodes", page)
+
+ // memory page should also be available on disk:
+ require.NotNil(t, memoryCommitter.memStore[page])
+ }
+}
+
+// TestDeleteRollback is a modified version of the real Trie.Delete,
+// which always "fails" and rollback the transaction.
+// this function is used in TestCacheTransactionRollbackPageDeletion
+func (mt *Trie) TestDeleteRollback(d []byte) (bool, error) {
+ if mt.root == storedNodeIdentifierNull {
+ return false, nil
+ }
+ if len(d) != mt.elementLength {
+ return false, ErrMismatchingElementLength
+ }
+ pnode, err := mt.cache.getNode(mt.root)
+ if err != nil {
+ return false, err
+ }
+ found, err := pnode.find(mt.cache, d[:])
+ if !found || err != nil {
+ return false, err
+ }
+ mt.cache.beginTransaction()
+ if pnode.leaf {
+ // remove the root.
+ mt.cache.deleteNode(mt.root)
+ mt.root = storedNodeIdentifierNull
+ mt.cache.commitTransaction()
+ mt.elementLength = 0
+ return true, nil
+ }
+ _, err = pnode.remove(mt.cache, d[:], make([]byte, 0, len(d)))
+ // unlike the "real" function, we want always to fail here to test the rollbackTransaction() functionality.
+ mt.cache.rollbackTransaction()
+ return false, fmt.Errorf("this is a test for failing a Delete request")
+}
+
+// TestCacheTransactionRollbackPageDeletion ensures that if we need to
+// delete a in-memory page during merkleTrieCache.rollbackTransaction(),
+// it's being deleted correctly.
+func TestCacheTransactionRollbackPageDeletion(t *testing.T) {
+ var memoryCommitter smallPageMemoryCommitter
+ memoryCommitter.pageSize = 2
+ mt1, _ := MakeTrie(&memoryCommitter, 5)
+
+ // create 1000 hashes.
+ leafsCount := 1000
+ hashes := make([]crypto.Digest, leafsCount)
+ for i := 0; i < len(hashes); i++ {
+ hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
+ }
+
+ for i := 0; i < len(hashes); i++ {
+ added, err := mt1.Add(hashes[i][:])
+ require.NoError(t, err)
+ require.True(t, added)
+ }
+
+ mt1.Evict(true)
+
+ var deleted bool
+ var err error
+ for i := 0; i < len(hashes); i++ {
+ deleted, err = mt1.TestDeleteRollback(hashes[i][:])
+ if err != nil {
+ break
+ }
+ require.True(t, deleted)
+ }
+
+ for page, pageContent := range mt1.cache.pageToNIDsPtr {
+ // memory page should have more than a single node.
+ require.NotZerof(t, len(pageContent), "Memory page %d has zero nodes", page)
+ }
+}
+
+// TestCacheDeleteNodeMidTransaction ensures that if we need to
+// delete a in-memory page during merkleTrieCache.deleteNode(),
+// it's being deleted correctly.
+func TestCacheDeleteNodeMidTransaction(t *testing.T) {
+ var memoryCommitter smallPageMemoryCommitter
+ memoryCommitter.pageSize = 1
+ mt1, _ := MakeTrie(&memoryCommitter, 5)
+
+ // create 1000 hashes.
+ leafsCount := 10000
+ hashes := make([]crypto.Digest, leafsCount)
+ for i := 0; i < len(hashes); i++ {
+ hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)})
+ }
+
+ for i := 0; i < len(hashes); i++ {
+ added, err := mt1.Add(hashes[i][:])
+ require.NoError(t, err)
+ require.True(t, added)
+ }
+ for i := 0; i < len(hashes); i++ {
+ deleted, err := mt1.Delete(hashes[i][:])
+ require.NoError(t, err)
+ require.True(t, deleted)
+ }
+
+ for page, pageContent := range mt1.cache.pageToNIDsPtr {
+ // memory page should have more than a single node.
+ require.NotZerof(t, len(pageContent), "Memory page %d has zero nodes", page)
+ }
+}
diff --git a/data/pools/ewma.go b/data/pools/ewma.go
deleted file mode 100644
index ba0f740da..000000000
--- a/data/pools/ewma.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright (C) 2019-2020 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package pools
-
-import (
- "fmt"
-)
-
-// EWMA represents an exponentially moving average based on the following formula -
-// EWMA(S_0) = S_0
-// EWMA(S_n) = \alpha*S_n + (1-\alpha)*S_{n-1}
-type EWMA struct {
- alpha float64
- value float64
- running bool
-}
-
-// NewEMA creates a new Exponentially Moving Average
-func NewEMA(alpha float64) (*EWMA, error) {
- if alpha <= 0 || alpha > 1 {
- return nil, fmt.Errorf("alpha must be between 0 and 1")
- }
- return &EWMA{alpha: alpha}, nil
-}
-
-// Add adds a new sample to the EWMA
-func (ewma *EWMA) Add(sample float64) {
- if !ewma.running {
- ewma.value = sample
- ewma.running = true
- return
- }
- ewma.value = ewma.alpha*sample + (1-ewma.alpha)*ewma.value
-}
-
-// Value returns the current EWMA value rounded to the nearest integer
-func (ewma *EWMA) Value() uint64 {
- return uint64(ewma.value)
-}
diff --git a/data/pools/ewma_test.go b/data/pools/ewma_test.go
deleted file mode 100644
index 2f8dbc33d..000000000
--- a/data/pools/ewma_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright (C) 2019-2020 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package pools
-
-import (
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestMakeFeeTrackerError(t *testing.T) {
- _, err := NewEMA(-5)
- assert.Error(t, err)
-
- _, err = NewEMA(5)
- assert.Error(t, err)
-
- ft, err := NewEMA(0.364)
- assert.NoError(t, err)
-
- assert.Equal(t, ft.alpha, 0.364)
- assert.Zero(t, ft.value)
-}
-
-func TestEWMA_Same_Number(t *testing.T) {
- alpha := 0.4151165
- ft, err := NewEMA(alpha)
- assert.NoError(t, err)
-
- assert.Equal(t, ft.alpha, alpha)
- assert.Zero(t, ft.value)
-
- ft.Add(5)
- ft.Add(5)
- ft.Add(5)
- ft.Add(5)
- ft.Add(5)
- ft.Add(5)
-
- // Avg of N times Y is Y regardless of alpha
- assert.Equal(t, uint64(5), ft.Value())
-
-}
-
-func TestEWMA_Number(t *testing.T) {
- alpha := 2 / 6.0
- ft, err := NewEMA(alpha)
- assert.NoError(t, err)
-
- assert.Equal(t, ft.alpha, alpha)
- assert.Zero(t, ft.value)
-
- ft.Add(1)
- ft.Add(2)
- ft.Add(3)
- ft.Add(4)
- ft.Add(5)
-
- assert.Equal(t, uint64(3), ft.Value())
-
-}
-
-func TestEWMA_Number2(t *testing.T) {
- alpha := 0.5
- ft, err := NewEMA(alpha)
- assert.NoError(t, err)
-
- assert.Equal(t, ft.alpha, alpha)
- assert.Zero(t, ft.value)
-
- ft.Add(-1)
- ft.Add(1)
- ft.Add(10)
-
- assert.Equal(t, uint64(5), ft.Value())
-
-}
diff --git a/data/pools/feeTracker.go b/data/pools/feeTracker.go
deleted file mode 100644
index dde629f7b..000000000
--- a/data/pools/feeTracker.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright (C) 2019-2020 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package pools
-
-import (
- "sort"
-
- "github.com/algorand/go-deadlock"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
-)
-
-// feeTracker keeps track of the EWMA of the medians of the past 50 blocks
-
-var (
- // lag value for the EWMA. (\alpha = frac{2}{1+L})
- decayValue float64
-)
-
-// FeeTracker keeps track of the fees on the ledger and provides suggested fee
-type FeeTracker struct {
- mu deadlock.Mutex
- ewma *EWMA
-}
-
-// MakeFeeTracker creates a new Fee Tracker
-func MakeFeeTracker() (*FeeTracker, error) {
- ft := FeeTracker{}
-
- // init decay value
- decayValue = 2.0 / (1 + float64(config.GetDefaultLocal().SuggestedFeeSlidingWindowSize))
-
- ewma, err := NewEMA(decayValue)
- if err != nil {
- return nil, err
- }
- ft.ewma = ewma
- return &ft, nil
-}
-
-// EstimateFee returns the current suggested fee per byte
-func (ft *FeeTracker) EstimateFee() basics.MicroAlgos {
- ft.mu.Lock()
- defer ft.mu.Unlock()
-
- return basics.MicroAlgos{Raw: ft.ewma.Value()}
-}
-
-// ProcessBlock takes a block and update the current suggested fee
-func (ft *FeeTracker) ProcessBlock(block bookkeeping.Block) {
- // If the block is less than half full, drive the suggested fee down rapidly. Suggested Fee may fall to zero, but algod API client will be responsible for submitting transactions with at least MinTxnFee
- if len(protocol.Encode(&block.Payset)) < config.Consensus[block.CurrentProtocol].MaxTxnBytesPerBlock/2 {
- ft.add(1.0)
- return
- }
-
- // Get the median of the block
- payset, err := block.DecodePaysetFlat()
- if err != nil {
- return
- }
-
- fees := make([]float64, len(payset))
- for i, txad := range payset {
- tx := txad.SignedTxn
- fees[i] = ft.processTransaction(tx)
- }
-
- // Add median to EWMA
- ft.add(median(fees))
-}
-
-// add adds the given value to the ewma
-func (ft *FeeTracker) add(n float64) {
- ft.mu.Lock()
- defer ft.mu.Unlock()
- ft.ewma.Add(n)
-}
-
-// processTransaction takes a transaction and process it
-func (ft *FeeTracker) processTransaction(txn transactions.SignedTxn) float64 {
- // return the fee per byte
- return float64(txn.Txn.Fee.Raw) / float64(txn.GetEncodedLength())
-}
-
-func median(input []float64) float64 {
- sort.Float64s(input)
-
- l := len(input)
- if l == 0 {
- return 0
- } else if l%2 == 0 {
- return (input[l/2-1] + input[l/2]) / 2
- }
- return input[l/2]
-}
diff --git a/data/pools/feeTracker_test.go b/data/pools/feeTracker_test.go
deleted file mode 100644
index 3d0f34e44..000000000
--- a/data/pools/feeTracker_test.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Copyright (C) 2019-2020 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package pools
-
-import (
- "math/rand"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/data/bookkeeping"
- "github.com/algorand/go-algorand/data/transactions"
- "github.com/algorand/go-algorand/protocol"
-)
-
-func TestFeeTracker_ProcessBlock(t *testing.T) {
- numOfAccounts := 5
- // Genereate accounts
- secrets := make([]*crypto.SignatureSecrets, numOfAccounts)
- addresses := make([]basics.Address, numOfAccounts)
-
- r := rand.New(rand.NewSource(99))
-
- for i := 0; i < numOfAccounts; i++ {
- secret := keypair()
- addr := basics.Address(secret.SignatureVerifier)
- secrets[i] = secret
- addresses[i] = addr
- }
-
- ft, err := MakeFeeTracker()
- require.NoError(t, err)
- var block bookkeeping.Block
- block.Payset = make(transactions.Payset, 0)
-
- proto := config.Consensus[protocol.ConsensusV7]
- for i, sender := range addresses {
- for j, receiver := range addresses {
- if sender != receiver {
- for k := 0; k < 1000; k++ {
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: sender,
- Fee: basics.MicroAlgos{Raw: uint64(r.Int()%10000) + proto.MinTxnFee},
- FirstValid: 0,
- LastValid: basics.Round(proto.MaxTxnLife),
- Note: make([]byte, 2),
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: receiver,
- Amount: basics.MicroAlgos{Raw: 1},
- },
- }
- tx.Note[0] = byte(i)
- tx.Note[1] = byte(j)
- signedTx := tx.Sign(secrets[i])
- txib, err := block.EncodeSignedTxn(signedTx, transactions.ApplyData{})
- require.NoError(t, err)
- block.Payset = append(block.Payset, txib)
- }
- }
- }
- }
- ft.ProcessBlock(block)
- require.Equal(t, uint64(0x1f), ft.EstimateFee().Raw)
-}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index 574f66b1b..23e35be48 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -19,6 +19,7 @@ package pools
import (
"fmt"
"sync"
+ "sync/atomic"
"time"
"github.com/algorand/go-deadlock"
@@ -60,6 +61,7 @@ type TransactionPool struct {
pendingBlockEvaluator *ledger.BlockEvaluator
numPendingWholeBlocks basics.Round
feeThresholdMultiplier uint64
+ feePerByte uint64
statusCache *statusCache
assemblyMu deadlock.Mutex
@@ -235,7 +237,16 @@ func (pool *TransactionPool) checkPendingQueueSize() error {
return nil
}
-func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error {
+// FeePerByte returns the current minimum microalgos per byte a transaction
+// needs to pay in order to get into the pool.
+func (pool *TransactionPool) FeePerByte() uint64 {
+ return atomic.LoadUint64(&pool.feePerByte)
+}
+
+// computeFeePerByte computes and returns the current minimum microalgos per byte a transaction
+// needs to pay in order to get into the pool. It also updates the atomic counter that holds
+// the current fee per byte
+func (pool *TransactionPool) computeFeePerByte() uint64 {
// The baseline threshold fee per byte is 1, the smallest fee we can
// represent. This amounts to a fee of 100 for a 100-byte txn, which
// is well below MinTxnFee (1000). This means that, when the pool
@@ -265,6 +276,18 @@ func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn
feePerByte *= pool.expFeeFactor
}
+ // Update the counter for fast reads
+ atomic.StoreUint64(&pool.feePerByte, feePerByte)
+
+ return feePerByte
+}
+
+// checkSufficientFee take a set of signed transactions and verifies that each transaction has
+// sufficient fee to get into the transaction pool
+func (pool *TransactionPool) checkSufficientFee(txgroup []transactions.SignedTxn) error {
+ // get the current fee per byte
+ feePerByte := pool.computeFeePerByte()
+
for _, t := range txgroup {
feeThreshold := feePerByte * uint64(t.GetEncodedLength())
if t.Txn.Fee.Raw < feeThreshold {
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 4f85e18e5..1116841eb 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -885,6 +885,57 @@ func TestLogicSigOK(t *testing.T) {
require.NoError(t, transactionPool.RememberOne(signedTx, verify.Params{}))
}
+func TestTransactionPool_CurrentFeePerByte(t *testing.T) {
+ numOfAccounts := 5
+ // Generate accounts
+ secrets := make([]*crypto.SignatureSecrets, numOfAccounts)
+ addresses := make([]basics.Address, numOfAccounts)
+
+ for i := 0; i < numOfAccounts; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ }
+
+ l := makeMockLedger(t, initAccFixed(addresses, 1<<32))
+ cfg := config.GetDefaultLocal()
+ cfg.TxPoolSize = testPoolSize * 15
+ cfg.EnableProcessBlockStats = false
+ transactionPool := MakeTransactionPool(l, cfg)
+
+ for i, sender := range addresses {
+ for j := 0; j < testPoolSize*15/len(addresses); j++ {
+ var receiver basics.Address
+ crypto.RandBytes(receiver[:])
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: uint64(rand.Int()%10000) + proto.MinTxnFee},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ Note: make([]byte, 2),
+ GenesisHash: l.GenesisHash(),
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: receiver,
+ Amount: basics.MicroAlgos{Raw: proto.MinBalance},
+ },
+ }
+ tx.Note = make([]byte, 8, 8)
+ crypto.RandBytes(tx.Note)
+ signedTx := tx.Sign(secrets[i])
+ err := transactionPool.RememberOne(signedTx, verify.Params{})
+ require.NoError(t, err)
+ }
+ }
+
+ // The fee should be 2^(number of whole blocks - 1)
+ require.Equal(t, uint64(1<<(transactionPool.numPendingWholeBlocks-1)), transactionPool.FeePerByte())
+
+}
+
func BenchmarkTransactionPoolRememberOne(b *testing.B) {
numOfAccounts := 5
// Generate accounts
diff --git a/go.mod b/go.mod
index b1bddad3c..0fa87eade 100644
--- a/go.mod
+++ b/go.mod
@@ -16,7 +16,6 @@ require (
github.com/fortytw2/leaktest v1.3.0 // indirect
github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f
github.com/getkin/kin-openapi v0.14.0
- github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f // indirect
github.com/gofrs/flock v0.7.0
github.com/google/go-querystring v1.0.0
@@ -31,9 +30,9 @@ require (
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2
github.com/labstack/echo/v4 v4.1.16
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 // indirect
- github.com/matryer/moq v0.0.0-20200607124540-4638a53893e6 // indirect
github.com/mattn/go-sqlite3 v1.10.0
github.com/miekg/dns v1.1.27
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
github.com/olivere/elastic v6.2.14+incompatible
github.com/onsi/ginkgo v1.8.0 // indirect
@@ -43,16 +42,12 @@ require (
github.com/sirupsen/logrus v1.0.5
github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.3 // indirect
- github.com/stretchr/objx v0.2.0 // indirect
github.com/stretchr/testify v1.6.1
- github.com/yuin/goldmark v1.1.32 // indirect
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9
- golang.org/x/mod v0.3.0 // indirect
golang.org/x/net v0.0.0-20200602114024-627f9648deb9
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a // indirect
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1
golang.org/x/text v0.3.3 // indirect
- golang.org/x/tools v0.0.0-20200618155944-c7475b9d7fb2 // indirect
google.golang.org/appengine v1.6.1 // indirect
gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect
diff --git a/go.sum b/go.sum
index 918b6e271..90b932a0e 100644
--- a/go.sum
+++ b/go.sum
@@ -14,9 +14,6 @@ github.com/aws/aws-sdk-go v1.16.5 h1:NVxzZXIuwX828VcJrpNxxWjur1tlOBISdMdDdHIKHcc
github.com/aws/aws-sdk-go v1.16.5/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/cpuguy83/go-md2man v1.0.8 h1:DwoNytLphI8hzS2Af4D0dfaEaiSq2bN05mEm4R6vf8M=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
-github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20191230040416-20a325f050d4/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -29,40 +26,46 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumC
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f h1:eyHMPp7tXlBMF8PZHdsL89G0ehuRNflu7zKUeoQjcJ0=
github.com/gen2brain/beeep v0.0.0-20180718162406-4e430518395f/go.mod h1:GprdPCZglWh5OMcIDpeKBxuUJI+fEDOTVUfxZeda4zo=
github.com/getkin/kin-openapi v0.3.1/go.mod h1:W8dhxZgpE84ciM+VIItFqkmZ4eHtuomrdIHtASQIqi0=
-github.com/getkin/kin-openapi v0.9.0 h1:/vaUQkiOR+vfFO3oilZentZTfAhz7OzXPhLdNas4q4w=
-github.com/getkin/kin-openapi v0.9.0/go.mod h1:zZQMFkVgRHCdhgb6ihCTIo9dyDZFvX0k/xAKqw1FhPw=
github.com/getkin/kin-openapi v0.14.0 h1:hqwQL7kze/adt0wB+0UJR2nJm+gfUHqM0Gu4D8nByVc=
github.com/getkin/kin-openapi v0.14.0/go.mod h1:WGRs2ZMM1Q8LR1QBEwUxC6RJEfaBcD0s+pcEVXFuAjw=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
-github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec=
-github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ=
+github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f h1:zlOR3rOlPAVvtfuxGKoghCmop5B0TRyu/ZieziZuGiM=
github.com/godbus/dbus v0.0.0-20181101234600-2ff6f7ffd60f/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gofrs/flock v0.7.0 h1:pGFUjl501gafK9HBt1VGL1KCOd/YhIooID+xgyJCf3g=
github.com/gofrs/flock v0.7.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gopherjs/gopherwasm v1.0.1 h1:Gmj9RMDjh+P9EFzzQltoCtjAxR5mUkaJqaNPfeaNe2I=
github.com/gopherjs/gopherwasm v1.0.1/go.mod h1:SkZ8z7CWBz5VXbhJel8TxCmAcsQqzgWGR/8nMhyhZSI=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/schema v1.0.2 h1:sAgNfOcNYvdDSrzGHVy9nzCQahG+qmsg+nE8dK85QRA=
github.com/gorilla/schema v1.0.2/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU=
+github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -71,20 +74,18 @@ github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhB
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2 h1:BkkpZxPVs3gIf+3Tejt8lWzuo2P29N1ChGUMEpuSJ8U=
github.com/karalabe/hid v0.0.0-20181128192157-d815e0c1a2e2/go.mod h1:YvbcH+3Wo6XPs9nkgTY3u19KXLauXW+J5nB7hEHuX0A=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v4 v4.1.16 h1:8swiwjE5Jkai3RPfZoahp8kjVCRNq+y7Q0hPji2Kz0o=
github.com/labstack/echo/v4 v4.1.16/go.mod h1:awO+5TzAjvL8XpibdsfXxPgHr+orhtXZJZIQCVjogKI=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/matryer/moq v0.0.0-20200310130814-7721994d1b54/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
-github.com/matryer/moq v0.0.0-20200607124540-4638a53893e6/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -97,11 +98,16 @@ github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK86
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM=
github.com/miekg/dns v1.1.27/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
github.com/olivere/elastic v6.2.14+incompatible h1:k+KadwNP/dkXE0/eu+T6otk1+5fe0tEpPyQJ4XVm5i8=
github.com/olivere/elastic v6.2.14+incompatible/go.mod h1:J+q1zQJTgAz9woqsbVRqGeB5G1iqDKVBWLNSYW8yfJ8=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
@@ -121,7 +127,6 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
@@ -136,7 +141,6 @@ github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPU
github.com/valyala/fasttemplate v1.1.0 h1:RZqt0yGBsps8NGvLSGW804QQqCUYYLsaOjTVHy1Ocw4=
github.com/valyala/fasttemplate v1.1.0/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -146,8 +150,6 @@ golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9 h1:vEg9joUBmeBcK9iSJftGNf
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
-golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -161,6 +163,7 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -173,8 +176,6 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200413165638-669c56c373c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200610111108-226ff32320da h1:bGb80FudwxpeucJUjPYJXuJ8Hk91vNtfvrymzwiei38=
-golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -188,24 +189,27 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200423205358-59e73619c742/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200610052024-8d7dbee4c8ae/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2 h1:FD4wDsP+CQUqh2V12OBOt90pLHVToe58P++fUu3ggV4=
-golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200618155944-c7475b9d7fb2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009 h1:q/fZgS8MMadqFFGa8WL4Oyz+TmjiZfi8UrzWhTl8d5w=
gopkg.in/sohlich/elogrus.v3 v3.0.0-20180410122755-1fa29e2f2009/go.mod h1:O0bY1e/dSoxMYZYTHP0SWKxG5EWLEvKR9/cOjWPPMKU=
+gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2 h1:MZF6J7CV6s/h0HBkfqebrYfKCVEo5iN+wzE4QhV3Evo=
gopkg.in/toast.v1 v1.0.0-20180812000517-0a84660828b2/go.mod h1:s1Sn2yZos05Qfs7NKt867Xe18emOmtsO3eAKbDaon0o=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff --git a/installer/config.json.example b/installer/config.json.example
index 4eb867ed7..93fe6229b 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -1,5 +1,5 @@
{
- "Version": 9,
+ "Version": 10,
"AnnounceParticipationKey": true,
"Archival": false,
"BaseLoggerDebugLevel": 4,
@@ -50,6 +50,7 @@
"NetworkProtocolVersion": "",
"NodeExporterListenAddress": ":9100",
"NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
"OutgoingMessageFilterBucketCount": 3,
"OutgoingMessageFilterBucketSize": 128,
"PeerConnectionsUpdateInterval": 3600,
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index cb9551633..c6f148dac 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -100,7 +100,7 @@ var accountsResetExprs = []string{
// accountDBVersion is the database version that this binary would know how to support and how to upgrade to.
// details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX
// and their descriptions.
-var accountDBVersion = int32(2)
+var accountDBVersion = int32(3)
type accountDelta struct {
old basics.AccountData
@@ -305,7 +305,7 @@ func accountsDbInit(r db.Queryable, w db.Queryable) (*accountsDbQueries, error)
var err error
qs := &accountsDbQueries{}
- qs.listCreatablesStmt, err = r.Prepare("SELECT asset, creator, ctype FROM assetcreators WHERE asset <= ? AND ctype = ? ORDER BY asset desc LIMIT ?")
+ qs.listCreatablesStmt, err = r.Prepare("SELECT asset, creator FROM assetcreators WHERE asset <= ? AND ctype = ? ORDER BY asset desc LIMIT ?")
if err != nil {
return nil, err
}
@@ -751,47 +751,6 @@ func updateAccountsRound(tx *sql.Tx, rnd basics.Round, hashRound basics.Round) (
return
}
-// encodedAccountsRange returns an array containing the account data, in the same way it appear in the database
-// starting at entry startAccountIndex, and up to accountCount accounts long.
-func encodedAccountsRange(ctx context.Context, tx *sql.Tx, startAccountIndex, accountCount int) (bals []encodedBalanceRecord, err error) {
- rows, err := tx.QueryContext(ctx, "SELECT address, data FROM accountbase ORDER BY rowid LIMIT ? OFFSET ?", accountCount, startAccountIndex)
- if err != nil {
- return
- }
- defer rows.Close()
-
- bals = make([]encodedBalanceRecord, 0, accountCount)
- var addr basics.Address
- for rows.Next() {
- var addrbuf []byte
- var buf []byte
- err = rows.Scan(&addrbuf, &buf)
- if err != nil {
- return
- }
-
- if len(addrbuf) != len(addr) {
- err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
- return
- }
-
- copy(addr[:], addrbuf)
-
- bals = append(bals, encodedBalanceRecord{Address: addr, AccountData: buf})
- }
-
- err = rows.Err()
- if err == nil {
- // the encodedAccountsRange typically called in a loop iterating over all the accounts. This could clearly take more than the
- // "standard" 1 second, so we want to extend the timeout on each iteration. If the last iteration takes more than a second, then
- // it should be noted. The one second here is quite liberal to ensure symmetrical behaviour on low-power devices.
- // The return value from ResetTransactionWarnDeadline can be safely ignored here since it would only default to writing the warnning
- // message, which would let us know that it failed anyway.
- db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(time.Second))
- }
- return
-}
-
// totalAccounts returns the total number of accounts
func totalAccounts(ctx context.Context, tx *sql.Tx) (total uint64, err error) {
err = tx.QueryRowContext(ctx, "SELECT count(*) FROM accountbase").Scan(&total)
@@ -938,3 +897,68 @@ func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) {
func (mc *merkleCommitter) GetNodesCountPerPage() (pageSize int64) {
return merkleCommitterNodesPerPage
}
+
+// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
+type encodedAccountsBatchIter struct {
+ rows *sql.Rows
+ orderByAddress bool
+}
+
+// Next returns an array containing the account data, in the same way it appears in the database,
+// returning up to accountCount accounts at a time.
+func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int) (bals []encodedBalanceRecord, err error) {
+ if iterator.rows == nil {
+ if iterator.orderByAddress {
+ iterator.rows, err = tx.QueryContext(ctx, "SELECT address, data FROM accountbase ORDER BY address")
+ } else {
+ iterator.rows, err = tx.QueryContext(ctx, "SELECT address, data FROM accountbase")
+ }
+
+ if err != nil {
+ return
+ }
+ }
+
+ // gather up to accountCount encoded accounts.
+ bals = make([]encodedBalanceRecord, 0, accountCount)
+ var addr basics.Address
+ for iterator.rows.Next() {
+ var addrbuf []byte
+ var buf []byte
+ err = iterator.rows.Scan(&addrbuf, &buf)
+ if err != nil {
+ iterator.Close()
+ return
+ }
+
+ if len(addrbuf) != len(addr) {
+ err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
+ return
+ }
+
+ copy(addr[:], addrbuf)
+
+ bals = append(bals, encodedBalanceRecord{Address: addr, AccountData: buf})
+ if len(bals) == accountCount {
+ // we're done with this iteration.
+ return
+ }
+ }
+
+ err = iterator.rows.Err()
+ if err != nil {
+ iterator.Close()
+ return
+ }
+ // we just finished reading the table.
+ iterator.Close()
+ return
+}
+
+// Close shuts down the encodedAccountsBatchIter, releasing database resources.
+func (iterator *encodedAccountsBatchIter) Close() {
+ if iterator.rows != nil {
+ iterator.rows.Close()
+ iterator.rows = nil
+ }
+}
diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go
index 9f6748971..9d9374847 100644
--- a/ledger/accountdb_test.go
+++ b/ledger/accountdb_test.go
@@ -543,7 +543,10 @@ func randomCreatableSampling(iteration int, crtbsList []basics.CreatableIndex,
for i := delSegmentStart; i < delSegmentEnd; i++ {
ctb := creatables[crtbsList[i]]
- if ctb.created && 1 == (crypto.RandUint64()%2) {
+ if ctb.created &&
+ // Always delete the first element, to make sure at least one
+ // element is always deleted.
+ (i == delSegmentStart || 1 == (crypto.RandUint64()%2)) {
ctb.created = false
newSample[crtbsList[i]] = ctb
delete(expectedDbImage, crtbsList[i])
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index bc796d3c4..c33648dee 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -50,7 +50,9 @@ const (
pendingDeltasFlushThreshold = 128
// trieRebuildAccountChunkSize defines the number of accounts that would get read at a single chunk
// before added to the trie during trie construction
- trieRebuildAccountChunkSize = 512
+ trieRebuildAccountChunkSize = 16384
+ // trieRebuildCommitFrequency defines the number of accounts that would get added before we call evict to commit the changes and adjust the memory cache.
+ trieRebuildCommitFrequency = 65536
// trieAccumulatedChangesFlush defines the number of pending changes that would be applied to the merkle trie before
// we attempt to commit them to disk while writing a batch of rounds balances to disk.
trieAccumulatedChangesFlush = 256
@@ -112,6 +114,9 @@ type accountUpdates struct {
// 0 means don't store any, -1 mean unlimited and positive number suggest the number of most recent catchpoint files.
catchpointFileHistoryLength int
+ // vacuumOnStartup controls whether the accounts database would get vacuumed on startup.
+ vacuumOnStartup bool
+
// dynamic variables
// Connection to the database.
@@ -218,6 +223,7 @@ func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, gene
if cfg.CatchpointFileHistoryLength < -1 {
au.catchpointFileHistoryLength = -1
}
+ au.vacuumOnStartup = cfg.OptimizeAccountsDatabaseOnStartup
// initialize the commitSyncerClosed with a closed channel ( since the commitSyncer go-routine is not active )
au.commitSyncerClosed = make(chan struct{})
close(au.commitSyncerClosed)
@@ -706,11 +712,14 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo
return
}
- au.accountsq, err = accountsDbInit(au.dbs.rdb.Handle, au.dbs.wdb.Handle)
+ // the vacuumDatabase call would be a no-op if au.vacuumOnStartup is cleared.
+ au.vacuumDatabase(context.Background())
if err != nil {
return
}
+ au.accountsq, err = accountsDbInit(au.dbs.rdb.Handle, au.dbs.wdb.Handle)
+
au.lastCatchpointLabel, _, err = au.accountsq.readCatchpointStateString(context.Background(), catchpointStateLastCatchpoint)
if err != nil {
return
@@ -791,6 +800,12 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b
au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 1 : %v", err)
return 0, err
}
+ case 2:
+ dbVersion, err = au.upgradeDatabaseSchema2(ctx, tx)
+ if err != nil {
+ au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 2 : %v", err)
+ return 0, err
+ }
default:
return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion)
}
@@ -833,16 +848,25 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b
if err != nil {
return rnd, fmt.Errorf("accountsInitialize was unable to retrieve trie root hash: %v", err)
}
+
if rootHash.IsZero() {
- accountIdx := 0
+ au.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
+ var accountsIterator encodedAccountsBatchIter
+ defer accountsIterator.Close()
+ startTrieBuildTime := time.Now()
+ accountsCount := 0
+ lastRebuildTime := startTrieBuildTime
+ pendingAccounts := 0
for {
- bal, err := encodedAccountsRange(ctx, tx, accountIdx, trieRebuildAccountChunkSize)
+ bal, err := accountsIterator.Next(ctx, tx, trieRebuildAccountChunkSize)
if err != nil {
return rnd, err
}
if len(bal) == 0 {
break
}
+ accountsCount += len(bal)
+ pendingAccounts += len(bal)
for _, balance := range bal {
var accountData basics.AccountData
err = protocol.Decode(balance.AccountData, &accountData)
@@ -859,16 +883,32 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b
}
}
- // this trie Evict will commit using the current transaction.
- // if anything goes wrong, it will still get rolled back.
- _, err = trie.Evict(true)
- if err != nil {
- return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ if pendingAccounts >= trieRebuildCommitFrequency {
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
+ }
+ pendingAccounts = 0
}
+
if len(bal) < trieRebuildAccountChunkSize {
break
}
- accountIdx += trieRebuildAccountChunkSize
+
+ if time.Now().Sub(lastRebuildTime) > 5*time.Second {
+ // let the user know that the trie is still being rebuilt.
+ au.log.Infof("accountsInitialize still building the trie, and processed so far %d accounts", accountsCount)
+ lastRebuildTime = time.Now()
+ }
+ }
+
+ // this trie Evict will commit using the current transaction.
+ // if anything goes wrong, it will still get rolled back.
+ _, err = trie.Evict(true)
+ if err != nil {
+ return 0, fmt.Errorf("accountsInitialize was unable to commit changes to trie: %v", err)
}
// we've just updated the markle trie, update the hashRound to reflect that.
@@ -876,6 +916,8 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b
if err != nil {
return 0, fmt.Errorf("accountsInitialize was unable to update the account round to %d: %v", rnd, err)
}
+
+ au.log.Infof("accountsInitialize rebuilt the merkle trie with %d entries in %v", accountsCount, time.Now().Sub(startTrieBuildTime))
}
au.balancesTrie = trie
return rnd, nil
@@ -983,6 +1025,23 @@ func (au *accountUpdates) upgradeDatabaseSchema1(ctx context.Context, tx *sql.Tx
return 2, nil
}
+// upgradeDatabaseSchema2 upgrades the database schema from version 2 to version 3
+//
+// This upgrade only enables the database vacuuming which will take place once the upgrade process is complete.
+// If the user has already specified the OptimizeAccountsDatabaseOnStartup flag in the configuration file, this
+// step becomes a no-op.
+//
+func (au *accountUpdates) upgradeDatabaseSchema2(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) {
+ au.vacuumOnStartup = true
+
+ // update version
+ _, err = db.SetUserVersion(ctx, tx, 3)
+ if err != nil {
+ return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 2 to 3: %v", err)
+ }
+ return 3, nil
+}
+
// deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk.
// once all the files have been deleted, it would go ahead and remove the entries from the table.
func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) {
@@ -1492,8 +1551,6 @@ func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label
relCatchpointFileName := filepath.Join("catchpoints", catchpointRoundToPath(committedRound))
absCatchpointFileName := filepath.Join(au.dbDirectory, relCatchpointFileName)
- catchpointWriter := makeCatchpointWriter(absCatchpointFileName, au.dbs.rdb, committedRound, committedRoundDigest, label)
-
more := true
const shortChunkExecutionDuration = 50 * time.Millisecond
const longChunkExecutionDuration = 1 * time.Second
@@ -1504,37 +1561,54 @@ func (au *accountUpdates) generateCatchpoint(committedRound basics.Round, label
default:
chunkExecutionDuration = shortChunkExecutionDuration
}
- for more {
- stepCtx, stepCancelFunction := context.WithTimeout(au.ctx, chunkExecutionDuration)
- writeStepStartTime := time.Now()
- more, err = catchpointWriter.WriteStep(stepCtx)
- // accumulate the actual time we've spent writing in this step.
- catchpointGenerationStats.CPUTime += uint64(time.Now().Sub(writeStepStartTime).Nanoseconds())
- stepCancelFunction()
- if more && err == nil {
- // we just wrote some data, but there is more to be written.
- // go to sleep for while.
- select {
- case <-time.After(100 * time.Millisecond):
- case <-au.ctx.Done():
- retryCatchpointCreation = true
+
+ var catchpointWriter *catchpointWriter
+ err = au.dbs.rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ catchpointWriter = makeCatchpointWriter(au.ctx, absCatchpointFileName, tx, committedRound, committedRoundDigest, label)
+ for more {
+ stepCtx, stepCancelFunction := context.WithTimeout(au.ctx, chunkExecutionDuration)
+ writeStepStartTime := time.Now()
+ more, err = catchpointWriter.WriteStep(stepCtx)
+ // accumulate the actual time we've spent writing in this step.
+ catchpointGenerationStats.CPUTime += uint64(time.Now().Sub(writeStepStartTime).Nanoseconds())
+ stepCancelFunction()
+ if more && err == nil {
+ // we just wrote some data, but there is more to be written.
+ // go to sleep for while.
+ // before going to sleep, extend the transaction timeout so that we won't get warnings:
+ db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(1*time.Second))
+ select {
+ case <-time.After(100 * time.Millisecond):
+ case <-au.ctx.Done():
+ retryCatchpointCreation = true
+ err2 := catchpointWriter.Abort()
+ if err2 != nil {
+ return fmt.Errorf("error removing catchpoint file : %v", err2)
+ }
+ return nil
+ case <-au.catchpointSlowWriting:
+ chunkExecutionDuration = longChunkExecutionDuration
+ }
+ }
+ if err != nil {
+ err = fmt.Errorf("unable to create catchpoint : %v", err)
err2 := catchpointWriter.Abort()
if err2 != nil {
au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
}
return
- case <-au.catchpointSlowWriting:
- chunkExecutionDuration = longChunkExecutionDuration
}
}
- if err != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: unable to create catchpoint : %v", err)
- err2 := catchpointWriter.Abort()
- if err2 != nil {
- au.log.Warnf("accountUpdates: generateCatchpoint: error removing catchpoint file : %v", err2)
- }
- return
- }
+ return
+ })
+
+ if err != nil {
+ au.log.Warnf("accountUpdates: generateCatchpoint: %v", err)
+ return
+ }
+ if catchpointWriter == nil {
+ au.log.Warnf("accountUpdates: generateCatchpoint: nil catchpointWriter")
+ return
}
err = au.saveCatchpointFile(committedRound, relCatchpointFileName, catchpointWriter.GetSize(), catchpointWriter.GetCatchpoint())
@@ -1611,3 +1685,52 @@ func (au *accountUpdates) saveCatchpointFile(round basics.Round, fileName string
}
return
}
+
+// vacuumDatabase performs a full vacuum of the accounts database.
+func (au *accountUpdates) vacuumDatabase(ctx context.Context) (err error) {
+ if !au.vacuumOnStartup {
+ return
+ }
+
+ startTime := time.Now()
+ vacuumExitCh := make(chan struct{}, 1)
+ vacuumLoggingAbort := sync.WaitGroup{}
+ vacuumLoggingAbort.Add(1)
+ // vacuuming the database can take a while. A long while. We want to have a logging function running in a separate go-routine that would log the progress to the log file.
+ // also, when we're done vacuuming, we should send an event notifying of the total time it took to vacuum the database.
+ go func() {
+ defer vacuumLoggingAbort.Done()
+ au.log.Infof("Vacuuming accounts database started")
+ for {
+ select {
+ case <-time.After(5 * time.Second):
+ au.log.Infof("Vacuuming accounts database in progress")
+ case <-vacuumExitCh:
+ return
+ }
+ }
+ }()
+
+ vacuumStats, err := au.dbs.wdb.Vacuum(ctx)
+ close(vacuumExitCh)
+ vacuumLoggingAbort.Wait()
+
+ if err != nil {
+ au.log.Warnf("Vacuuming account database failed : %v", err)
+ return err
+ }
+ vacuumElapsedTime := time.Now().Sub(startTime)
+
+ au.log.Infof("Vacuuming accounts database completed within %v, reducing number of pages from %d to %d and size from %d to %d", vacuumElapsedTime, vacuumStats.PagesBefore, vacuumStats.PagesAfter, vacuumStats.SizeBefore, vacuumStats.SizeAfter)
+
+ vacuumTelemetryStats := telemetryspec.BalancesAccountVacuumEventDetails{
+ VacuumTimeNanoseconds: vacuumElapsedTime.Nanoseconds(),
+ BeforeVacuumPageCount: vacuumStats.PagesBefore,
+ AfterVacuumPageCount: vacuumStats.PagesAfter,
+ BeforeVacuumSpaceBytes: vacuumStats.SizeBefore,
+ AfterVacuumSpaceBytes: vacuumStats.SizeAfter,
+ }
+
+ au.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.BalancesAccountVacuumEvent, vacuumTelemetryStats)
+ return
+}
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index a63f0458b..ff198f30f 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -845,3 +845,178 @@ func TestAcctUpdatesDeleteStoredCatchpoints(t *testing.T) {
require.NoError(t, err)
require.Equal(t, 0, len(fileNames))
}
+
+// listAndCompareComb lists the assets/applications and then compares against the expected
+// It repeats with different combinations of the limit parameters
+func listAndCompareComb(t *testing.T, au *accountUpdates, expected map[basics.CreatableIndex]modifiedCreatable) {
+
+ // test configuration parameters
+
+ // pick the second largest index for the app and asset
+ // This is to make sure exactly one element is left out
+ // as a result of max index
+ maxAss1 := basics.CreatableIndex(0)
+ maxAss2 := basics.CreatableIndex(0)
+ maxApp1 := basics.CreatableIndex(0)
+ maxApp2 := basics.CreatableIndex(0)
+ for a, b := range expected {
+ // A moving window of the last two largest indexes: [maxAss1, maxAss2]
+ if b.ctype == basics.AssetCreatable {
+ if maxAss2 < a {
+ maxAss1 = maxAss2
+ maxAss2 = a
+ } else if maxAss1 < a {
+ maxAss1 = a
+ }
+ }
+ if b.ctype == basics.AppCreatable {
+ if maxApp2 < a {
+ maxApp1 = maxApp2
+ maxApp2 = a
+ } else if maxApp1 < a {
+ maxApp1 = a
+ }
+ }
+ }
+
+ // No limits. max asset index, max app index and max results have no effect
+ // This is to make sure the deleted elements do not show up
+ maxAssetIdx := basics.AssetIndex(maxAss2)
+ maxAppIdx := basics.AppIndex(maxApp2)
+ maxResults := uint64(len(expected))
+ listAndCompare(t, maxAssetIdx, maxAppIdx, maxResults, au, expected)
+
+ // Limit with max asset index and max app index (max results has no effect)
+ maxAssetIdx = basics.AssetIndex(maxAss1)
+ maxAppIdx = basics.AppIndex(maxApp1)
+ maxResults = uint64(len(expected))
+ listAndCompare(t, maxAssetIdx, maxAppIdx, maxResults, au, expected)
+
+ // Limit with max results
+ maxResults = 1
+ listAndCompare(t, maxAssetIdx, maxAppIdx, maxResults, au, expected)
+}
+
+// listAndCompare lists the assets/applications and then compares against the expected
+// It uses the provided limit parameters
+func listAndCompare(t *testing.T,
+ maxAssetIdx basics.AssetIndex,
+ maxAppIdx basics.AppIndex,
+ maxResults uint64,
+ au *accountUpdates,
+ expected map[basics.CreatableIndex]modifiedCreatable) {
+
+ // get the results with the given parameters
+ assetRes, err := au.ListAssets(maxAssetIdx, maxResults)
+ require.NoError(t, err)
+ appRes, err := au.ListApplications(maxAppIdx, maxResults)
+ require.NoError(t, err)
+
+ // count the expected number of results
+ expectedAssetCount := uint64(0)
+ expectedAppCount := uint64(0)
+ for a, b := range expected {
+ if b.created {
+ if b.ctype == basics.AssetCreatable &&
+ a <= basics.CreatableIndex(maxAssetIdx) &&
+ expectedAssetCount < maxResults {
+ expectedAssetCount++
+ }
+ if b.ctype == basics.AppCreatable &&
+ a <= basics.CreatableIndex(maxAppIdx) &&
+ expectedAppCount < maxResults {
+ expectedAppCount++
+ }
+ }
+ }
+
+ // check the total counts are as expected
+ require.Equal(t, int(expectedAssetCount), len(assetRes))
+ require.Equal(t, int(expectedAppCount), len(appRes))
+
+ // verify the results are correct
+ for _, respCrtor := range assetRes {
+ crtor := expected[respCrtor.Index]
+ require.NotNil(t, crtor)
+ require.Equal(t, basics.AssetCreatable, crtor.ctype)
+ require.Equal(t, true, crtor.created)
+
+ require.Equal(t, basics.AssetCreatable, respCrtor.Type)
+ require.Equal(t, crtor.creator, respCrtor.Creator)
+ }
+ for _, respCrtor := range appRes {
+ crtor := expected[respCrtor.Index]
+ require.NotNil(t, crtor)
+ require.Equal(t, basics.AppCreatable, crtor.ctype)
+ require.Equal(t, true, crtor.created)
+
+ require.Equal(t, basics.AppCreatable, respCrtor.Type)
+ require.Equal(t, crtor.creator, respCrtor.Creator)
+ }
+}
+
+// TestListCreatables tests ListAssets and ListApplications
+// It tests with all elements in cache, all synced to database, and combination of both
+// It also tests the max results, max app index and max asset index
+func TestListCreatables(t *testing.T) {
+
+ // test configuration parameters
+ numElementsPerSegement := 25
+
+ // set up the database
+ dbs, _ := dbOpenTest(t, true)
+ setDbLogging(t, dbs)
+ defer dbs.close()
+
+ tx, err := dbs.wdb.Handle.Begin()
+ require.NoError(t, err)
+ defer tx.Rollback()
+
+ proto := config.Consensus[protocol.ConsensusCurrentVersion]
+
+ accts := make(map[basics.Address]basics.AccountData)
+ err = accountsInit(tx, accts, proto)
+ require.NoError(t, err)
+
+ au := &accountUpdates{}
+ au.accountsq, err = accountsDbInit(tx, tx)
+ require.NoError(t, err)
+
+ // ******* All results are obtained from the cache. Empty database *******
+ // ******* No deletes *******
+ // get random data. Initial batch, no deletes
+ ctbsList, randomCtbs := randomCreatables(numElementsPerSegement)
+ expectedDbImage := make(map[basics.CreatableIndex]modifiedCreatable)
+ ctbsWithDeletes := randomCreatableSampling(1, ctbsList, randomCtbs,
+ expectedDbImage, numElementsPerSegement)
+ // set the cache
+ au.creatables = ctbsWithDeletes
+ listAndCompareComb(t, au, expectedDbImage)
+
+ // ******* All results are obtained from the database. Empty cache *******
+ // ******* No deletes *******
+ // sync with the database
+ var updates map[basics.Address]accountDelta
+ err = accountsNewRound(tx, updates, ctbsWithDeletes)
+ require.NoError(t, err)
+ // nothing left in cache
+ au.creatables = make(map[basics.CreatableIndex]modifiedCreatable)
+ listAndCompareComb(t, au, expectedDbImage)
+
+ // ******* Results are obtained from the database and from the cache *******
+ // ******* No deletes in the database. *******
+ // ******* Data in the database deleted in the cache *******
+ au.creatables = randomCreatableSampling(2, ctbsList, randomCtbs,
+ expectedDbImage, numElementsPerSegement)
+ listAndCompareComb(t, au, expectedDbImage)
+
+ // ******* Results are obtained from the database and from the cache *******
+ // ******* Deletes are in the database and in the cache *******
+ // sync with the database. This has deletes synced to the database.
+ err = accountsNewRound(tx, updates, au.creatables)
+ require.NoError(t, err)
+ // get new creatables in the cache. There will be deletes in the cache from the previous batch.
+ au.creatables = randomCreatableSampling(3, ctbsList, randomCtbs,
+ expectedDbImage, numElementsPerSegement)
+ listAndCompareComb(t, au, expectedDbImage)
+}
diff --git a/ledger/blockdb.go b/ledger/blockdb.go
index 284f9a199..844b5bbd4 100644
--- a/ledger/blockdb.go
+++ b/ledger/blockdb.go
@@ -166,6 +166,11 @@ func blockReplaceIfExists(tx *sql.Tx, log logging.Logger, blk bookkeeping.Block,
newBlk := protocol.Encode(&blk)
newCert := protocol.Encode(&cert)
+ // if the header hasn't been modified, just return.
+ if bytes.Equal(oldHdr[:], newHdr[:]) {
+ return false, nil
+ }
+
// Log if protocol version or certificate changed for the block we're replacing
if newProto != oldProto {
log.Warnf("blockReplaceIfExists(%v): old proto %v != new proto %v", blk.Round(), oldProto, newProto)
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 9756e2340..5427bc095 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -32,7 +32,6 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util/db"
)
const (
@@ -49,9 +48,10 @@ const (
// the writing is complete. It might take multiple steps until the operation is over, and the caller
// has the option of throtteling the CPU utilization in between the calls.
type catchpointWriter struct {
+ ctx context.Context
hasher hash.Hash
innerWriter io.WriteCloser
- dbr db.Accessor
+ tx *sql.Tx
filePath string
file *os.File
gzip *gzip.Writer
@@ -65,6 +65,7 @@ type catchpointWriter struct {
blocksRound basics.Round
blockHeaderDigest crypto.Digest
label string
+ accountsIterator encodedAccountsBatchIter
}
type encodedBalanceRecord struct {
@@ -94,17 +95,20 @@ type catchpointFileBalancesChunk struct {
Balances []encodedBalanceRecord `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
}
-func makeCatchpointWriter(filePath string, dbr db.Accessor, blocksRound basics.Round, blockHeaderDigest crypto.Digest, label string) *catchpointWriter {
+func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, blocksRound basics.Round, blockHeaderDigest crypto.Digest, label string) *catchpointWriter {
return &catchpointWriter{
+ ctx: ctx,
filePath: filePath,
- dbr: dbr,
+ tx: tx,
blocksRound: blocksRound,
blockHeaderDigest: blockHeaderDigest,
label: label,
+ accountsIterator: encodedAccountsBatchIter{orderByAddress: true},
}
}
func (cw *catchpointWriter) Abort() error {
+ cw.accountsIterator.Close()
if cw.tar != nil {
cw.tar.Close()
}
@@ -118,7 +122,7 @@ func (cw *catchpointWriter) Abort() error {
return err
}
-func (cw *catchpointWriter) WriteStep(ctx context.Context) (more bool, err error) {
+func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err error) {
if cw.file == nil {
err = os.MkdirAll(filepath.Dir(cw.filePath), 0700)
if err != nil {
@@ -133,19 +137,19 @@ func (cw *catchpointWriter) WriteStep(ctx context.Context) (more bool, err error
}
// have we timed-out / canceled by that point ?
- if more, err = hasContextDeadlineExceeded(ctx); more == true || err != nil {
+ if more, err = hasContextDeadlineExceeded(stepCtx); more == true || err != nil {
return
}
if cw.fileHeader == nil {
- err = cw.dbr.Atomic(cw.readHeaderFromDatabase)
+ err = cw.readHeaderFromDatabase(cw.ctx, cw.tx)
if err != nil {
return
}
}
// have we timed-out / canceled by that point ?
- if more, err = hasContextDeadlineExceeded(ctx); more == true || err != nil {
+ if more, err = hasContextDeadlineExceeded(stepCtx); more == true || err != nil {
return
}
@@ -168,19 +172,19 @@ func (cw *catchpointWriter) WriteStep(ctx context.Context) (more bool, err error
for {
// have we timed-out / canceled by that point ?
- if more, err = hasContextDeadlineExceeded(ctx); more == true || err != nil {
+ if more, err = hasContextDeadlineExceeded(stepCtx); more == true || err != nil {
return
}
if len(cw.balancesChunk.Balances) == 0 {
- err = cw.dbr.Atomic(cw.readDatabaseStep)
+ err = cw.readDatabaseStep(cw.ctx, cw.tx)
if err != nil {
return
}
}
// have we timed-out / canceled by that point ?
- if more, err = hasContextDeadlineExceeded(ctx); more == true || err != nil {
+ if more, err = hasContextDeadlineExceeded(stepCtx); more == true || err != nil {
return
}
@@ -222,7 +226,7 @@ func (cw *catchpointWriter) WriteStep(ctx context.Context) (more bool, err error
}
func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (err error) {
- cw.balancesChunk.Balances, err = encodedAccountsRange(ctx, tx, cw.balancesOffset, BalancesPerCatchpointFileChunk)
+ cw.balancesChunk.Balances, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk)
if err == nil {
cw.balancesOffset += BalancesPerCatchpointFileChunk
}
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index c64b01af8..b9672da10 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -155,14 +155,20 @@ func TestBasicCatchpointWriter(t *testing.T) {
blocksRound := basics.Round(12345)
blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
- writer := makeCatchpointWriter(fileName, ml.trackerDB().rdb, blocksRound, blockHeaderDigest, catchpointLabel)
- for {
- more, err := writer.WriteStep(context.Background())
- require.NoError(t, err)
- if !more {
- break
+
+ readDb := ml.trackerDB().rdb
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ writer := makeCatchpointWriter(context.Background(), fileName, tx, blocksRound, blockHeaderDigest, catchpointLabel)
+ for {
+ more, err := writer.WriteStep(context.Background())
+ require.NoError(t, err)
+ if !more {
+ break
+ }
}
- }
+ return
+ })
+ require.NoError(t, err)
// load the file from disk.
fileContent, err := ioutil.ReadFile(fileName)
@@ -248,14 +254,19 @@ func TestFullCatchpointWriter(t *testing.T) {
blocksRound := basics.Round(12345)
blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
- writer := makeCatchpointWriter(fileName, ml.trackerDB().rdb, blocksRound, blockHeaderDigest, catchpointLabel)
- for {
- more, err := writer.WriteStep(context.Background())
- require.NoError(t, err)
- if !more {
- break
+ readDb := ml.trackerDB().rdb
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ writer := makeCatchpointWriter(context.Background(), fileName, tx, blocksRound, blockHeaderDigest, catchpointLabel)
+ for {
+ more, err := writer.WriteStep(context.Background())
+ require.NoError(t, err)
+ if !more {
+ break
+ }
}
- }
+ return
+ })
+ require.NoError(t, err)
// create a ledger.
l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, InitState{}, conf)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index b3a4a7968..af7236b18 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -216,6 +216,10 @@ type CatchpointCatchupAccessorProgress struct {
ProcessedBytes uint64
TotalChunks uint64
SeenHeader bool
+
+ // Having the cachedTrie here helps to accelerate the catchup process since the trie maintains an internal cache of nodes.
+ // While rebuilding the trie, we don't want to force a reload of (some of) these nodes into the cache for each catchpoint file chunk.
+ cachedTrie *merkletrie.Trie
}
// ProgressStagingBalances deserialize the given bytes as a temporary staging balances
@@ -289,13 +293,19 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wdb := c.ledger.trackerDB().wdb
err = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
// create the merkle trie for the balances
- mc, err0 := makeMerkleCommitter(tx, true)
- if err0 != nil {
- return err0
- }
- trie, err := merkletrie.MakeTrie(mc, trieCachedNodesCount)
+ var mc *merkleCommitter
+ mc, err = makeMerkleCommitter(tx, true)
if err != nil {
- return err
+ return
+ }
+
+ if progress.cachedTrie == nil {
+ progress.cachedTrie, err = merkletrie.MakeTrie(mc, trieCachedNodesCount)
+ if err != nil {
+ return
+ }
+ } else {
+ progress.cachedTrie.SetCommitter(mc)
}
err = writeCatchpointStagingBalances(ctx, tx, balances.Balances)
@@ -307,7 +317,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
var accountData basics.AccountData
err = protocol.Decode(balance.AccountData, &accountData)
if err != nil {
- return err
+ return
}
// if the account has any asset params, it means that it's the creator of an asset.
@@ -315,7 +325,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
for aidx := range accountData.AssetParams {
err = writeCatchpointStagingCreatable(ctx, tx, balance.Address, basics.CreatableIndex(aidx), basics.AssetCreatable)
if err != nil {
- return err
+ return
}
}
}
@@ -324,33 +334,50 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
for aidx := range accountData.AppParams {
err = writeCatchpointStagingCreatable(ctx, tx, balance.Address, basics.CreatableIndex(aidx), basics.AppCreatable)
if err != nil {
- return err
+ return
}
}
}
hash := accountHashBuilder(balance.Address, accountData, balance.AccountData)
- added, err := trie.Add(hash)
+ var added bool
+ added, err = progress.cachedTrie.Add(hash)
if !added {
return fmt.Errorf("CatchpointCatchupAccessorImpl::processStagingBalances: The provided catchpoint file contained the same account more than once. Account address %#v, account data %#v, hash '%s'", balance.Address, accountData, hex.EncodeToString(hash))
}
if err != nil {
- return err
+ return
}
}
- err = trie.Commit()
- if err != nil {
- return
- }
+
+ // periodically, perform commit & evict to flush it to the disk and rebalance the cache memory utilization.
+ err = progress.EvictAsNeeded(uint64(len(balances.Balances)))
return
})
if err == nil {
progress.ProcessedAccounts += uint64(len(balances.Balances))
progress.ProcessedBytes += uint64(len(bytes))
}
+ // not strictly required, but clean up the pointer in case of either a failure or when we're done.
+ if err != nil || progress.ProcessedAccounts == progress.TotalAccounts {
+ progress.cachedTrie = nil
+ }
return err
}
+// EvictAsNeeded calls Evict on the cachedTrie periodically, or once we're done updating the trie.
+func (progress *CatchpointCatchupAccessorProgress) EvictAsNeeded(balancesCount uint64) (err error) {
+ if progress.cachedTrie == nil {
+ return nil
+ }
+ // periodically, perform commit & evict to flush it to the disk and rebalance the cache memory utilization.
+ if (progress.ProcessedAccounts/trieRebuildCommitFrequency) < ((progress.ProcessedAccounts+balancesCount)/trieRebuildCommitFrequency) ||
+ (progress.ProcessedAccounts+balancesCount) == progress.TotalAccounts {
+ _, err = progress.cachedTrie.Evict(true)
+ }
+ return
+}
+
// GetCatchupBlockRound returns the latest block round matching the current catchpoint
func (c *CatchpointCatchupAccessorImpl) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) {
var iRound uint64
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index ba1c79ea6..741f7a285 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -305,3 +305,21 @@ type CatchpointGenerationEventDetails struct {
// CatchpointLabel is the catchpoint label for which the catchpoint file was generated.
CatchpointLabel string
}
+
+// BalancesAccountVacuumEvent event
+const BalancesAccountVacuumEvent Event = "VacuumBalances"
+
+// BalancesAccountVacuumEventDetails is generated once the balances database gets vacuumed, and provides
+// some statistics about that event.
+type BalancesAccountVacuumEventDetails struct {
+ // VacuumTimeNanoseconds is the total amount of time, in nanoseconds, that the vacuum operation took
+ VacuumTimeNanoseconds int64
+ // BeforeVacuumPageCount is the number of pages that the balances database had prior of running the vacuuming process.
+ BeforeVacuumPageCount uint64
+ // AfterVacuumPageCount is the number of pages that the balances database had after running the vacuuming process.
+ AfterVacuumPageCount uint64
+ // BeforeVacuumSpaceBytes is the number of bytes used by the database prior of running the vacuuming process.
+ BeforeVacuumSpaceBytes uint64
+ // AfterVacuumSpaceBytes is the number of bytes used by the database after running the vacuuming process.
+ AfterVacuumSpaceBytes uint64
+}
diff --git a/network/requestTracker.go b/network/requestTracker.go
index 5fae77ce7..5e25dbf12 100644
--- a/network/requestTracker.go
+++ b/network/requestTracker.go
@@ -46,10 +46,12 @@ type TrackerRequest struct {
otherTelemetryGUID string
otherInstanceName string
otherPublicAddr string
+ connection net.Conn
+ noPrune bool
}
// makeTrackerRequest creates a new TrackerRequest.
-func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime time.Time) *TrackerRequest {
+func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime time.Time, conn net.Conn) *TrackerRequest {
if remoteHost == "" {
remoteHost, remotePort, _ = net.SplitHostPort(remoteAddr)
}
@@ -59,13 +61,15 @@ func makeTrackerRequest(remoteAddr, remoteHost, remotePort string, createTime ti
remoteAddr: remoteAddr,
remoteHost: remoteHost,
remotePort: remotePort,
+ connection: conn,
}
}
// hostIncomingRequests holds all the requests that are originating from a single host.
type hostIncomingRequests struct {
- remoteHost string
- requests []*TrackerRequest // this is an ordered list, according to the requestsHistory.created
+ remoteHost string
+ requests []*TrackerRequest // this is an ordered list, according to the requestsHistory.created
+ additionalHostRequests map[*TrackerRequest]struct{} // additional requests that aren't included in the "requests", and always assumed to be "alive".
}
// findTimestampIndex finds the first index (i) in the sorted requests array, where requests[i].created is greater than t.
@@ -80,6 +84,45 @@ func (ard *hostIncomingRequests) findTimestampIndex(t time.Time) int {
return i
}
+// convertToAdditionalRequest converts the given trackerRequest into an "additional request".
+// unlike regular tracker requests, additional requests do not get pruned.
+func (ard *hostIncomingRequests) convertToAdditionalRequest(trackerRequest *TrackerRequest) {
+ if _, has := ard.additionalHostRequests[trackerRequest]; has {
+ return
+ }
+
+ i := sort.Search(len(ard.requests), func(i int) bool {
+ return ard.requests[i].created.After(trackerRequest.created)
+ })
+ i--
+ if i < 0 {
+ return
+ }
+ // we could have several entries with the same timestamp, so we need to consider all of them.
+ for ; i >= 0; i-- {
+ if ard.requests[i] == trackerRequest {
+ break
+ }
+ if ard.requests[i].created != trackerRequest.created {
+ // we can't find the item in the list.
+ return
+ }
+ }
+ if i < 0 {
+ return
+ }
+ // ok, item was found at index i.
+ copy(ard.requests[i:], ard.requests[i+1:])
+ ard.requests[len(ard.requests)-1] = nil
+ ard.requests = ard.requests[:len(ard.requests)-1]
+ ard.additionalHostRequests[trackerRequest] = struct{}{}
+}
+
+// removeTrackedConnection removes a trackerRequest from the additional requests map
+func (ard *hostIncomingRequests) removeTrackedConnection(trackerRequest *TrackerRequest) {
+ delete(ard.additionalHostRequests, trackerRequest)
+}
+
// add adds the trackerRequest at the correct index within the sorted array.
func (ard *hostIncomingRequests) add(trackerRequest *TrackerRequest) {
// find the new item index.
@@ -102,7 +145,7 @@ func (ard *hostIncomingRequests) add(trackerRequest *TrackerRequest) {
// countConnections counts the number of connections that we have that occurred after the specified time
func (ard *hostIncomingRequests) countConnections(rateLimitingWindowStartTime time.Time) (count uint) {
i := ard.findTimestampIndex(rateLimitingWindowStartTime)
- return uint(len(ard.requests) - i)
+ return uint(len(ard.requests) - i + len(ard.additionalHostRequests))
}
type hostsIncomingMap map[string]*hostIncomingRequests
@@ -136,8 +179,9 @@ func (him *hostsIncomingMap) addRequest(trackerRequest *TrackerRequest) {
requestData, has := (*him)[trackerRequest.remoteHost]
if !has {
requestData = &hostIncomingRequests{
- remoteHost: trackerRequest.remoteHost,
- requests: make([]*TrackerRequest, 0, 1),
+ remoteHost: trackerRequest.remoteHost,
+ requests: make([]*TrackerRequest, 0, 1),
+ additionalHostRequests: make(map[*TrackerRequest]struct{}),
}
(*him)[trackerRequest.remoteHost] = requestData
}
@@ -153,6 +197,24 @@ func (him *hostsIncomingMap) countOriginConnections(remoteHost string, rateLimit
return 0
}
+// convertToAdditionalRequest converts the given trackerRequest into an "additional request".
+func (him *hostsIncomingMap) convertToAdditionalRequest(trackerRequest *TrackerRequest) {
+ requestData, has := (*him)[trackerRequest.remoteHost]
+ if !has {
+ return
+ }
+ requestData.convertToAdditionalRequest(trackerRequest)
+}
+
+// removeTrackedConnection removes a trackerRequest from the additional requests map
+func (him *hostsIncomingMap) removeTrackedConnection(trackerRequest *TrackerRequest) {
+ requestData, has := (*him)[trackerRequest.remoteHost]
+ if !has {
+ return
+ }
+ requestData.removeTrackedConnection(trackerRequest)
+}
+
// RequestTracker tracks the incoming request connections
type RequestTracker struct {
downstreamHandler http.Handler
@@ -185,6 +247,25 @@ func makeRequestsTracker(downstreamHandler http.Handler, log logging.Logger, con
}
}
+// requestTrackedConnection used to track the active connections. In particular, it used to remove the
+// tracked connection entry from the RequestTracker once a connection is closed.
+type requestTrackedConnection struct {
+ net.Conn
+ tracker *RequestTracker
+}
+
+// Close removes the connection from the tracker's connections map and calls the underlying Close function.
+func (c *requestTrackedConnection) Close() error {
+ c.tracker.hostRequestsMu.Lock()
+ trackerRequest := c.tracker.acceptedConnections[c.Conn.LocalAddr()]
+ delete(c.tracker.acceptedConnections, c.Conn.LocalAddr())
+ if trackerRequest != nil {
+ c.tracker.hostRequests.removeTrackedConnection(trackerRequest)
+ }
+ c.tracker.hostRequestsMu.Unlock()
+ return c.Conn.Close()
+}
+
// Accept waits for and returns the next connection to the listener.
func (rt *RequestTracker) Accept() (conn net.Conn, err error) {
// the following for loop is a bit tricky :
@@ -196,7 +277,7 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) {
return
}
- trackerRequest := makeTrackerRequest(conn.RemoteAddr().String(), "", "", time.Now())
+ trackerRequest := makeTrackerRequest(conn.RemoteAddr().String(), "", "", time.Now(), conn)
rateLimitingWindowStartTime := trackerRequest.created.Add(-time.Duration(rt.config.ConnectionsRateLimitingWindowSeconds) * time.Second)
rt.hostRequestsMu.Lock()
@@ -232,6 +313,7 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) {
// add an entry to the acceptedConnections so that the ServeHTTP could find the connection quickly.
rt.acceptedConnections[conn.LocalAddr()] = trackerRequest
rt.hostRequestsMu.Unlock()
+ conn = &requestTrackedConnection{Conn: conn, tracker: rt}
return
}
}
@@ -255,10 +337,11 @@ func (rt *RequestTracker) sendBlockedConnectionResponse(conn net.Conn, requestTi
}
// pruneAcceptedConnections cleans stale items from the acceptedConnections map; it's synchronized via the acceptedConnectionsMu mutex which is expected to be taken by the caller.
+// in case noPrune is set, the pruning is disabled for this connection. The HTTP handlers would call Close to have this entry cleared out.
func (rt *RequestTracker) pruneAcceptedConnections(pruneStartDate time.Time) {
localAddrToRemove := []net.Addr{}
for localAddr, request := range rt.acceptedConnections {
- if request.created.Before(pruneStartDate) {
+ if request.noPrune == false && request.created.Before(pruneStartDate) {
localAddrToRemove = append(localAddrToRemove, localAddr)
}
}
@@ -292,6 +375,14 @@ func (rt *RequestTracker) GetTrackedRequest(request *http.Request) (trackedReque
return rt.httpConnections[localAddr]
}
+// GetRequestConnection return the underlying connection for the given request
+func (rt *RequestTracker) GetRequestConnection(request *http.Request) net.Conn {
+ rt.httpConnectionsMu.Lock()
+ defer rt.httpConnectionsMu.Unlock()
+ localAddr := request.Context().Value(http.LocalAddrContextKey).(net.Addr)
+ return rt.httpConnections[localAddr].connection
+}
+
func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.Request) {
// this function is called only after we've fetched all the headers. on some malicious clients, this could get delayed, so we can't rely on the
// tcp-connection established time to align with current time.
@@ -302,16 +393,20 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http.
rt.hostRequestsMu.Lock()
trackedRequest := rt.acceptedConnections[localAddr]
- delete(rt.acceptedConnections, localAddr)
if trackedRequest != nil {
+ // update the original tracker request so that it won't get pruned.
+ if trackedRequest.noPrune == false {
+ trackedRequest.noPrune = true
+ rt.hostRequests.convertToAdditionalRequest(trackedRequest)
+ }
// create a copy, so we can unlock
- trackedRequest = makeTrackerRequest(trackedRequest.remoteAddr, trackedRequest.remoteHost, trackedRequest.remotePort, trackedRequest.created)
+ trackedRequest = makeTrackerRequest(trackedRequest.remoteAddr, trackedRequest.remoteHost, trackedRequest.remotePort, trackedRequest.created, trackedRequest.connection)
}
rt.hostRequestsMu.Unlock()
// we have no request tracker ? no problem; create one on the fly.
if trackedRequest == nil {
- trackedRequest = makeTrackerRequest(request.RemoteAddr, "", "", time.Now())
+ trackedRequest = makeTrackerRequest(request.RemoteAddr, "", "", time.Now(), nil)
}
// update the origin address.
diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go
index d0169c34e..923c49ad2 100644
--- a/network/requestTracker_test.go
+++ b/network/requestTracker_test.go
@@ -45,7 +45,7 @@ func TestHostIncomingRequestsOrdering(t *testing.T) {
now := time.Now()
perm := rand.Perm(100)
for i := 0; i < 100; i++ {
- trackedRequest := makeTrackerRequest("remoteaddr", "host", "port", now.Add(time.Duration(perm[i])*time.Minute))
+ trackedRequest := makeTrackerRequest("remoteaddr", "host", "port", now.Add(time.Duration(perm[i])*time.Minute), nil)
hir.add(trackedRequest)
}
require.Equal(t, 100, len(hir.requests))
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 93e8d2bd9..af836e282 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -185,6 +185,10 @@ type GossipNode interface {
// arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar
// characteristics as with a watchdog timer.
OnNetworkAdvance()
+
+ // GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same
+ // request that was provided to the http handler ( or provide a fallback Context() to that )
+ GetHTTPRequestConnection(request *http.Request) (conn net.Conn)
}
// IncomingMessage represents a message arriving from some peer in our p2p network
@@ -947,6 +951,17 @@ func (wn *WebsocketNetwork) checkIncomingConnectionVariables(response http.Respo
return http.StatusOK
}
+// GetHTTPRequestConnection returns the underlying connection for the given request. Note that the request must be the same
+// request that was provided to the http handler ( or provide a fallback Context() to that )
+// if the provided request has no associated connection, it returns nil. ( this should not happen for any http request that was registered
+// by WebsocketNetwork )
+func (wn *WebsocketNetwork) GetHTTPRequestConnection(request *http.Request) (conn net.Conn) {
+ if wn.requestsTracker != nil {
+ conn = wn.requestsTracker.GetRequestConnection(request)
+ }
+ return
+}
+
// ServerHTTP handles the gossip network functions over websockets
func (wn *WebsocketNetwork) ServeHTTP(response http.ResponseWriter, request *http.Request) {
trackedRequest := wn.requestsTracker.GetTrackedRequest(request)
diff --git a/node/node.go b/node/node.go
index 91bdab6f1..6cc6c1f97 100644
--- a/node/node.go
+++ b/node/node.go
@@ -98,7 +98,6 @@ type AlgorandFullNode struct {
transactionPool *pools.TransactionPool
txHandler *data.TxHandler
accountManager *data.AccountManager
- feeTracker *pools.FeeTracker
agreementService *agreement.Service
catchupService *catchup.Service
@@ -204,11 +203,6 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
}
node.ledger.RegisterBlockListeners(blockListeners)
node.txHandler = data.MakeTxHandler(node.transactionPool, node.ledger, node.net, node.genesisID, node.genesisHash, node.lowPriorityCryptoVerificationPool)
- node.feeTracker, err = pools.MakeFeeTracker()
- if err != nil {
- log.Error(err)
- return nil, err
- }
// Indexer setup
if cfg.IsIndexerActive && cfg.Archival {
@@ -664,7 +658,7 @@ func (node *AlgorandFullNode) PoolStats() PoolStats {
// SuggestedFee returns the suggested fee per byte recommended to ensure a new transaction is processed in a timely fashion.
// Caller should set fee to max(MinTxnFee, SuggestedFee() * len(encoded SignedTxn))
func (node *AlgorandFullNode) SuggestedFee() basics.MicroAlgos {
- return node.feeTracker.EstimateFee()
+ return basics.MicroAlgos{Raw: node.transactionPool.FeePerByte()}
}
// GetPendingTxnsFromPool returns a snapshot of every pending transactions from the node's transaction pool in a slice.
@@ -761,9 +755,6 @@ func (node *AlgorandFullNode) IsArchival() bool {
// OnNewBlock implements the BlockListener interface so we're notified after each block is written to the ledger
func (node *AlgorandFullNode) OnNewBlock(block bookkeeping.Block, delta ledger.StateDelta) {
- // Update fee tracker
- node.feeTracker.ProcessBlock(block)
-
node.mu.Lock()
node.lastRoundTimestamp = time.Now()
node.hasSyncedSinceStartup = true
diff --git a/package-deploy.yaml b/package-deploy.yaml
index a243dfa9f..2206ed4a7 100644
--- a/package-deploy.yaml
+++ b/package-deploy.yaml
@@ -1,53 +1,41 @@
-tasks:
- - task: docker.Version
- configFilePath: scripts/configure_dev-deps.sh
-
- - task: shell.docker.Ensure
- name: deb
- image: algorand/mule-linux-ubuntu
- version: '{{ docker.Version.outputs.version }}'
+agents:
+ - name: deb
dockerFilePath: docker/build/docker.ubuntu.Dockerfile
- dependencies: docker.Version
-
- - task: shell.docker.Ensure
- name: rpm
- image: algorand/mule-linux-centos
- version: '{{ docker.Version.outputs.version }}'
+ image: algorand/mule-linux-ubuntu
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ env:
+ - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
+ - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ workDir: $HOME/projects/go-algorand
+
+ - name: rpm
dockerFilePath: docker/build/mule.go.centos.Dockerfile
- dependencies: docker.Version
+ image: algorand/mule-linux-centos
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ env:
+ - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
+ - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
+ volumes:
+ - $HOME/packages:/root/packages,
+ - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
+ - $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
+ workDir: $HOME/projects/go-algorand
+tasks:
- task: docker.Make
name: deb
- docker:
- image: algorand/mule-linux-ubuntu
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [
- $HOME/packages:/root/packages,
- $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
- ]
+ agent: deb
target: mule-deploy-deb
- task: docker.Make
name: rpm
- docker:
- image: algorand/mule-linux-centos
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [
- $HOME/packages:/root/packages,
- $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
- ]
+ agent: rpm
target: mule-deploy-rpm
- task: s3.BucketCopy
@@ -79,11 +67,7 @@ tasks:
jobs:
package-deploy:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.deb
- - shell.docker.Ensure.rpm
- docker.Make.deb
- docker.Make.rpm
diff --git a/package-sign.yaml b/package-sign.yaml
index 70e5b04d8..bb3234001 100644
--- a/package-sign.yaml
+++ b/package-sign.yaml
@@ -1,72 +1,37 @@
-tasks:
- - task: docker.Version
- configFilePath: scripts/configure_dev-deps.sh
-
- - task: shell.docker.Ensure
- name: mule-debian
- image: algorand/go-algorand-ci-mule-debian
- version: '{{ docker.Version.outputs.version }}'
+agents:
+ - name: deb
dockerFilePath: docker/build/mule.go.debian.Dockerfile
- dependencies: docker.Version
+ image: algorand/go-algorand-ci-mule-debian
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ env:
+ - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
+ - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
+ volumes:
+ - $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
+ - $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
+ workDir: $HOME/projects/go-algorand
+tasks:
- task: docker.Make
name: package-sign-deb
- docker:
- image: algorand/go-algorand-ci-mule-debian
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [
- $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
- ]
+ agent: deb
target: mule-sign-deb
- task: docker.Make
name: package-sign-rpm
- docker:
- image: algorand/go-algorand-ci-mule-debian
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [
- $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
- ]
+ agent: deb
target: mule-sign-rpm
- task: docker.Make
name: package-sign-tarball
- docker:
- image: algorand/go-algorand-ci-mule-debian
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [
- $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
- ]
+ agent: deb
target: mule-sign-tar.gz
- task: docker.Make
name: package-sign-source
- docker:
- image: algorand/go-algorand-ci-mule-debian
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- volumes: [
- $XDG_RUNTIME_DIR/gnupg/S.gpg-agent:/root/.gnupg/S.gpg-agent,
- $HOME/.gnupg/pubring.kbx:/root/.gnupg/pubring.kbx
- ]
+ agent: deb
target: mule-sign-source
- task: s3.DownloadFile
@@ -90,10 +55,7 @@ tasks:
jobs:
package-sign:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.mule-debian
- docker.Make.package-sign-deb
- docker.Make.package-sign-rpm
- docker.Make.package-sign-tarball
diff --git a/package-test.yaml b/package-test.yaml
index f2718ab37..511974c77 100644
--- a/package-test.yaml
+++ b/package-test.yaml
@@ -1,38 +1,26 @@
-tasks:
- - task: docker.Version
- configFilePath: scripts/configure_dev-deps.sh
-
- - task: shell.docker.Ensure
- name: package
- image: algorand/mule-linux-debian
- version: '{{ docker.Version.outputs.version }}'
+agents:
+ - name: deb
dockerFilePath: docker/build/mule.go.debian.Dockerfile
- dependencies: docker.Version
+ image: algorand/mule-linux-debian
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ env:
+ - AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
+ - AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ workDir: $HOME/projects/go-algorand
+tasks:
- task: docker.Make
name: package-test-deb
- docker:
- image: algorand/mule-linux-debian
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [ /var/run/docker.sock:/var/run/docker.sock ]
+ agent: deb
target: mule-test-deb
- task: docker.Make
name: package-test-rpm
- docker:
- image: algorand/mule-linux-debian
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- env: [
- AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID,
- AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY
- ]
- volumes: [ /var/run/docker.sock:/var/run/docker.sock ]
+ agent: deb
target: mule-test-rpm
- task: s3.DownloadFile
@@ -49,24 +37,15 @@ tasks:
jobs:
package-test-deb:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.package
- docker.Make.package-test-deb
package-test-rpm:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.package
- docker.Make.package-test-rpm
package-test:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.package
- docker.Make.package-test-deb
- docker.Make.package-test-rpm
diff --git a/package.yaml b/package.yaml
index 2ac7dffbb..767a69461 100644
--- a/package.yaml
+++ b/package.yaml
@@ -1,75 +1,57 @@
-tasks:
- - task: docker.Version
- configFilePath: scripts/configure_dev-deps.sh
-
- - task: shell.docker.Ensure
- name: docker-ubuntu
- image: algorand/go-algorand-docker-linux-ubuntu
- version: '{{ docker.Version.outputs.version }}'
- dockerFilePath: docker/build/docker.ubuntu.Dockerfile
- dependencies: docker.Version
+agents:
+ - name: deb
+ dockerFilePath: docker/build/cicd.ubuntu.Dockerfile
+ image: algorand/go-algorand-ci-linux-ubuntu
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ workDir: $HOME/projects/go-algorand
- - task: shell.docker.Ensure
- name: rpm
- image: algorand/go-algorand-ci-linux-centos
- version: '{{ docker.Version.outputs.version }}'
+ - name: rpm
dockerFilePath: docker/build/cicd.centos.Dockerfile
- dependencies: docker.Version
+ image: algorand/go-algorand-ci-linux-centos
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ workDir: $HOME/projects/go-algorand
- - task: shell.docker.Ensure
- name: deb
- image: algorand/go-algorand-ci-linux-ubuntu
- version: '{{ docker.Version.outputs.version }}'
- dockerFilePath: docker/build/cicd.ubuntu.Dockerfile
- dependencies: docker.Version
+ - name: docker-ubuntu
+ dockerFilePath: docker/build/docker.ubuntu.Dockerfile
+ image: algorand/go-algorand-docker-linux-ubuntu
+ version: scripts/configure_dev-deps.sh
+ buildArgs:
+ - GOLANG_VERSION=`./scripts/get_golang_version.sh`
+ volumes:
+ - /var/run/docker.sock:/var/run/docker.sock
+ workDir: $HOME/projects/go-algorand
+tasks:
- task: docker.Make
name: build
- docker:
- image: algorand/go-algorand-ci-linux-centos
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
target: ci-build
- task: docker.Make
name: rpm
- docker:
- image: algorand/go-algorand-ci-linux-centos
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
+ agent: rpm
target: mule-package-rpm
- task: docker.Make
name: deb
- docker:
- image: algorand/go-algorand-ci-linux-ubuntu
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
+ agent: deb
target: mule-package-deb
- task: docker.Make
name: docker-image
- docker:
- image: algorand/go-algorand-docker-linux-ubuntu
- version: '{{ docker.Version.outputs.version }}'
- workDir: /projects/go-algorand
- volumes: [ /var/run/docker.sock:/var/run/docker.sock ]
+ agent: docker-ubuntu
target: mule-package-docker
jobs:
package:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.rpm
- docker.Make.rpm
- - shell.docker.Ensure.deb
- docker.Make.deb
package-docker:
- configs:
- arch: amd64
tasks:
- - shell.docker.Ensure.docker-ubuntu
- docker.Make.docker-image
diff --git a/rpcs/ledgerService.go b/rpcs/ledgerService.go
index 3e7c4eb22..5093eb470 100644
--- a/rpcs/ledgerService.go
+++ b/rpcs/ledgerService.go
@@ -25,6 +25,7 @@ import (
"strings"
"sync"
"sync/atomic"
+ "time"
"github.com/gorilla/mux"
@@ -36,14 +37,25 @@ import (
"github.com/algorand/go-algorand/network"
)
-// LedgerResponseContentType is the HTTP Content-Type header for a raw ledger block
-const LedgerResponseContentType = "application/x-algorand-ledger-v2.1"
+const (
+ // LedgerResponseContentType is the HTTP Content-Type header for a raw ledger block
+ LedgerResponseContentType = "application/x-algorand-ledger-v2.1"
-const ledgerServerMaxBodyLength = 512 // we don't really pass meaningful content here, so 512 bytes should be a safe limit
+ ledgerServerMaxBodyLength = 512 // we don't really pass meaningful content here, so 512 bytes should be a safe limit
-// LedgerServiceLedgerPath is the path to register LedgerService as a handler for when using gorilla/mux
-// e.g. .Handle(LedgerServiceLedgerPath, &ls)
-const LedgerServiceLedgerPath = "/v{version:[0-9.]+}/{genesisID}/ledger/{round:[0-9a-z]+}"
+ // LedgerServiceLedgerPath is the path to register LedgerService as a handler for when using gorilla/mux
+ // e.g. .Handle(LedgerServiceLedgerPath, &ls)
+ LedgerServiceLedgerPath = "/v{version:[0-9.]+}/{genesisID}/ledger/{round:[0-9a-z]+}"
+
+ // maxCatchpointFileSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk.
+ maxCatchpointFileSize = 512 * 1024 * 1024 // 512MB
+
+ // expectedWorstDownloadSpeedBytesPerSecond defines the worst-case scenario upload speed we expect to get while uploading a catchpoint file
+ expectedWorstDownloadSpeedBytesPerSecond = 200 * 1024
+
+ // maxCatchpointFileWritingDuration is the maximum amount of time we would allow for writing a single catchpoint file to a requesting client
+ maxCatchpointFileWritingDuration = 2*time.Minute + maxCatchpointFileSize*time.Second/expectedWorstDownloadSpeedBytesPerSecond
+)
// LedgerService represents the Ledger RPC API
type LedgerService struct {
@@ -165,6 +177,11 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
response.Write([]byte(fmt.Sprintf("specified round number could not be parsed using base 36 : %v", err)))
return
}
+ if conn := ls.net.GetHTTPRequestConnection(request); conn != nil {
+ conn.SetWriteDeadline(time.Now().Add(maxCatchpointFileWritingDuration))
+ } else {
+ logging.Base().Warnf("LedgerService.ServeHTTP unable to set connection timeout")
+ }
cs, err := ls.ledger.GetCatchpointStream(basics.Round(round))
if err != nil {
switch err.(type) {
@@ -186,16 +203,22 @@ func (ls *LedgerService) ServeHTTP(response http.ResponseWriter, request *http.R
requestedCompressedResponse := strings.Contains(request.Header.Get("Accept-Encoding"), "gzip")
if requestedCompressedResponse {
response.Header().Set("Content-Encoding", "gzip")
- io.Copy(response, cs)
+ written, err := io.Copy(response, cs)
+ if err != nil {
+ logging.Base().Infof("LedgerService.ServeHTTP : unable to write compressed catchpoint file for round %d, written bytes %d : %v", round, written, err)
+ }
return
}
decompressedGzip, err := gzip.NewReader(cs)
if err != nil {
- logging.Base().Warnf("ServeHTTP : failed to decompress catchpoint %d %v", round, err)
+ logging.Base().Warnf("LedgerService.ServeHTTP : failed to decompress catchpoint %d %v", round, err)
response.WriteHeader(http.StatusInternalServerError)
response.Write([]byte(fmt.Sprintf("catchpoint file for round %d could not be decompressed due to internal error : %v", round, err)))
return
}
defer decompressedGzip.Close()
- io.Copy(response, decompressedGzip)
+ written, err := io.Copy(response, decompressedGzip)
+ if err != nil {
+ logging.Base().Infof("LedgerService.ServeHTTP : unable to write decompressed catchpoint file for round %d, written bytes %d : %v", round, written, err)
+ }
}
diff --git a/scripts/release/mule/package/rpm/package.sh b/scripts/release/mule/package/rpm/package.sh
index 4aae0bad4..5bf71facd 100755
--- a/scripts/release/mule/package/rpm/package.sh
+++ b/scripts/release/mule/package/rpm/package.sh
@@ -4,16 +4,16 @@ set -ex
echo "Building RPM package"
-REPO_DIR=/projects/go-algorand
+REPO_DIR=$(pwd)
ARCH=$(./scripts/archtype.sh)
OS_TYPE=$(./scripts/ostype.sh)
FULLVERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)}
BRANCH=${BRANCH:-$(git rev-parse --abbrev-ref HEAD)}
CHANNEL=${CHANNEL:-$(./scripts/compute_branch_channel.sh "$BRANCH")}
-ALGO_BIN="$REPO_DIR/tmp/node_pkgs/$OS_TYPE/$ARCH/$CHANNEL/${OS_TYPE}-${ARCH}/bin"
+ALGO_BIN="$REPO_DIR/tmp/node_pkgs/$OS_TYPE/$ARCH/$CHANNEL/$OS_TYPE-$ARCH/bin"
# TODO: Should there be a default network?
DEFAULTNETWORK=devnet
-DEFAULT_RELEASE_NETWORK=$(./scripts/compute_branch_release_network.sh "${DEFAULTNETWORK}")
+DEFAULT_RELEASE_NETWORK=$(./scripts/compute_branch_release_network.sh "$DEFAULTNETWORK")
PKG_NAME=$(./scripts/compute_package_name.sh "${CHANNEL:-stable}")
# The following need to be exported for use in ./go-algorand/installer/rpm/algorand.spec.
@@ -28,7 +28,7 @@ trap 'rm -rf $RPMTMP' 0
TEMPDIR=$(mktemp -d)
trap 'rm -rf $TEMPDIR' 0
< "./installer/rpm/algorand.spec" \
- sed -e "s,@PKG_NAME@,${PKG_NAME}," \
+ sed -e "s,@PKG_NAME@,$PKG_NAME," \
-e "s,@VER@,$FULLVERSION," \
> "$TEMPDIR/algorand.spec"
diff --git a/test/README.md b/test/README.md
new file mode 100644
index 000000000..096e18071
--- /dev/null
+++ b/test/README.md
@@ -0,0 +1,51 @@
+# End to end tests
+
+This directory contains the category of tests which we like to call "end to end". Primarily they consist of tests which first start a private network and then run a series of commands against that network.
+
+These tests have grown since the project started and we have a number of different frameworks. There are a number of different tests, frameworks and tools in this directory.
+
+
+# Directories
+## Tests / Test Frameworks
+
+* scripts - shell scripted integration test framework.
+* e2e-go - tests that can be run with `go test`.
+* framework - functions and utilities used by the e2e-go tests.
+* release-testing - a spot for specific release tests, see README files in subdirectories.
+* muleCI - scripts that run tests on a Jenkins server with mule
+* packages - test that algod can be packaged on different docker environments.
+* platform - test the algod amd64 package compatibility across different distributions.
+
+## Tools / Data
+* commandandcontrol - a remote control tool for algod. It allows you to manage many instances across many nodes.
+* netperf-go - tools for semi-automated performance tests.
+* testdata - datasets used by other tools not included in this repository.
+
+# Scripts
+
+Entry point to our integration test framework, including the e2e-go tests.
+
+Must run from the root project directory, `./test/scripts/e2e.sh`
+
+## scripts/e2e_client_runner.py and scripts/e2e_subs/
+
+These tests are shell scripts which all run in series against a single private network.
+
+Each script is provided with a wallet which contains a large supply of algos to use during the test.
+```
+usage: e2e_client_runner.py [-h] [--keep-temps] [--timeout TIMEOUT] [--verbose] [scripts [scripts ...]]
+
+positional arguments:
+ scripts scripts to run
+
+optional arguments:
+ -h, --help show this help message and exit
+ --keep-temps if set, keep all the test files
+ --timeout TIMEOUT integer seconds to wait for the scripts to run
+ --verbose
+```
+
+To run a specific test:
+```
+~$ ./e2e_client_runner.py full/path/to/test_script.sh
+```
diff --git a/test/scripts/e2e_subs/e2e-app-bootloader.sh b/test/scripts/e2e_subs/e2e-app-bootloader.sh
index cb90d3bfb..170a96f09 100755
--- a/test/scripts/e2e_subs/e2e-app-bootloader.sh
+++ b/test/scripts/e2e_subs/e2e-app-bootloader.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-date '+keyreg-teal-test start %Y%m%d_%H%M%S'
+date '+app-bootloader-test start %Y%m%d_%H%M%S'
set -e
set -x
diff --git a/test/scripts/e2e_subs/e2e-app-delete-clear.sh b/test/scripts/e2e_subs/e2e-app-delete-clear.sh
new file mode 100755
index 000000000..d46c1d298
--- /dev/null
+++ b/test/scripts/e2e_subs/e2e-app-delete-clear.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+date '+app-delete-clear-test start %Y%m%d_%H%M%S'
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+# approval program
+printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+PROGRAM_FILE="${TEMPDIR}/simple.teal"
+
+GLOBAL_INTS=2
+
+# Succeed in creating app with on-completion delete
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --on-completion "DeleteApplication" --approval-prog "${PROGRAM_FILE}" --clear-prog "${PROGRAM_FILE}" --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+# Check that the app is not created
+APPID_CHECK=$(${gcmd} app info --app-id $APPID 2>&1 || true)
+EXPERROR="application does not exist"
+if [[ $APPID_CHECK != *"${EXPERROR}"* ]]; then
+ date '+app-create-delete-test FAIL the deleted application should not exist %Y%m%d_%H%M%S'
+ false
+fi
+
+# Fail if creating app with on-completion clear
+RES=$(${gcmd} app create --creator ${ACCOUNT} --on-completion "ClearState" --approval-prog "${PROGRAM_FILE}" --clear-prog "${PROGRAM_FILE}" --global-byteslices 0 --global-ints ${GLOBAL_INTS} --local-byteslices 0 --local-ints 0 2>&1 || true )
+EXPERROR1='cannot clear state for app'
+EXPERROR2='is not currently opted in'
+if [[ $RES != *"${EXPERROR1}"*"${EXPERROR2}"* ]]; then
+ date '+app-create-clear FAIL should fail to create app with on-completion ClearState %Y%m%d_%H%M%S'
+ false
+fi
+
diff --git a/test/scripts/e2e_subs/e2e-app-real-assets-round.sh b/test/scripts/e2e_subs/e2e-app-real-assets-round.sh
index 6d2b8ed0a..6793d6bb5 100755
--- a/test/scripts/e2e_subs/e2e-app-real-assets-round.sh
+++ b/test/scripts/e2e_subs/e2e-app-real-assets-round.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-date '+keyreg-teal-test start %Y%m%d_%H%M%S'
+date '+app-real-assets-round-test start %Y%m%d_%H%M%S'
set -e
set -x
diff --git a/test/scripts/e2e_subs/e2e-app-simple.sh b/test/scripts/e2e_subs/e2e-app-simple.sh
index c6b415eac..d67486c10 100755
--- a/test/scripts/e2e_subs/e2e-app-simple.sh
+++ b/test/scripts/e2e_subs/e2e-app-simple.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-date '+keyreg-teal-test start %Y%m%d_%H%M%S'
+date '+app-simple-test start %Y%m%d_%H%M%S'
set -e
set -x
diff --git a/test/scripts/e2e_subs/e2e-app-stateful-global.sh b/test/scripts/e2e_subs/e2e-app-stateful-global.sh
index c04ee8467..eac38d313 100755
--- a/test/scripts/e2e_subs/e2e-app-stateful-global.sh
+++ b/test/scripts/e2e_subs/e2e-app-stateful-global.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-date '+keyreg-teal-test start %Y%m%d_%H%M%S'
+date '+app-stateful-global-test start %Y%m%d_%H%M%S'
set -e
set -x
diff --git a/test/scripts/e2e_subs/e2e-app-stateful-local.sh b/test/scripts/e2e_subs/e2e-app-stateful-local.sh
index b2281e7c7..4f9201499 100755
--- a/test/scripts/e2e_subs/e2e-app-stateful-local.sh
+++ b/test/scripts/e2e_subs/e2e-app-stateful-local.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-date '+keyreg-teal-test start %Y%m%d_%H%M%S'
+date '+app-stateful-local-test start %Y%m%d_%H%M%S'
set -e
set -x
diff --git a/test/scripts/e2e_subs/e2e-app-x-app-reads.sh b/test/scripts/e2e_subs/e2e-app-x-app-reads.sh
index 31eec6480..fdfb51f94 100755
--- a/test/scripts/e2e_subs/e2e-app-x-app-reads.sh
+++ b/test/scripts/e2e_subs/e2e-app-x-app-reads.sh
@@ -1,6 +1,6 @@
#!/bin/bash
-date '+keyreg-teal-test start %Y%m%d_%H%M%S'
+date '+app-x-app-reads-test start %Y%m%d_%H%M%S'
set -e
set -x
diff --git a/test/scripts/e2e_subs/rekey.sh b/test/scripts/e2e_subs/rekey.sh
index a87c64ee4..516e295cb 100755
--- a/test/scripts/e2e_subs/rekey.sh
+++ b/test/scripts/e2e_subs/rekey.sh
@@ -71,31 +71,31 @@ mnemonic=$(grep 'Private key mnemonic:' < "${TEMPDIR}/rekey" | sed 's/Private ke
ACCOUNTC=$(grep 'Public key:' < "${TEMPDIR}/rekey" | sed 's/Public key: //')
${gcmd} account import -m "${mnemonic}"
-${gcmd} clerk send -a 10000000 -f "${ACCOUNT}" -t "${ACCOUNTB}" --rekey-to "${ACCOUNTC}"
+${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" --rekey-to "${ACCOUNTC}"
-${gcmd} clerk send -a 13000000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -o "${TEMPDIR}/ntxn"
+${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -o "${TEMPDIR}/ntxn"
${gcmd} clerk sign -S "${ACCOUNTC}" -i "${TEMPDIR}/ntxn" -o "${TEMPDIR}/nstxn"
${gcmd} clerk rawsend -f "${TEMPDIR}/nstxn"
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
-if [ "$BALANCEB" -ne 23000000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=23000000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+if [ "$BALANCEB" -ne 200000 ]; then
+ date "+e2e_subs/rekey.sh FAIL wanted balance=200000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
# Rekey from A to C back to A [A -> C -> A].
-${gcmd} clerk send -a 10000000 -f "${ACCOUNT}" -t "${ACCOUNTB}" --rekey-to "${ACCOUNT}" -s -o "${TEMPDIR}/ntxn2"
+${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" --rekey-to "${ACCOUNT}" -s -o "${TEMPDIR}/ntxn2"
${gcmd} clerk sign -S "${ACCOUNTC}" -i "${TEMPDIR}/ntxn2" -o "${TEMPDIR}/nstxn2"
${gcmd} clerk rawsend -f "${TEMPDIR}/nstxn2"
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
-if [ "$BALANCEB" -ne 33000000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=33000000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+if [ "$BALANCEB" -ne 300000 ]; then
+ date "+e2e_subs/rekey.sh FAIL wanted balance=300000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
# Fail case. Try to sign and send from A signed by C.
-${gcmd} clerk send -a 10000000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -s -o "${TEMPDIR}/ntxn3"
+${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}" -s -o "${TEMPDIR}/ntxn3"
${gcmd} clerk sign -S "${ACCOUNTC}" -i "${TEMPDIR}/ntxn3" -o "${TEMPDIR}/nstxn3"
# This should fail because $ACCOUNT should have signed the transaction.
@@ -108,17 +108,17 @@ fi
# Account balance should be the same amount as before.
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
-if [ "$BALANCEB" -ne 33000000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=33000000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+if [ "$BALANCEB" -ne 300000 ]; then
+ date "+e2e_subs/rekey.sh FAIL wanted balance=300000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
# After restoring, let's just do a trivial transfer as a sanity.
-${gcmd} clerk send -a 10000000 -f "${ACCOUNT}" -t "${ACCOUNTB}"
+${gcmd} clerk send -a 100000 -f "${ACCOUNT}" -t "${ACCOUNTB}"
BALANCEB=$(${gcmd} account balance -a "${ACCOUNTB}" | awk '{ print $1 }')
-if [ "$BALANCEB" -ne 43000000 ]; then
- date "+e2e_subs/rekey.sh FAIL wanted balance=43000000 but got ${BALANCEB} %Y%m%d_%H%M%S"
+if [ "$BALANCEB" -ne 400000 ]; then
+ date "+e2e_subs/rekey.sh FAIL wanted balance=400000 but got ${BALANCEB} %Y%m%d_%H%M%S"
false
fi
diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh
new file mode 100755
index 000000000..aaaa5bd08
--- /dev/null
+++ b/test/scripts/e2e_subs/rest.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+# TIMEOUT=300
+
+date '+rest.sh start %Y%m%d_%H%M%S'
+
+set -ex
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+gcmd="goal -w ${WALLET}"
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+# REST Parameters
+PUB_TOKEN=$(cat "$ALGORAND_DATA"/algod.token)
+ADMIN_TOKEN=$(cat "$ALGORAND_DATA"/algod.admin.token)
+NET=$(cat "$ALGORAND_DATA"/algod.net)
+
+function call_admin {
+ curl -q -s -H "Authorization: Bearer ${ADMIN_TOKEN}" "$NET$1"
+}
+
+function call {
+ curl -q -s -H "Authorization: Bearer ${PUB_TOKEN}" "$NET$1"
+}
+
+# $1 - test description.
+# $2 - query
+# $3 - substring that should be in the response
+function call_and_verify {
+ local RES
+ RES=$(call "$2")
+ if [[ "$RES" != *"$3"* ]]; then
+ echo "Failed test - $2: $1"
+ exit 1
+ fi
+}
+
+
+function test_applications_endpoint {
+ # Create an application
+ printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal"
+ APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 2 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }')
+
+ # Good request, non-existent app id
+ call_and_verify "Should not find app." "/v2/applications/987654321" 'application does not exist'
+ # Good request
+ call_and_verify "Should contain app data." "/v2/applications/$APPID" '"global-state-schema":{"num-byte-slice":0,"num-uint":2}'
+ # Good request, pretty response
+ call_and_verify "Should contain app data." "/v2/applications/$APPID?pretty" '
+ "global-state-schema": {
+ "num-byte-slice": 0,
+ "num-uint": 2
+ },
+ "local-state-schema": {
+ "num-byte-slice": 0,
+ "num-uint": 0
+ }
+'
+ # Some invalid path parameters
+ call_and_verify "Parameter parsing error." /v2/applications/-2 "Invalid format for parameter application-id"
+ call_and_verify "Parameter parsing error." /v2/applications/not-a-number "Invalid format for parameter application-id"
+
+ # Good request, but invalid query parameters
+ call_and_verify "Invalid parameter" "/v2/applications/$APPID?this-should-fail=200" 'Unknown parameter detected: this-should-fail'
+}
+
+test_applications_endpoint
diff --git a/test/testdata/configs/config-v10.json b/test/testdata/configs/config-v10.json
new file mode 100644
index 000000000..93fe6229b
--- /dev/null
+++ b/test/testdata/configs/config-v10.json
@@ -0,0 +1,76 @@
+{
+ "Version": 10,
+ "AnnounceParticipationKey": true,
+ "Archival": false,
+ "BaseLoggerDebugLevel": 4,
+ "BroadcastConnectionsLimit": -1,
+ "CadaverSizeTarget": 1073741824,
+ "CatchpointFileHistoryLength": 365,
+ "CatchpointInterval": 10000,
+ "CatchupBlockDownloadRetryAttempts": 1000,
+ "CatchupFailurePeerRefreshRate": 10,
+ "CatchupGossipBlockFetchTimeoutSec": 4,
+ "CatchupHTTPBlockFetchTimeoutSec": 4,
+ "CatchupLedgerDownloadRetryAttempts": 50,
+ "CatchupParallelBlocks": 16,
+ "ConnectionsRateLimitingCount": 60,
+ "ConnectionsRateLimitingWindowSeconds": 1,
+ "DNSBootstrapID": "<network>.algorand.network",
+ "DNSSecurityFlags": 1,
+ "DeadlockDetection": 0,
+ "DisableOutgoingConnectionThrottling": false,
+ "EnableAgreementReporting": false,
+ "EnableAgreementTimeMetrics": false,
+ "EnableAssembleStats": false,
+ "EnableBlockService": false,
+ "EnableDeveloperAPI": false,
+ "EnableGossipBlockService": true,
+ "EnableIncomingMessageFilter": false,
+ "EnableLedgerService": false,
+ "EnableMetricReporting": false,
+ "EnableOutgoingNetworkMessageFiltering": true,
+ "EnablePingHandler": true,
+ "EnableProcessBlockStats": false,
+ "EnableProfiler": false,
+ "EnableRequestLogger": false,
+ "EnableTopAccountsReporting": false,
+ "EndpointAddress": "127.0.0.1:0",
+ "FallbackDNSResolverAddress": "",
+ "ForceRelayMessages": false,
+ "GossipFanout": 4,
+ "IncomingConnectionsLimit": 10000,
+ "IncomingMessageFilterBucketCount": 5,
+ "IncomingMessageFilterBucketSize": 512,
+ "IsIndexerActive": false,
+ "LogArchiveMaxAge": "",
+ "LogArchiveName": "node.archive.log",
+ "LogSizeLimit": 1073741824,
+ "MaxConnectionsPerIP": 30,
+ "NetAddress": "",
+ "NetworkProtocolVersion": "",
+ "NodeExporterListenAddress": ":9100",
+ "NodeExporterPath": "./node_exporter",
+ "OptimizeAccountsDatabaseOnStartup": false,
+ "OutgoingMessageFilterBucketCount": 3,
+ "OutgoingMessageFilterBucketSize": 128,
+ "PeerConnectionsUpdateInterval": 3600,
+ "PeerPingPeriodSeconds": 0,
+ "PriorityPeers": {},
+ "PublicAddress": "",
+ "ReconnectTime": 60000000000,
+ "ReservedFDs": 256,
+ "RestReadTimeoutSeconds": 15,
+ "RestWriteTimeoutSeconds": 120,
+ "RunHosted": false,
+ "SuggestedFeeBlockHistory": 3,
+ "SuggestedFeeSlidingWindowSize": 50,
+ "TLSCertFile": "",
+ "TLSKeyFile": "",
+ "TelemetryToLog": true,
+ "TxPoolExponentialIncreaseFactor": 2,
+ "TxPoolSize": 15000,
+ "TxSyncIntervalSeconds": 60,
+ "TxSyncServeResponseSize": 1000000,
+ "TxSyncTimeoutSeconds": 30,
+ "UseXForwardedForAddressField": ""
+}
diff --git a/util/db/dbutil.go b/util/db/dbutil.go
index 9ff43f0aa..a5ce68bfe 100644
--- a/util/db/dbutil.go
+++ b/util/db/dbutil.go
@@ -60,6 +60,18 @@ type Accessor struct {
log logging.Logger
}
+// VacuumStats returns the database statistics before and after a vacuum operation
+type VacuumStats struct {
+ // PagesBefore is the number of pages in the database before the vacuum operation
+ PagesBefore uint64
+ // SizeBefore is the amount of data used by the database ( number of pages * size of a page) before the vacuum operation
+ SizeBefore uint64
+ // PagesAfter is the number of pages in the database after the vacuum operation
+ PagesAfter uint64
+ // SizeAfter is the amount of data used by the database ( number of pages * size of a page) after the vacuum operation
+ SizeAfter uint64
+}
+
// txExecutionContext contains the data that is associated with every created transaction
// before sending it to the user-defined callback. This allows the callback function to
// make changes to the execution setting of an ongoing transaction.
@@ -369,6 +381,37 @@ func (db *Accessor) AtomicCommitWriteLock(fn idemFn, commitLocker sync.Locker, e
return db.atomic(fn, commitLocker, extras...)
}
+// Vacuum performs a full vacuum on the given database. In order for the vacuum to succeed, the storage needs to have
+// double the amount of the current database size ( roughly ), and we cannot have any other transaction ( either read
+// or write ) being active.
+func (db *Accessor) Vacuum(ctx context.Context) (stats VacuumStats, err error) {
+ if db.readOnly {
+ return stats, fmt.Errorf("read-only database was used to attempt and perform vacuuming")
+ }
+ if db.inMemory {
+ return stats, nil
+ }
+ pageSize, err2 := db.GetPageSize(ctx)
+ if err2 != nil {
+ return stats, err2
+ }
+ stats.PagesBefore, err = db.GetPageCount(ctx)
+ if err != nil {
+ return stats, err
+ }
+ stats.SizeBefore = pageSize * stats.PagesBefore
+ _, err = db.Handle.ExecContext(ctx, "VACUUM")
+ if err != nil {
+ return stats, err
+ }
+ stats.PagesAfter, err = db.GetPageCount(ctx)
+ if err != nil {
+ return stats, err
+ }
+ stats.SizeAfter = pageSize * stats.PagesAfter
+ return
+}
+
// URI returns the sqlite URI given a db filename as an input.
func URI(filename string, readOnly bool, memory bool) string {
uri := fmt.Sprintf("file:%s?_busy_timeout=%d&_synchronous=full", filename, busy)
@@ -382,6 +425,24 @@ func URI(filename string, readOnly bool, memory bool) string {
return uri
}
+// GetPageCount returns the total number of pages in the database
+func (db *Accessor) GetPageCount(ctx context.Context) (pageCount uint64, err error) {
+ err = db.Handle.QueryRowContext(ctx, "PRAGMA page_count").Scan(&pageCount)
+ if err == sql.ErrNoRows {
+ err = fmt.Errorf("sqlite database doesn't support `PRAGMA page_count`")
+ }
+ return
+}
+
+// GetPageSize returns the number of bytes per database page
+func (db *Accessor) GetPageSize(ctx context.Context) (pageSize uint64, err error) {
+ err = db.Handle.QueryRowContext(ctx, "PRAGMA page_size").Scan(&pageSize)
+ if err == sql.ErrNoRows {
+ err = fmt.Errorf("sqlite database doesn't support `PRAGMA page_size`")
+ }
+ return
+}
+
// dbretry returns true if the error might be temporary
func dbretry(obj error) bool {
err, ok := obj.(sqlite3.Error)