author    Gary <982483+gmalouf@users.noreply.github.com>  2024-01-25 13:26:03 -0500
committer GitHub <noreply@github.com>                     2024-01-25 13:26:03 -0500
commit    86319605bebbc2d48f68b6d4360ebc917e018e85 (patch)
tree      97b5bd72fd0e00868314d5b1fa8a2d8bdba13e71
parent    2e41eef1ae64a283f90b2fe54e07df90b47cc5a1 (diff)
Archival Support: Remove configurable support for catching up from "archivers" (#5920)
Co-authored-by: John Jannotti <jannotti@gmail.com>
 catchup/catchpointService.go          |  19
 catchup/peerSelector_test.go          |  69
 catchup/service.go                    |  80
 catchup/service_test.go               | 104
 config/localTemplate.go               |  20
 config/local_defaults.go              |   2
 installer/config.json.example         |   2
 network/wsNetwork.go                  |  15
 network/wsNetwork_test.go             | 132
 rpcs/blockService.go                  |  32
 rpcs/blockService_test.go             | 156
 test/testdata/configs/config-v33.json |   2
 12 files changed, 227 insertions(+), 406 deletions(-)
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index f71a4209a..2c4f6dfc4 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -806,20 +806,11 @@ func (cs *CatchpointCatchupService) initDownloadPeerSelector() {
}
func (cs *CatchpointCatchupService) makeCatchpointPeerSelector() *peerSelector {
- if cs.config.EnableCatchupFromArchiveServers {
- return makePeerSelector(
- cs.net,
- []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
- })
- } else {
- return makePeerSelector(
- cs.net,
- []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookRelays},
- })
- }
+ return makePeerSelector(
+ cs.net,
+ []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookRelays},
+ })
}
// checkLedgerDownload sends a HEAD request to the ledger endpoint of peers to validate the catchpoint's availability
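With the EnableCatchupFromArchiveServers branch gone, catchpoint catchup builds its selector from a single peer class unconditionally. A minimal standalone sketch of the resulting shape (simplified stand-in types, not the go-algorand API):

    package main

    import "fmt"

    // peerClass is a simplified stand-in for the catchup package's type.
    type peerClass struct {
        initialRank int
        class       string
    }

    // makeCatchpointPeerClasses mirrors the new makeCatchpointPeerSelector:
    // no config check, always a single relay-backed class.
    func makeCatchpointPeerClasses() []peerClass {
        return []peerClass{{initialRank: 0, class: "PeersPhonebookRelays"}}
    }

    func main() {
        fmt.Println(makeCatchpointPeerClasses()) // [{0 PeersPhonebookRelays}]
    }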
diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go
index 2a907934a..aa8d348d4 100644
--- a/catchup/peerSelector_test.go
+++ b/catchup/peerSelector_test.go
@@ -134,7 +134,7 @@ func TestPeerSelector_RankPeer(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) []network.Peer {
return peers
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes}},
)
psp, err := peerSelector.getNextPeer()
@@ -194,14 +194,14 @@ func TestPeerSelector_PeerDownloadRanking(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
} else {
peers = append(peers, peers2...)
}
}
return
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
)
archivalPeer, err := peerSelector.getNextPeer()
@@ -243,7 +243,7 @@ func TestPeerSelector_FindMissingPeer(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) []network.Peer {
return []network.Peer{}
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes}},
)
poolIdx, peerIdx := peerSelector.findPeer(&peerSelectorPeer{mockHTTPPeer{address: "abcd"}, 0})
@@ -261,14 +261,14 @@ func TestPeerSelector_HistoricData(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
} else {
peers = append(peers, peers2...)
}
}
return
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
)
@@ -335,14 +335,14 @@ func TestPeerSelector_PeersDownloadFailed(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
} else {
peers = append(peers, peers2...)
}
}
return
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
)
@@ -411,14 +411,14 @@ func TestPeerSelector_Penalty(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
} else {
peers = append(peers, peers2...)
}
}
return
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
)
@@ -468,44 +468,39 @@ func TestPeerSelector_PeerDownloadDurationToRank(t *testing.T) {
peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}}
peers3 := []network.Peer{&mockHTTPPeer{address: "c1"}, &mockHTTPPeer{address: "c2"}}
peers4 := []network.Peer{&mockHTTPPeer{address: "d1"}, &mockHTTPPeer{address: "b2"}}
- peers5 := []network.Peer{&mockHTTPPeer{address: "e1"}, &mockHTTPPeer{address: "b2"}}
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookRelays {
peers = append(peers, peers1...)
- } else if opt == network.PeersPhonebookRelays {
- peers = append(peers, peers2...)
} else if opt == network.PeersConnectedOut {
- peers = append(peers, peers3...)
+ peers = append(peers, peers2...)
} else if opt == network.PeersPhonebookArchivalNodes {
- peers = append(peers, peers4...)
+ peers = append(peers, peers3...)
} else { // PeersConnectedIn
- peers = append(peers, peers5...)
+ peers = append(peers, peers4...)
}
}
return
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersConnectedIn}},
+ }), []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}},
)
_, err := peerSelector.getNextPeer()
require.NoError(t, err)
require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank0LowBlockTime, peerRank0HighBlockTime),
- peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers1[0], network.PeersPhonebookArchivers}, 500*time.Millisecond))
+ peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers1[0], network.PeersPhonebookRelays}, 500*time.Millisecond))
require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank1LowBlockTime, peerRank1HighBlockTime),
- peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers2[0], network.PeersPhonebookRelays}, 500*time.Millisecond))
+ peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers2[0], network.PeersConnectedOut}, 500*time.Millisecond))
require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime),
- peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers3[0], network.PeersConnectedOut}, 500*time.Millisecond))
+ peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers3[0], network.PeersPhonebookArchivalNodes}, 500*time.Millisecond))
require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime),
- peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers4[0], network.PeersPhonebookArchivalNodes}, 500*time.Millisecond))
- require.Equal(t, downloadDurationToRank(500*time.Millisecond, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank4LowBlockTime, peerRank4HighBlockTime),
- peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers5[0], network.PeersConnectedIn}, 500*time.Millisecond))
+ peerSelector.peerDownloadDurationToRank(&peerSelectorPeer{peers4[0], network.PeersConnectedIn}, 500*time.Millisecond))
}
@@ -513,7 +508,7 @@ func TestPeerSelector_LowerUpperBounds(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- classes := []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ classes := []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
@@ -536,7 +531,7 @@ func TestPeerSelector_FullResetRequestPenalty(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- class := peerClass{initialRank: 0, peerClass: network.PeersPhonebookArchivers}
+ class := peerClass{initialRank: 0, peerClass: network.PeersPhonebookArchivalNodes}
hs := makeHistoricStatus(10, class)
hs.push(5, 1, class)
require.Equal(t, 1, len(hs.requestGaps))
@@ -551,7 +546,7 @@ func TestPeerSelector_PenaltyBounds(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- class := peerClass{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}
+ class := peerClass{initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes}
hs := makeHistoricStatus(peerHistoryWindowSize, class)
for x := 0; x < 65; x++ {
r0 := hs.push(peerRank2LowBlockTime+50, uint64(x+1), class)
@@ -578,11 +573,11 @@ func TestPeerSelector_ClassUpperBound(t *testing.T) {
t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}}
- pClass := peerClass{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}
+ pClass := peerClass{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes}
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
}
}
@@ -613,11 +608,11 @@ func TestPeerSelector_ClassLowerBound(t *testing.T) {
t.Parallel()
peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}}
- pClass := peerClass{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}
+ pClass := peerClass{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes}
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
}
}
@@ -647,14 +642,14 @@ func TestPeerSelector_EvictionAndUpgrade(t *testing.T) {
peerSelector := makePeerSelector(
makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) {
for _, opt := range options {
- if opt == network.PeersPhonebookArchivers {
+ if opt == network.PeersPhonebookArchivalNodes {
peers = append(peers, peers1...)
} else {
peers = append(peers, peers2...)
}
}
return
- }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers},
+ }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}},
)
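The test updates above are mostly a one-for-one rename of network.PeersPhonebookArchivers to network.PeersPhonebookArchivalNodes, plus a collapse from five priority tiers to four in TestPeerSelector_PeerDownloadDurationToRank. A compact sketch of the remaining ordering (rank values illustrative, not the real constants):

    package main

    import "fmt"

    func main() {
        // Illustrative ranks only; the real constants live in catchup/peerSelector.go.
        tiers := []struct {
            rank  int
            class string
        }{
            {0, "PeersPhonebookRelays"},          // first priority
            {200, "PeersConnectedOut"},           // second
            {400, "PeersPhonebookArchivalNodes"}, // third
            {600, "PeersConnectedIn"},            // fourth
        }
        for _, tier := range tiers {
            fmt.Printf("rank %d: %s\n", tier.rank, tier.class)
        }
    }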
diff --git a/catchup/service.go b/catchup/service.go
index 1093f851a..58fc3ae6b 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -868,72 +868,34 @@ func (s *Service) roundIsNotSupported(nextRound basics.Round) bool {
func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch bool) *peerSelector {
var peerClasses []peerClass
- if cfg.EnableCatchupFromArchiveServers {
- if pipelineFetch {
- if cfg.NetAddress != "" { // Relay node
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersConnectedIn},
- }
- } else {
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
- }
+ if pipelineFetch {
+ if cfg.NetAddress != "" && cfg.EnableGossipService { // Relay node
+ peerClasses = []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
}
} else {
- if cfg.NetAddress != "" { // Relay node
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialFifthPriority, peerClass: network.PeersPhonebookArchivers},
- }
- } else {
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers},
- }
+ peerClasses = []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
}
}
} else {
- if pipelineFetch {
- if cfg.NetAddress != "" { // Relay node
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn},
- }
- } else {
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
- }
+ if cfg.NetAddress != "" && cfg.EnableGossipService { // Relay node
+ peerClasses = []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
}
} else {
- if cfg.NetAddress != "" { // Relay node
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookRelays},
- }
- } else {
- peerClasses = []peerClass{
- {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
- {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
- {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
- }
+ peerClasses = []peerClass{
+ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
+ {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivalNodes},
+ {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays},
}
}
}
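After the removal, createPeerSelector depends on just two inputs: whether the node is a gossip-serving relay (cfg.NetAddress set and cfg.EnableGossipService true) and whether the fetch is pipelined. A standalone sketch of the pruned decision tree (class names abbreviated, types simplified):

    package main

    import "fmt"

    func classesFor(isRelay, pipelineFetch bool) []string {
        switch {
        case pipelineFetch && isRelay:
            return []string{"ConnectedOut", "PhonebookArchivalNodes", "PhonebookRelays", "ConnectedIn"}
        case pipelineFetch:
            return []string{"PhonebookArchivalNodes", "ConnectedOut", "PhonebookRelays"}
        case isRelay:
            return []string{"ConnectedOut", "ConnectedIn", "PhonebookArchivalNodes", "PhonebookRelays"}
        default:
            return []string{"ConnectedOut", "PhonebookArchivalNodes", "PhonebookRelays"}
        }
    }

    func main() {
        fmt.Println(classesFor(true, true))   // relay, pipelined fetch
        fmt.Println(classesFor(false, false)) // non-relay, sequential fetch
    }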
diff --git a/catchup/service_test.go b/catchup/service_test.go
index c5b8cfcca..8deb692b0 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -954,71 +954,18 @@ func TestCatchupUnmatchedCertificate(t *testing.T) {
}
}
-// TestCreatePeerSelector tests if the correct peer selector coonfigurations are prepared
+// TestCreatePeerSelector tests if the correct peer selector configurations are prepared
func TestCreatePeerSelector(t *testing.T) {
partitiontest.PartitionTest(t)
// Make Service
cfg := defaultConfig
- cfg.EnableCatchupFromArchiveServers = true
-
+ // cfg.NetAddress != ""; cfg.EnableGossipService = true; pipelineFetch = true
cfg.NetAddress = "someAddress"
+ cfg.EnableGossipService = true
s := MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps := createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 5, len(ps.peerClasses))
- require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
- require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
- require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
- require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
- require.Equal(t, peerRankInitialFifthPriority, ps.peerClasses[4].initialRank)
-
- require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
- require.Equal(t, network.PeersConnectedIn, ps.peerClasses[4].peerClass)
-
- // cfg.EnableCatchupFromArchiveServers = true; cfg.NetAddress == ""; pipelineFetch = true;
- cfg.EnableCatchupFromArchiveServers = true
- cfg.NetAddress = ""
- s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 4, len(ps.peerClasses))
- require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
- require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
- require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
- require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
-
- require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersConnectedOut, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
-
- // cfg.EnableCatchupFromArchiveServers = true; cfg.NetAddress != ""; pipelineFetch = false
- cfg.EnableCatchupFromArchiveServers = true
- cfg.NetAddress = "someAddress"
- s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = createPeerSelector(s.net, s.cfg, false)
-
- require.Equal(t, 5, len(ps.peerClasses))
- require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
- require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
- require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
- require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
- require.Equal(t, peerRankInitialFifthPriority, ps.peerClasses[4].initialRank)
-
- require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersConnectedIn, ps.peerClasses[1].peerClass)
- require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[4].peerClass)
-
- // cfg.EnableCatchupFromArchiveServers = true; cfg.NetAddress == ""; pipelineFetch = false
- cfg.EnableCatchupFromArchiveServers = true
- cfg.NetAddress = ""
- s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -1029,28 +976,26 @@ func TestCreatePeerSelector(t *testing.T) {
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersPhonebookArchivers, ps.peerClasses[3].peerClass)
+ require.Equal(t, network.PeersConnectedIn, ps.peerClasses[3].peerClass)
- // cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress != ""; pipelineFetch = true
- cfg.EnableCatchupFromArchiveServers = false
- cfg.NetAddress = "someAddress"
+ // cfg.NetAddress == ""; cfg.EnableGossipService = true; pipelineFetch = true
+ cfg.NetAddress = ""
+ cfg.EnableGossipService = true
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, true)
- require.Equal(t, 4, len(ps.peerClasses))
+ require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
- require.Equal(t, peerRankInitialFourthPriority, ps.peerClasses[3].initialRank)
- require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
- require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[0].peerClass)
+ require.Equal(t, network.PeersConnectedOut, ps.peerClasses[1].peerClass)
require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
- require.Equal(t, network.PeersConnectedIn, ps.peerClasses[3].peerClass)
- // cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress == ""; pipelineFetch = true
- cfg.EnableCatchupFromArchiveServers = false
- cfg.NetAddress = ""
+ // cfg.NetAddress != ""; cfg.EnableGossipService = false; pipelineFetch = true
+ cfg.NetAddress = "someAddress"
+ cfg.EnableGossipService = false
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, true)
@@ -1063,9 +1008,9 @@ func TestCreatePeerSelector(t *testing.T) {
require.Equal(t, network.PeersConnectedOut, ps.peerClasses[1].peerClass)
require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
- // cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress != ""; pipelineFetch = false
- cfg.EnableCatchupFromArchiveServers = false
+ // cfg.NetAddress != ""; cfg.EnableGossipService = true; pipelineFetch = false
cfg.NetAddress = "someAddress"
+ cfg.EnableGossipService = true
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, false)
@@ -1080,9 +1025,24 @@ func TestCreatePeerSelector(t *testing.T) {
require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[2].peerClass)
require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[3].peerClass)
- // cfg.EnableCatchupFromArchiveServers = false; cfg.NetAddress == ""; pipelineFetch = false
- cfg.EnableCatchupFromArchiveServers = false
+ // cfg.NetAddress == ""; cfg.EnableGossipService = true; pipelineFetch = false
cfg.NetAddress = ""
+ cfg.EnableGossipService = true
+ s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
+ ps = createPeerSelector(s.net, s.cfg, false)
+
+ require.Equal(t, 3, len(ps.peerClasses))
+ require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
+ require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
+ require.Equal(t, peerRankInitialThirdPriority, ps.peerClasses[2].initialRank)
+
+ require.Equal(t, network.PeersConnectedOut, ps.peerClasses[0].peerClass)
+ require.Equal(t, network.PeersPhonebookArchivalNodes, ps.peerClasses[1].peerClass)
+ require.Equal(t, network.PeersPhonebookRelays, ps.peerClasses[2].peerClass)
+
+ // cfg.NetAddress != ""; cfg.EnableGossipService = false; pipelineFetch = false
+ cfg.NetAddress = "someAddress"
+ cfg.EnableGossipService = false
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
ps = createPeerSelector(s.net, s.cfg, false)
diff --git a/config/localTemplate.go b/config/localTemplate.go
index c6e1ba0a2..618facfd2 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -44,13 +44,13 @@ type Local struct {
// for an existing parameter. This field tag must be updated any time we add a new version.
Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17" version[18]:"18" version[19]:"19" version[20]:"20" version[21]:"21" version[22]:"22" version[23]:"23" version[24]:"24" version[25]:"25" version[26]:"26" version[27]:"27" version[28]:"28" version[29]:"29" version[30]:"30" version[31]:"31" version[32]:"32" version[33]:"33"`
- // Archival nodes retain a full copy of the block history. Non-Archival nodes will delete old blocks and only retain what's need to properly validate blockchain messages (the precise number of recent blocks depends on the consensus parameters. Currently the last 1321 blocks are required). This means that non-Archival nodes require significantly less storage than Archival nodes. Relays (nodes with a valid NetAddress) are always Archival, regardless of this setting. This may change in the future. If setting this to true for the first time, the existing ledger may need to be deleted to get the historical values stored as the setting only effects current blocks forward. To do this, shutdown the node and delete all .sqlite files within the data/testnet-version directory, except the crash.sqlite file. Restart the node and wait for the node to sync.
+ // Archival nodes retain a full copy of the block history. Non-Archival nodes will delete old blocks and only retain what's needed to properly validate blockchain messages (the precise number of recent blocks depends on the consensus parameters; currently the last 1321 blocks are required). This means that non-Archival nodes require significantly less storage than Archival nodes. If setting this to true for the first time, the existing ledger may need to be deleted to get the historical values stored as the setting only affects current blocks forward. To do this, shut down the node and delete all .sqlite files within the data/testnet-version directory, except the crash.sqlite file. Restart the node and wait for the node to sync.
Archival bool `version[0]:"false"`
// GossipFanout sets the maximum number of peers the node will connect to with outgoing connections. If the list of peers is less than this setting, fewer connections will be made. The node will not connect to the same peer multiple times (with outgoing connections).
GossipFanout int `version[0]:"4"`
- // NetAddress is the address and/or port on which the relay node listens for incoming connections, or blank to ignore incoming connections. Specify an IP and port or just a port. For example, 127.0.0.1:0 will listen on a random port on the localhost.
+ // NetAddress is the address and/or port on which a node listens for incoming connections, or blank to ignore incoming connections. Specify an IP and port or just a port. For example, 127.0.0.1:0 will listen on a random port on the localhost.
NetAddress string `version[0]:""`
// ReconnectTime is deprecated and unused.
@@ -454,28 +454,16 @@ type Local struct {
// VerifiedTranscationsCacheSize defines the number of transactions that the verified transactions cache would hold before cycling the cache storage in a round-robin fashion.
VerifiedTranscationsCacheSize int `version[14]:"30000" version[23]:"150000"`
- // EnableCatchupFromArchiveServers controls which peers the catchup service would use in order to catchup.
- // When enabled, the catchup service would use the archive servers before falling back to the relays.
- // On networks that don't have archive servers, this becomes a no-op, as the catchup service would have no
- // archive server to pick from, and therefore automatically selects one of the relay nodes.
- EnableCatchupFromArchiveServers bool `version[15]:"false"`
-
// DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for
// connections that are originating from the local machine. Setting this to "true", allow to create large
// local-machine networks that won't trip the incoming connection limit observed by relays.
DisableLocalhostConnectionRateLimit bool `version[16]:"true"`
// BlockServiceCustomFallbackEndpoints is a comma delimited list of endpoints which the block service uses to
- // redirect the http requests to in case it does not have the round. If it is not specified, will check
- // EnableBlockServiceFallbackToArchiver.
+ // redirect the http requests to in case it does not have the round. If empty, the block service will return
+ // StatusNotFound (404).
BlockServiceCustomFallbackEndpoints string `version[16]:""`
- // EnableBlockServiceFallbackToArchiver controls whether the block service redirects the http requests to
- // an archiver or return StatusNotFound (404) when in does not have the requested round, and
- // BlockServiceCustomFallbackEndpoints is empty.
- // The archiver is randomly selected, if none is available, will return StatusNotFound (404).
- EnableBlockServiceFallbackToArchiver bool `version[16]:"true" version[31]:"false"`
-
// CatchupBlockValidateMode is a development and testing configuration used by the catchup service.
// It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation.
// This field is a bit-field with:
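With both archiver flags deleted, BlockServiceCustomFallbackEndpoints is the only remaining redirect source. A small sketch of the documented semantics (hypothetical helper, not the go-algorand API): an empty value means the block service answers 404 rather than consulting an archiver.

    package main

    import (
        "fmt"
        "strings"
    )

    func fallbackEndpoints(csv string) []string {
        if csv == "" {
            return nil // block service will answer StatusNotFound (404)
        }
        return strings.Split(csv, ",")
    }

    func main() {
        fmt.Println(fallbackEndpoints(""))                        // [] -> 404
        fmt.Println(fallbackEndpoints("http://a:80,http://b:80")) // two round-robin targets
    }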
diff --git a/config/local_defaults.go b/config/local_defaults.go
index 791e6e222..d2a73d4c6 100644
--- a/config/local_defaults.go
+++ b/config/local_defaults.go
@@ -64,8 +64,6 @@ var defaultLocal = Local{
EnableAgreementTimeMetrics: false,
EnableAssembleStats: false,
EnableBlockService: false,
- EnableBlockServiceFallbackToArchiver: false,
- EnableCatchupFromArchiveServers: false,
EnableDeveloperAPI: false,
EnableExperimentalAPI: false,
EnableFollowMode: false,
diff --git a/installer/config.json.example b/installer/config.json.example
index aa1cb7171..d9188ef74 100644
--- a/installer/config.json.example
+++ b/installer/config.json.example
@@ -43,8 +43,6 @@
"EnableAgreementTimeMetrics": false,
"EnableAssembleStats": false,
"EnableBlockService": false,
- "EnableBlockServiceFallbackToArchiver": false,
- "EnableCatchupFromArchiveServers": false,
"EnableDeveloperAPI": false,
"EnableExperimentalAPI": false,
"EnableFollowMode": false,
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index 9ce5f331a..5bfabbaf2 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -1895,15 +1895,14 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses []
}
relaysAddresses = nil
}
- if wn.config.EnableCatchupFromArchiveServers || wn.config.EnableBlockServiceFallbackToArchiver {
- archiverAddresses, err = wn.resolveSRVRecords("archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
- if err != nil {
- // only log this warning on testnet or devnet
- if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet {
- wn.log.Warnf("Cannot lookup archive SRV record for %s: %v", dnsBootstrap, err)
- }
- archiverAddresses = nil
+
+ archiverAddresses, err = wn.resolveSRVRecords("archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced())
+ if err != nil {
+ // only log this warning on testnet or devnet
+ if wn.NetworkID == config.Devnet || wn.NetworkID == config.Testnet {
+ wn.log.Warnf("Cannot lookup archive SRV record for %s: %v", dnsBootstrap, err)
}
+ archiverAddresses = nil
}
return
}
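getDNSAddrs now resolves the archive SRV records unconditionally; only the warning on failure remains gated to devnet/testnet. A condensed sketch of the new control flow, with the resolver stubbed out:

    package main

    import "fmt"

    // resolveSRV stands in for wn.resolveSRVRecords.
    func resolveSRV(service, bootstrap string) ([]string, error) {
        return []string{service + "." + bootstrap}, nil
    }

    func getDNSAddrs(bootstrap string) (relays, archivers []string) {
        relays, _ = resolveSRV("algobootstrap", bootstrap)
        // Unconditional now; previously gated on EnableCatchupFromArchiveServers
        // or EnableBlockServiceFallbackToArchiver.
        archivers, err := resolveSRV("archive", bootstrap)
        if err != nil {
            archivers = nil // the real code also warns, but only on devnet/testnet
        }
        return
    }

    func main() {
        fmt.Println(getDNSAddrs("testnet.algorand.network"))
    }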
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index b7cd873bc..e35cc7d17 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -4127,95 +4127,83 @@ func TestRefreshRelayArchivePhonebookAddresses(t *testing.T) {
var netA *WebsocketNetwork
var refreshRelayDNSBootstrapID = "<network>.algorand.network?backup=<network>.algorand.net&dedup=<name>.algorand-<network>.(network|net)"
- testRefreshWithConfig := func(refreshTestConf config.Local) {
- rapid.Check(t, func(t1 *rapid.T) {
- refreshTestConf.DNSBootstrapID = refreshRelayDNSBootstrapID
- netA = makeTestWebsocketNodeWithConfig(t, refreshTestConf)
- netA.NetworkID = nonHardcodedNetworkIDGen().Draw(t1, "network")
-
- primarySRVBootstrap := strings.Replace("<network>.algorand.network", "<network>", string(netA.NetworkID), -1)
- backupSRVBootstrap := strings.Replace("<network>.algorand.net", "<network>", string(netA.NetworkID), -1)
- var primaryRelayResolvedRecords []string
- var secondaryRelayResolvedRecords []string
- var primaryArchiveResolvedRecords []string
- var secondaryArchiveResolvedRecords []string
-
- for _, record := range []string{"r1.algorand-<network>.network",
- "r2.algorand-<network>.network", "r3.algorand-<network>.network"} {
- var recordSub = strings.Replace(record, "<network>", string(netA.NetworkID), -1)
- primaryRelayResolvedRecords = append(primaryRelayResolvedRecords, recordSub)
- secondaryRelayResolvedRecords = append(secondaryRelayResolvedRecords, strings.Replace(recordSub, "network", "net", -1))
- }
-
- for _, record := range []string{"r1archive.algorand-<network>.network",
- "r2archive.algorand-<network>.network", "r3archive.algorand-<network>.network"} {
- var recordSub = strings.Replace(record, "<network>", string(netA.NetworkID), -1)
- primaryArchiveResolvedRecords = append(primaryArchiveResolvedRecords, recordSub)
- secondaryArchiveResolvedRecords = append(secondaryArchiveResolvedRecords, strings.Replace(recordSub, "network", "net", -1))
- }
+ refreshTestConf := defaultConfig
- // Mock the SRV record lookup
- netA.resolveSRVRecords = func(service string, protocol string, name string, fallbackDNSResolverAddress string,
- secure bool) (addrs []string, err error) {
- if service == "algobootstrap" && protocol == "tcp" && name == primarySRVBootstrap {
- return primaryRelayResolvedRecords, nil
- } else if service == "algobootstrap" && protocol == "tcp" && name == backupSRVBootstrap {
- return secondaryRelayResolvedRecords, nil
- }
+ rapid.Check(t, func(t1 *rapid.T) {
+ refreshTestConf.DNSBootstrapID = refreshRelayDNSBootstrapID
+ netA = makeTestWebsocketNodeWithConfig(t, refreshTestConf)
+ netA.NetworkID = nonHardcodedNetworkIDGen().Draw(t1, "network")
+
+ primarySRVBootstrap := strings.Replace("<network>.algorand.network", "<network>", string(netA.NetworkID), -1)
+ backupSRVBootstrap := strings.Replace("<network>.algorand.net", "<network>", string(netA.NetworkID), -1)
+ var primaryRelayResolvedRecords []string
+ var secondaryRelayResolvedRecords []string
+ var primaryArchiveResolvedRecords []string
+ var secondaryArchiveResolvedRecords []string
+
+ for _, record := range []string{"r1.algorand-<network>.network",
+ "r2.algorand-<network>.network", "r3.algorand-<network>.network"} {
+ var recordSub = strings.Replace(record, "<network>", string(netA.NetworkID), -1)
+ primaryRelayResolvedRecords = append(primaryRelayResolvedRecords, recordSub)
+ secondaryRelayResolvedRecords = append(secondaryRelayResolvedRecords, strings.Replace(recordSub, "network", "net", -1))
+ }
- if service == "archive" && protocol == "tcp" && name == primarySRVBootstrap {
- return primaryArchiveResolvedRecords, nil
- } else if service == "archive" && protocol == "tcp" && name == backupSRVBootstrap {
- return secondaryArchiveResolvedRecords, nil
- }
+ for _, record := range []string{"r1archive.algorand-<network>.network",
+ "r2archive.algorand-<network>.network", "r3archive.algorand-<network>.network"} {
+ var recordSub = strings.Replace(record, "<network>", string(netA.NetworkID), -1)
+ primaryArchiveResolvedRecords = append(primaryArchiveResolvedRecords, recordSub)
+ secondaryArchiveResolvedRecords = append(secondaryArchiveResolvedRecords, strings.Replace(recordSub, "network", "net", -1))
+ }
- return
+ // Mock the SRV record lookup
+ netA.resolveSRVRecords = func(service string, protocol string, name string, fallbackDNSResolverAddress string,
+ secure bool) (addrs []string, err error) {
+ if service == "algobootstrap" && protocol == "tcp" && name == primarySRVBootstrap {
+ return primaryRelayResolvedRecords, nil
+ } else if service == "algobootstrap" && protocol == "tcp" && name == backupSRVBootstrap {
+ return secondaryRelayResolvedRecords, nil
}
- relayPeers := netA.GetPeers(PeersPhonebookRelays)
- assert.Equal(t, 0, len(relayPeers))
-
- archivePeers := netA.GetPeers(PeersPhonebookArchivers)
- assert.Equal(t, 0, len(archivePeers))
-
- netA.refreshRelayArchivePhonebookAddresses()
+ if service == "archive" && protocol == "tcp" && name == primarySRVBootstrap {
+ return primaryArchiveResolvedRecords, nil
+ } else if service == "archive" && protocol == "tcp" && name == backupSRVBootstrap {
+ return secondaryArchiveResolvedRecords, nil
+ }
- relayPeers = netA.GetPeers(PeersPhonebookRelays)
+ return
+ }
- assert.Equal(t, 3, len(relayPeers))
- relayAddrs := make([]string, 0, len(relayPeers))
- for _, peer := range relayPeers {
- relayAddrs = append(relayAddrs, peer.(HTTPPeer).GetAddress())
- }
+ relayPeers := netA.GetPeers(PeersPhonebookRelays)
+ assert.Equal(t, 0, len(relayPeers))
- assert.ElementsMatch(t, primaryRelayResolvedRecords, relayAddrs)
+ archivePeers := netA.GetPeers(PeersPhonebookArchivers)
+ assert.Equal(t, 0, len(archivePeers))
- archivePeers = netA.GetPeers(PeersPhonebookArchivers)
+ netA.refreshRelayArchivePhonebookAddresses()
- if refreshTestConf.EnableBlockServiceFallbackToArchiver {
- // For the time being, we do not dedup resolved archive nodes
- assert.Equal(t, len(primaryArchiveResolvedRecords)+len(secondaryArchiveResolvedRecords), len(archivePeers))
+ relayPeers = netA.GetPeers(PeersPhonebookRelays)
- archiveAddrs := make([]string, 0, len(archivePeers))
- for _, peer := range archivePeers {
- archiveAddrs = append(archiveAddrs, peer.(HTTPPeer).GetAddress())
- }
+ assert.Equal(t, 3, len(relayPeers))
+ relayAddrs := make([]string, 0, len(relayPeers))
+ for _, peer := range relayPeers {
+ relayAddrs = append(relayAddrs, peer.(HTTPPeer).GetAddress())
+ }
- assert.ElementsMatch(t, append(primaryArchiveResolvedRecords, secondaryArchiveResolvedRecords...), archiveAddrs)
+ assert.ElementsMatch(t, primaryRelayResolvedRecords, relayAddrs)
- } else {
- assert.Equal(t, 0, len(archivePeers))
- }
+ archivePeers = netA.GetPeers(PeersPhonebookArchivers)
- })
- }
+ // TODO: For the time being, we do not dedup resolved archive nodes
+ assert.Equal(t, len(primaryArchiveResolvedRecords)+len(secondaryArchiveResolvedRecords), len(archivePeers))
- testRefreshWithConfig(defaultConfig)
+ archiveAddrs := make([]string, 0, len(archivePeers))
+ for _, peer := range archivePeers {
+ archiveAddrs = append(archiveAddrs, peer.(HTTPPeer).GetAddress())
+ }
- configWithBlockServiceFallbackToArchiverEnabled := config.GetDefaultLocal()
- configWithBlockServiceFallbackToArchiverEnabled.EnableBlockServiceFallbackToArchiver = true
+ assert.ElementsMatch(t, append(primaryArchiveResolvedRecords, secondaryArchiveResolvedRecords...), archiveAddrs)
- testRefreshWithConfig(configWithBlockServiceFallbackToArchiverEnabled)
+ })
}
/*
diff --git a/rpcs/blockService.go b/rpcs/blockService.go
index 7245eb188..8231b5a98 100644
--- a/rpcs/blockService.go
+++ b/rpcs/blockService.go
@@ -36,7 +36,6 @@ import (
"github.com/algorand/go-algorand/agreement"
"github.com/algorand/go-algorand/config"
- "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/ledger/ledgercore"
@@ -103,7 +102,6 @@ type BlockService struct {
enableService bool
enableServiceOverGossip bool
fallbackEndpoints fallbackEndpoints
- enableArchiverFallback bool
log logging.Logger
closeWaitGroup sync.WaitGroup
mu deadlock.Mutex
@@ -144,7 +142,6 @@ func MakeBlockService(log logging.Logger, config config.Local, ledger LedgerForB
enableService: config.EnableBlockService,
enableServiceOverGossip: config.EnableGossipBlockService,
fallbackEndpoints: makeFallbackEndpoints(log, config.BlockServiceCustomFallbackEndpoints),
- enableArchiverFallback: config.EnableBlockServiceFallbackToArchiver,
log: log,
memoryCap: config.BlockServiceMemCap,
}
@@ -384,13 +381,10 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc
return
}
-// redirectRequest redirects the request to the next round robin fallback endpoing if available, otherwise,
-// if EnableBlockServiceFallbackToArchiver is enabled, redirects to a random archiver.
+// redirectRequest redirects the request to the next round robin fallback endpoint if available
func (bs *BlockService) redirectRequest(round uint64, response http.ResponseWriter, request *http.Request) (ok bool) {
peerAddress := bs.getNextCustomFallbackEndpoint()
- if peerAddress == "" && bs.enableArchiverFallback {
- peerAddress = bs.getRandomArchiver()
- }
+
if peerAddress == "" {
return false
}
@@ -411,30 +405,14 @@ func (bs *BlockService) getNextCustomFallbackEndpoint() (endpointAddress string)
if len(bs.fallbackEndpoints.endpoints) == 0 {
return
}
+
+ bs.mu.Lock()
+ defer bs.mu.Unlock()
endpointAddress = bs.fallbackEndpoints.endpoints[bs.fallbackEndpoints.lastUsed]
bs.fallbackEndpoints.lastUsed = (bs.fallbackEndpoints.lastUsed + 1) % len(bs.fallbackEndpoints.endpoints)
return
}
-// getRandomArchiver returns a random archiver address
-func (bs *BlockService) getRandomArchiver() (endpointAddress string) {
- peers := bs.net.GetPeers(network.PeersPhonebookArchivers)
- httpPeers := make([]network.HTTPPeer, 0, len(peers))
-
- for _, peer := range peers {
- httpPeer, validHTTPPeer := peer.(network.HTTPPeer)
- if validHTTPPeer {
- httpPeers = append(httpPeers, httpPeer)
- }
- }
- if len(httpPeers) == 0 {
- return
- }
- randIndex := crypto.RandUint64() % uint64(len(httpPeers))
- endpointAddress = httpPeers[randIndex].GetAddress()
- return
-}
-
// rawBlockBytes returns the block/cert for a given round, while taking the lock
// to ensure the block service is currently active.
func (bs *BlockService) rawBlockBytes(round basics.Round) ([]byte, error) {
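The surviving redirect path is a round-robin over the custom fallback endpoints, now mutex-guarded since lastUsed is mutated per request; getRandomArchiver is gone entirely. A minimal standalone sketch (fields simplified stand-ins for BlockService):

    package main

    import (
        "fmt"
        "sync"
    )

    type blockService struct {
        mu        sync.Mutex
        endpoints []string
        lastUsed  int
    }

    func (bs *blockService) nextFallbackEndpoint() string {
        if len(bs.endpoints) == 0 {
            return "" // caller returns false and the request gets a 404
        }
        bs.mu.Lock()
        defer bs.mu.Unlock()
        addr := bs.endpoints[bs.lastUsed]
        bs.lastUsed = (bs.lastUsed + 1) % len(bs.endpoints)
        return addr
    }

    func main() {
        bs := &blockService{endpoints: []string{"http://a", "http://b"}}
        for i := 0; i < 3; i++ {
            fmt.Println(bs.nextFallbackEndpoint()) // a, b, a
        }
    }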
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index e77fc7aa0..3aab7c4ab 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -125,8 +125,9 @@ func TestHandleCatchupReqNegative(t *testing.T) {
require.Equal(t, roundNumberParseErrMsg, string(val))
}
-// TestRedirectFallbackArchiver tests the case when the block service fallback to another in the absence of a given block.
-func TestRedirectFallbackArchiver(t *testing.T) {
+// TestRedirectFallbackEndpoints tests the case when the block service falls back to another from
+// BlockServiceCustomFallbackEndpoints in the absence of a given block.
+func TestRedirectFallbackEndpoints(t *testing.T) {
partitiontest.PartitionTest(t)
log := logging.TestingLog(t)
@@ -142,25 +143,23 @@ func TestRedirectFallbackArchiver(t *testing.T) {
net1 := &httpTestPeerSource{}
net2 := &httpTestPeerSource{}
- config := config.GetDefaultLocal()
- // Need to enable block service fallbacks
- config.EnableBlockServiceFallbackToArchiver = true
-
- bs1 := MakeBlockService(log, config, ledger1, net1, "test-genesis-ID")
- bs2 := MakeBlockService(log, config, ledger2, net2, "test-genesis-ID")
-
nodeA := &basicRPCNode{}
nodeB := &basicRPCNode{}
-
- nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
nodeA.start()
defer nodeA.stop()
-
- nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
nodeB.start()
defer nodeB.stop()
- net1.addPeer(nodeB.rootURL())
+ config := config.GetDefaultLocal()
+ // Set the first to a bad address, the second to self, and the third to the one that has the block.
+ // If RR is right, should succeed.
+ config.BlockServiceCustomFallbackEndpoints = fmt.Sprintf("://badaddress,%s,%s", nodeA.rootURL(), nodeB.rootURL())
+
+ bs1 := MakeBlockService(log, config, ledger1, net1, "test-genesis-ID")
+ bs2 := MakeBlockService(log, config, ledger2, net2, "test-genesis-ID")
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
require.NoError(t, err)
@@ -235,60 +234,8 @@ func TestBlockServiceShutdown(t *testing.T) {
<-requestDone
}
-// TestRedirectBasic tests the case when the block service redirects the request to elsewhere
-func TestRedirectFallbackEndpoints(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- log := logging.TestingLog(t)
-
- ledger1 := makeLedger(t, "l1")
- defer ledger1.Close()
- ledger2 := makeLedger(t, "l2")
- defer ledger2.Close()
- addBlock(t, ledger2)
-
- net1 := &httpTestPeerSource{}
- net2 := &httpTestPeerSource{}
-
- nodeA := &basicRPCNode{}
- nodeB := &basicRPCNode{}
- nodeA.start()
- defer nodeA.stop()
- nodeB.start()
- defer nodeB.stop()
-
- config := config.GetDefaultLocal()
- // Set the first to a bad address, the second to self, and the third to the one that has the block.
- // If RR is right, should succeed.
- config.BlockServiceCustomFallbackEndpoints = fmt.Sprintf("://badaddress,%s,%s", nodeA.rootURL(), nodeB.rootURL())
- bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}")
- bs2 := MakeBlockService(log, config, ledger2, net2, "{genesisID}")
-
- nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
- nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
-
- parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
- require.NoError(t, err)
-
- client := http.Client{}
-
- ctx := context.Background()
- parsedURL.Path = FormatBlockQuery(uint64(1), parsedURL.Path, net1)
- blockURL := parsedURL.String()
- request, err := http.NewRequest("GET", blockURL, nil)
- require.NoError(t, err)
- requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
- defer requestCancel()
- request = request.WithContext(requestCtx)
- network.SetUserAgentHeader(request.Header)
- response, err := client.Do(request)
- require.NoError(t, err)
-
- require.Equal(t, http.StatusOK, response.StatusCode)
-}
-
-// TestRedirectFallbackArchiver tests the case when the block service
-// fallback to another because its memory use it at capacity
+// TestRedirectOnFullCapacity tests the case when the block service
+// falls back to another because its memory use is at capacity
func TestRedirectOnFullCapacity(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -313,27 +260,31 @@ func TestRedirectOnFullCapacity(t *testing.T) {
net1 := &httpTestPeerSource{}
net2 := &httpTestPeerSource{}
- config := config.GetDefaultLocal()
- // Need to enable block service fallbacks
- config.EnableBlockServiceFallbackToArchiver = true
- bs1 := MakeBlockService(log1, config, ledger1, net1, "test-genesis-ID")
- bs2 := MakeBlockService(log2, config, ledger2, net2, "test-genesis-ID")
- // set the memory cap so that it can serve only 1 block at a time
- bs1.memoryCap = 250
- bs2.memoryCap = 250
-
nodeA := &basicRPCNode{}
nodeB := &basicRPCNode{}
-
- nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
nodeA.start()
defer nodeA.stop()
-
- nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
nodeB.start()
defer nodeB.stop()
- net1.addPeer(nodeB.rootURL())
+ configWithRedirects := config.GetDefaultLocal()
+
+ configWithRedirects.BlockServiceCustomFallbackEndpoints = nodeB.rootURL()
+
+ bs1 := MakeBlockService(log1, configWithRedirects, ledger1, net1, "test-genesis-ID")
+
+ // config with no redirects
+ configNoRedirects := config.GetDefaultLocal()
+ configNoRedirects.BlockServiceCustomFallbackEndpoints = ""
+
+ bs2 := MakeBlockService(log2, configNoRedirects, ledger2, net2, "test-genesis-ID")
+ // set the memory cap so that it can serve only 1 block at a time
+ bs1.memoryCap = 250
+ bs2.memoryCap = 250
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+
+ nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
require.NoError(t, err)
@@ -483,27 +434,36 @@ func TestWsBlockLimiting(t *testing.T) {
func TestRedirectExceptions(t *testing.T) {
partitiontest.PartitionTest(t)
- log := logging.TestingLog(t)
+ log1 := logging.TestingLog(t)
+ log2 := logging.TestingLog(t)
ledger1 := makeLedger(t, "l1")
+ ledger2 := makeLedger(t, "l2")
defer ledger1.Close()
+ defer ledger2.Close()
addBlock(t, ledger1)
net1 := &httpTestPeerSource{}
-
- config := config.GetDefaultLocal()
- // Need to enable block service fallbacks
- config.EnableBlockServiceFallbackToArchiver = true
-
- bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}")
+ net2 := &httpTestPeerSource{}
nodeA := &basicRPCNode{}
-
- nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeB := &basicRPCNode{}
nodeA.start()
defer nodeA.stop()
+ nodeB.start()
+ defer nodeB.stop()
+
+ configInvalidRedirects := config.GetDefaultLocal()
+ configInvalidRedirects.BlockServiceCustomFallbackEndpoints = "badAddress"
+
+ configWithRedirectToSelf := config.GetDefaultLocal()
+ configWithRedirectToSelf.BlockServiceCustomFallbackEndpoints = nodeB.rootURL()
- net1.peers = append(net1.peers, "invalidPeer")
+ bs1 := MakeBlockService(log1, configInvalidRedirects, ledger1, net1, "{genesisID}")
+ bs2 := MakeBlockService(log2, configWithRedirectToSelf, ledger2, net2, "{genesisID}")
+
+ nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1)
+ nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2)
parsedURL, err := network.ParseHostOrURL(nodeA.rootURL())
require.NoError(t, err)
@@ -515,7 +475,7 @@ func TestRedirectExceptions(t *testing.T) {
blockURL := parsedURL.String()
request, err := http.NewRequest("GET", blockURL, nil)
require.NoError(t, err)
- requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
+ requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(configInvalidRedirects.CatchupHTTPBlockFetchTimeoutSec)*time.Second)
defer requestCancel()
request = request.WithContext(requestCtx)
network.SetUserAgentHeader(request.Header)
@@ -524,8 +484,14 @@ func TestRedirectExceptions(t *testing.T) {
require.NoError(t, err)
require.Equal(t, response.StatusCode, http.StatusNotFound)
- net1.addPeer(nodeA.rootURL())
- _, err = client.Do(request)
+ parsedURLNodeB, err := network.ParseHostOrURL(nodeB.rootURL())
+ require.NoError(t, err)
+
+ parsedURLNodeB.Path = FormatBlockQuery(uint64(4), parsedURLNodeB.Path, net2)
+ blockURLNodeB := parsedURLNodeB.String()
+ requestNodeB, err := http.NewRequest("GET", blockURLNodeB, nil)
+ _, err = client.Do(requestNodeB)
+
require.Error(t, err)
require.Contains(t, err.Error(), "stopped after 10 redirects")
}
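The "stopped after 10 redirects" assertion above leans on net/http's default client policy, which aborts after ten hops; a server whose fallback points at itself reproduces it. A self-contained illustration using only the standard library:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    func main() {
        // Every request redirects back to itself, like a block service whose
        // custom fallback endpoint is its own address.
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            http.Redirect(w, r, r.URL.String(), http.StatusMovedPermanently)
        }))
        defer srv.Close()

        _, err := http.Get(srv.URL)
        fmt.Println(err) // Get "...": stopped after 10 redirects
    }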
diff --git a/test/testdata/configs/config-v33.json b/test/testdata/configs/config-v33.json
index aa1cb7171..d9188ef74 100644
--- a/test/testdata/configs/config-v33.json
+++ b/test/testdata/configs/config-v33.json
@@ -43,8 +43,6 @@
"EnableAgreementTimeMetrics": false,
"EnableAssembleStats": false,
"EnableBlockService": false,
- "EnableBlockServiceFallbackToArchiver": false,
- "EnableCatchupFromArchiveServers": false,
"EnableDeveloperAPI": false,
"EnableExperimentalAPI": false,
"EnableFollowMode": false,