// Copyright (C) 2019-2023 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.

package ledger

import (
	"archive/tar"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/algorand/go-algorand/ledger/encoded"
	"github.com/algorand/go-algorand/ledger/ledgercore"
	"github.com/algorand/go-algorand/ledger/store/trackerdb"
	"github.com/algorand/go-algorand/protocol"
)

const (
	// BalancesPerCatchpointFileChunk defines the number of accounts stored in each chunk of the catchpoint file.
	// Note that the last chunk is typically smaller than this number.
	BalancesPerCatchpointFileChunk = 512

	// ResourcesPerCatchpointFileChunk defines the maximum number of resources stored in a single chunk.
	// 100,000 resources * 20KB/resource => roughly 2GB max per chunk if every entry is a maxed-out app.
	// In reality most entries are asset holdings, which are very small.
	ResourcesPerCatchpointFileChunk = 100_000

	// SPContextPerCatchpointFile defines the maximum number of state proof verification contexts stored
	// in the catchpoint file.
	// (2 years * 31536000 seconds per year) / (256 rounds per state proof verification data * 3.6 seconds per round) ~= 70000
	SPContextPerCatchpointFile = 70000
)

// catchpointFileWriter manages the persistence of account data into the catchpoint file.
// It is designed to work in steps: the caller invokes FileWriteStep in a loop until the
// writing is complete. The operation may take multiple steps, and the caller may throttle
// CPU utilization between calls.
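//
// A minimal driver sketch (hypothetical caller; a real caller would derive a
// fresh stepCtx with a deadline for each step, and call Abort on error):
//
//	cw, err := makeCatchpointFileWriter(ctx, filePath, tx, maxResourcesPerChunk)
//	for more := true; more && err == nil; {
//		more, err = cw.FileWriteStep(stepCtx)
//	}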
type catchpointFileWriter struct {
	ctx                  context.Context
	tx                   trackerdb.SnapshotScope
	filePath             string
	totalAccounts        uint64
	totalKVs             uint64
	file                 *os.File
	tar                  *tar.Writer
	compressor           io.WriteCloser
	chunk                catchpointFileChunkV6
	chunkNum             uint64
	writtenBytes         int64
	biggestChunkLen      uint64
	accountsIterator     accountsBatchIter
	maxResourcesPerChunk int
	accountsDone         bool
	kvRows               kvIter
}

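// kvIter abstracts iteration over the key/value store rows included in the
// catchpoint: Next advances the iterator, and KeyValue returns the current
// key and value.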
type kvIter interface {
	Next() bool
	KeyValue() ([]byte, []byte, error)
	Close()
}

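// accountsBatchIter yields batches of encoded account records; each call to
// Next is bounded by both an account count and a resource count.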
type accountsBatchIter interface {
	Next(ctx context.Context, accountCount int, resourceCount int) ([]encoded.BalanceRecordV6, uint64, error)
	Close()
}

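// catchpointFileBalancesChunkV5 is the balances chunk layout used by version 5
// catchpoint files.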
type catchpointFileBalancesChunkV5 struct {
	_struct  struct{}                  `codec:",omitempty,omitemptyarray"`
	Balances []encoded.BalanceRecordV5 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
}

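// catchpointFileChunkV6 is a single chunk of a version 6 catchpoint file. A
// chunk holds either account balances or key/value records, never both.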
type catchpointFileChunkV6 struct {
	_struct struct{} `codec:",omitempty,omitemptyarray"`

	Balances    []encoded.BalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
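	// numAccounts is not part of the encoded chunk; it records how many
	// accounts this chunk's Balances represent.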
	numAccounts uint64
	KVs         []encoded.KVRecordV6 `codec:"kv,allocbound=BalancesPerCatchpointFileChunk"`
}

func (chunk catchpointFileChunkV6) empty() bool {
	return len(chunk.Balances) == 0 && len(chunk.KVs) == 0
}

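// catchpointStateProofVerificationContext wraps the state proof verification
// contexts stored in the catchpoint file so that they can be encoded and
// hashed as a single unit.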
type catchpointStateProofVerificationContext struct {
	_struct struct{}                                   `codec:",omitempty,omitemptyarray"`
	Data    []ledgercore.StateProofVerificationContext `codec:"spd,allocbound=SPContextPerCatchpointFile"`
}

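// ToBeHashed implements the crypto.Hashable interface, binding the encoded
// data to the StateProofVerCtx hash domain.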
func (data catchpointStateProofVerificationContext) ToBeHashed() (protocol.HashID, []byte) {
	return protocol.StateProofVerCtx, protocol.Encode(&data)
}

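// makeCatchpointFileWriter opens (creating it if necessary) the catchpoint
// file at filePath, layers the stage-1 compressor and a tar writer on top of
// it, and prepares the account iterator used by subsequent FileWriteStep calls.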
func makeCatchpointFileWriter(ctx context.Context, filePath string, tx trackerdb.SnapshotScope, maxResourcesPerChunk int) (*catchpointFileWriter, error) {
	aw, err := tx.MakeAccountsReader()
	if err != nil {
		return nil, err
	}

	totalAccounts, err := aw.TotalAccounts(ctx)
	if err != nil {
		return nil, err
	}

	totalKVs, err := aw.TotalKVs(ctx)
	if err != nil {
		return nil, err
	}

	err = os.MkdirAll(filepath.Dir(filePath), 0700)
	if err != nil {
		return nil, err
	}
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE, 0644)
	if err != nil {
		return nil, err
	}
	compressor, err := catchpointStage1Encoder(file)
	if err != nil {
		return nil, err
	}
	tar := tar.NewWriter(compressor)

	res := &catchpointFileWriter{
		ctx:                  ctx,
		tx:                   tx,
		filePath:             filePath,
		totalAccounts:        totalAccounts,
		totalKVs:             totalKVs,
		file:                 file,
		compressor:           compressor,
		tar:                  tar,
		accountsIterator:     tx.MakeEncodedAccoutsBatchIter(),
		maxResourcesPerChunk: maxResourcesPerChunk,
	}
	return res, nil
}

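// Abort closes the iterator and the writer pipeline, then removes the
// partially written catchpoint file.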
func (cw *catchpointFileWriter) Abort() error {
	cw.accountsIterator.Close()
	cw.tar.Close()
	cw.compressor.Close()
	cw.file.Close()
	return os.Remove(cw.filePath)
}

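// FileWriteSPVerificationContext writes the encoded state proof verification
// context as its own entry in the tar archive.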
func (cw *catchpointFileWriter) FileWriteSPVerificationContext(encodedData []byte) error {
	err := cw.tar.WriteHeader(&tar.Header{
		Name: catchpointSPVerificationFileName,
		Mode: 0600,
		Size: int64(len(encodedData)),
	})

	if err != nil {
		return err
	}

	_, err = cw.tar.Write(encodedData)
	if err != nil {
		return err
	}

	if chunkLen := uint64(len(encodedData)); cw.biggestChunkLen < chunkLen {
		cw.biggestChunkLen = chunkLen
	}

	return nil
}

// FileWriteStep works for a short period of time (determined by stepCtx) to get
// some more data (accounts/resources/kvpairs) by using readDatabaseStep, and
// write that data to the open tar file in cw.tar.  The writing is done in
// asyncWriter, so that it can proceed concurrently with reading the data from
// the db. asyncWriter only runs long enough to process the data read during a
// single call to FileWriteStep, and FileWriteStep ensures that asyncWriter has finished
// writing by waiting for it in a defer block, collecting any errors that may
// have occurred during writing.  Therefore, FileWriteStep looks like a simple
// synchronous function to its callers.
func (cw *catchpointFileWriter) FileWriteStep(stepCtx context.Context) (more bool, err error) {
	// have we timed out or been canceled by this point?
	if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
		return
	}

	writerRequest := make(chan catchpointFileChunkV6, 1)
	writerResponse := make(chan error, 2)
	go cw.asyncWriter(writerRequest, writerResponse, cw.chunkNum)
	defer func() {
		// For simplicity, all cleanup is done once, here. The writerRequest is
		// closed, signaling asyncWriter that it can exit, and then
		// writerResponse is drained, ensuring any problems from asyncWriter are
		// noted (and that the writing is done).
		close(writerRequest)

		// drain the writerResponse queue
		for writerError := range writerResponse {
			err = writerError
		}
		if !more {
			// If we're done, close up the tar file and report on size
			cw.tar.Close()
			cw.compressor.Close()
			cw.file.Close()
			fileInfo, statErr := os.Stat(cw.filePath)
			if statErr != nil {
				err = statErr
			} else {
				cw.writtenBytes = fileInfo.Size()
			}

			// These don't HAVE to be closed, since the "owning" tx will be committed/rolled back
			cw.accountsIterator.Close()
			if cw.kvRows != nil {
				cw.kvRows.Close()
				cw.kvRows = nil
			}
		}
	}()

	for {
		// have we timed out or been canceled?
		if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
			return
		}

		if cw.chunk.empty() {
			err = cw.readDatabaseStep(cw.ctx)
			if err != nil {
				return
			}
			// readDatabaseStep yielded nothing, we're done
			if cw.chunk.empty() {
				return false, nil
			}
		}

		// have we timed out or been canceled?
		if more, err = hasContextDeadlineExceeded(stepCtx); more || err != nil {
			return
		}

		// check if we had any error on the writer from previous iterations.
		// this is not required for correctness, since we'll find the error in
		// the defer block, but it may surface the error earlier.
		select {
		case err := <-writerResponse:
			return false, err
		default:
		}

		// send the chunk to the asyncWriter channel
		cw.chunkNum++
		writerRequest <- cw.chunk
		// indicate that we need a readDatabaseStep
		cw.chunk = catchpointFileChunkV6{}
	}
}

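// asyncWriter drains the chunks channel, encoding each non-empty chunk and
// writing it to the tar archive as a numbered balances file. Any write error
// is sent on the response channel, which is closed on exit so that the defer
// block in FileWriteStep can detect that writing has finished.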
func (cw *catchpointFileWriter) asyncWriter(chunks chan catchpointFileChunkV6, response chan error, chunkNum uint64) {
	defer close(response)
	for chk := range chunks {
		chunkNum++
		if chk.empty() {
			break
		}
		encodedChunk := protocol.Encode(&chk)
		err := cw.tar.WriteHeader(&tar.Header{
			Name: fmt.Sprintf(catchpointBalancesFileNameTemplate, chunkNum),
			Mode: 0600,
			Size: int64(len(encodedChunk)),
		})
		if err != nil {
			response <- err
			break
		}
		_, err = cw.tar.Write(encodedChunk)
		if err != nil {
			response <- err
			break
		}
		if chunkLen := uint64(len(encodedChunk)); cw.biggestChunkLen < chunkLen {
			cw.biggestChunkLen = chunkLen
		}
	}
}

// readDatabaseStep places the next chunk of records into cw.chunk. It yields
// all of the account chunks first, and then the kv chunks. Even if the accounts
// are evenly divisible by BalancesPerCatchpointFileChunk, it must not return an
// empty chunk between accounts and kvs.
func (cw *catchpointFileWriter) readDatabaseStep(ctx context.Context) error {
	if !cw.accountsDone {
		balances, numAccounts, err := cw.accountsIterator.Next(ctx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
		if err != nil {
			return err
		}
		if len(balances) > 0 {
			cw.chunk = catchpointFileChunkV6{Balances: balances, numAccounts: numAccounts}
			return nil
		}
		// It might seem reasonable, but do not close accountsIterator here,
		// else it will start over on the next iteration
		// cw.accountsIterator.Close()
		cw.accountsDone = true
	}

	// Create the *Rows iterator JIT
	if cw.kvRows == nil {
		rows, err := cw.tx.MakeKVsIter(ctx)
		if err != nil {
			return err
		}
		cw.kvRows = rows
	}

	kvrs := make([]encoded.KVRecordV6, 0, BalancesPerCatchpointFileChunk)
	for cw.kvRows.Next() {
		k, v, err := cw.kvRows.KeyValue()
		if err != nil {
			return err
		}
		kvrs = append(kvrs, encoded.KVRecordV6{Key: k, Value: v})
		if len(kvrs) == BalancesPerCatchpointFileChunk {
			break
		}
	}
	cw.chunk = catchpointFileChunkV6{KVs: kvrs}
	return nil
}

// hasContextDeadlineExceeded examines the given context to see whether it has been canceled or has timed out.
// If it has timed out, the function returns contextExceeded=true and contextError=nil.
// If the context was canceled with a non-timeout error, it returns contextExceeded=false and contextError=error.
// Otherwise, it returns contextExceeded=false and contextError=nil.
func hasContextDeadlineExceeded(ctx context.Context) (contextExceeded bool, contextError error) {
	// have we timed out or been canceled by this point?
	select {
	case <-ctx.Done():
		contextError = ctx.Err()
		if contextError == context.DeadlineExceeded {
			contextExceeded = true
			contextError = nil
			return
		}
	default:
	}
	return
}