summaryrefslogtreecommitdiff
path: root/logging/collector.go
blob: 39203964c940d5a9c5b71c822b8ba0af6e258619 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
// Copyright (C) 2019-2023 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.

package logging

import (
	"archive/tar"
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"sync"

	"github.com/algorand/go-algorand/util/s3"
)

// CollectAndUploadData combines all of the data files that we want packaged up and uploaded
// for analysis and uploads the tarball to S3.
// dataDir: the node's data directory containing the files of interest
// bundleFilename: the name of the resulting tarball on S3
// targetFolder: the subfolder in the s3 bucket to store the file
// CollectAndUploadData combines all of the data files that we want packaged up and uploaded
// for analysis and uploads the tarball to S3.
// dataDir: the node's data directory containing the files of interest
// bundleFilename: the name of the resulting tarball on S3
// targetFolder: the subfolder in the s3 bucket to store the file
// The returned channel carries any errors encountered and is closed when
// both the collection and the upload have finished.
func CollectAndUploadData(dataDir string, bundleFilename string, targetFolder string) <-chan error {
	// Capacity 2: the uploader goroutine and the collector below may each
	// report one error; neither send may ever block, even if the caller
	// stops draining the channel after the first error.
	errorChannel := make(chan error, 2)
	pipeReader, pipeWriter := io.Pipe()
	go func() {
		// Close the error channel to signal completion
		defer close(errorChannel)

		bucket := s3.GetS3UploadBucket()
		s3Session, err := s3.MakeS3SessionForUploadWithBucket(bucket)
		if err != nil {
			errorChannel <- err
			return
		}
		wg := sync.WaitGroup{}
		wg.Add(1)
		targetFilename := filepath.Join(targetFolder, path.Base(bundleFilename))
		go func() {
			defer wg.Done()
			fmt.Printf("Uploading to s3://%s/%s\n", bucket, targetFilename)
			// Use a goroutine-local error variable here: writing to the
			// outer `err` would race with the collectAndWrite assignment
			// in the parent goroutine below.
			if uploadErr := s3Session.UploadFileStream(targetFilename, pipeReader); uploadErr != nil {
				errorChannel <- uploadErr
			}
			pipeReader.Close()
		}()

		err = collectAndWrite(dataDir, pipeWriter)
		if err != nil {
			errorChannel <- err
		}
		// Close writer (our source) so reader knows there's no more data
		pipeWriter.Close()
		// Now wait for reader (S3 uploader) to finish uploading
		wg.Wait()
	}()
	return errorChannel
}

func collectAndWrite(datadir string, writer io.Writer) error {
	// set up the gzip writer
	gw := gzip.NewWriter(writer)
	defer gw.Close()
	tw := tar.NewWriter(gw)
	defer tw.Close()

	logPaths, err := filepath.Glob(path.Join(datadir, "node*.log"))
	if err != nil {
		return err
	}
	paths := make([]string, 0)
	paths = append(paths, logPaths...)

	logPaths, err = filepath.Glob(path.Join(datadir, "algod-*.log"))
	if err != nil {
		return err
	}
	paths = append(paths, logPaths...)

	logPaths, err = filepath.Glob(path.Join(datadir, "host*.log"))
	if err != nil {
		return err
	}
	paths = append(paths, logPaths...)

	cadaverPaths, err := filepath.Glob(path.Join(datadir, "agreement.cdv*"))
	if err != nil {
		return err
	}
	paths = append(paths, cadaverPaths...)

	// add each file as needed into the current tar archive
	for i := range paths {
		if err := addFile(tw, paths[i]); err != nil {
			return err
		}
	}
	return nil
}

func addFile(tw *tar.Writer, filePath string) error {
	file, err := os.Open(filePath)
	if err != nil {
		return err
	}
	defer file.Close()
	if stat, err := file.Stat(); err == nil {
		// now create the header as needed for this file within the tarball
		header := new(tar.Header)
		_, header.Name = filepath.Split(filePath)
		header.Size = stat.Size()
		header.Mode = int64(stat.Mode())
		header.ModTime = stat.ModTime()
		// write the header to the tarball archive
		if err := tw.WriteHeader(header); err != nil {
			return err
		}
		// copy the file data to the tarball
		if _, err := io.CopyN(tw, file, stat.Size()); err != nil {
			return err
		}
	}
	return nil
}