path: root/util/rateLimit_test.go
blob: fd7a031408bf72f22791e20ee82dabfeab1d0536
// Copyright (C) 2019-2023 Algorand, Inc.
// This file is part of go-algorand
//
// go-algorand is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// go-algorand is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.

package util

import (
	"testing"
	"time"

	"github.com/algorand/go-algorand/test/partitiontest"
	"github.com/stretchr/testify/assert"
)

type mockClient string

type mockCongestionControl struct{}

func (cg mockCongestionControl) Start()                            {}
func (cg mockCongestionControl) Stop()                             {}
func (cg mockCongestionControl) Consumed(c ErlClient, t time.Time) {}
func (cg mockCongestionControl) Served(t time.Time)                {}
func (cg mockCongestionControl) ShouldDrop(c ErlClient) bool       { return true }

func (c mockClient) OnClose(func()) {}
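
// A minimal sketch of the consume/release pattern these tests exercise: take
// capacity for a client, do the work, then return the capacity to its pool.
// handleRequest and doWork are hypothetical names for illustration only, and
// this assumes NewElasticRateLimiter returns *ElasticRateLimiter as used below.
func handleRequest(erl *ElasticRateLimiter, c ErlClient, doWork func()) error {
	// take one unit of capacity for this client, or fail fast
	guard, err := erl.ConsumeCapacity(c)
	if err != nil {
		return err
	}
	doWork()
	// return the capacity to the reservation or shared pool it came from
	return guard.Release()
}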

func TestNewElasticRateLimiter(t *testing.T) {
	partitiontest.PartitionTest(t)
	erl := NewElasticRateLimiter(100, 10, time.Second, nil)

	assert.Equal(t, 100, len(erl.sharedCapacity))
	assert.Equal(t, 0, len(erl.capacityByClient))
}

func TestElasticRateLimiterCongestionControlled(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	erl := NewElasticRateLimiter(3, 2, time.Second, nil)
	// give the ERL a congestion controller with well-defined behavior for testing
	erl.cm = mockCongestionControl{}

	_, err := erl.ConsumeCapacity(client)
	// because the ERL gives capacity to a reservation, and then asynchronously drains that capacity from the shared pool,
	// wait a moment before testing the size of the sharedCapacity
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, 1, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.NoError(t, err)

	erl.EnableCongestionControl()
	_, err = erl.ConsumeCapacity(client)
	assert.Equal(t, 0, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.NoError(t, err)

	_, err = erl.ConsumeCapacity(client)
	assert.Equal(t, 0, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.Error(t, err)

	erl.DisableCongestionControl()
	_, err = erl.ConsumeCapacity(client)
	assert.Equal(t, 0, len(erl.capacityByClient[client]))
	assert.Equal(t, 0, len(erl.sharedCapacity))
	assert.NoError(t, err)
}
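
// Another minimal sketch, under the behavior demonstrated above: once
// congestion control is enabled, ConsumeCapacity returns an error when the
// congestion manager elects to drop a client's request, so a serving loop can
// shed load by skipping that request. serveAll, the requests channel, and the
// serve callback are hypothetical names for illustration only.
func serveAll(erl *ElasticRateLimiter, requests <-chan ErlClient, serve func(ErlClient)) {
	erl.EnableCongestionControl()
	for client := range requests {
		guard, err := erl.ConsumeCapacity(client)
		if err != nil {
			continue // congestion control chose to drop this request
		}
		serve(client)
		guard.Release()
	}
}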

func TestReservations(t *testing.T) {
	partitiontest.PartitionTest(t)
	client1 := mockClient("client1")
	client2 := mockClient("client2")
	erl := NewElasticRateLimiter(4, 1, time.Second, nil)

	_, err := erl.ConsumeCapacity(client1)
	// because the ERL gives capacity to a reservation, and then asynchronously drains that capacity from the shared pool,
	// wait a moment before testing the size of the sharedCapacity
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, 1, len(erl.capacityByClient))
	assert.NoError(t, err)

	_, err = erl.ConsumeCapacity(client2)
	// wait again for the asynchronous drain of the sharedCapacity
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, 2, len(erl.capacityByClient))
	assert.NoError(t, err)

	erl.closeReservation(client1)
	assert.Equal(t, 1, len(erl.capacityByClient))
	erl.closeReservation(client2)
	assert.Equal(t, 0, len(erl.capacityByClient))
}

// When the per-client reservation size is zero, the reservation map is not used.
// This ensures we never wait on a capacity queue that would never vend.
func TestZeroSizeReservations(t *testing.T) {
	partitiontest.PartitionTest(t)
	client1 := mockClient("client1")
	client2 := mockClient("client2")
	erl := NewElasticRateLimiter(4, 0, time.Second, nil)

	_, err := erl.ConsumeCapacity(client1)
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, 0, len(erl.capacityByClient))
	assert.NoError(t, err)

	_, err = erl.ConsumeCapacity(client2)
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, 0, len(erl.capacityByClient))
	assert.NoError(t, err)

	erl.closeReservation(client1)
	assert.Equal(t, 0, len(erl.capacityByClient))
	erl.closeReservation(client2)
	assert.Equal(t, 0, len(erl.capacityByClient))
}

func TestConsumeReleaseCapacity(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	erl := NewElasticRateLimiter(4, 3, time.Second, nil)

	c1, err := erl.ConsumeCapacity(client)
	// because the ERL gives capacity to a reservation, and then asynchronously drains that capacity from the shared pool,
	// wait a moment before testing the size of the sharedCapacity
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, 2, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.NoError(t, err)

	_, err = erl.ConsumeCapacity(client)
	assert.Equal(t, 1, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.NoError(t, err)

	_, err = erl.ConsumeCapacity(client)
	assert.Equal(t, 0, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.NoError(t, err)

	// hold on to this capacity, as it is vended from the shared pool
	c4, err := erl.ConsumeCapacity(client)
	assert.Equal(t, 0, len(erl.capacityByClient[client]))
	assert.Equal(t, 0, len(erl.sharedCapacity))
	assert.NoError(t, err)

	_, err = erl.ConsumeCapacity(client)
	assert.Equal(t, 0, len(erl.capacityByClient[client]))
	assert.Equal(t, 0, len(erl.sharedCapacity))
	assert.Error(t, err)

	// now release the reserved capacity and observe it return to the client's reservation
	err = c1.Release()
	assert.Equal(t, 1, len(erl.capacityByClient[client]))
	assert.Equal(t, 0, len(erl.sharedCapacity))
	assert.NoError(t, err)

	// then release the shared capacity and observe it return to the shared pool
	err = c4.Release()
	assert.Equal(t, 1, len(erl.capacityByClient[client]))
	assert.Equal(t, 1, len(erl.sharedCapacity))
	assert.NoError(t, err)
}

func TestREDCongestionManagerShouldDrop(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	other := mockClient("other")
	red := NewREDCongestionManager(time.Second*10, 10000)
	// recalculate the target rate on every request for the most accurate results
	red.targetRateRefreshTicks = 1
	red.Start()
	// indicate an arrival rate of essentially 1/s (10 arrivals over the 10-second window)
	for i := 0; i < 10; i++ {
		red.Consumed(client, time.Now())
	}
	// indicate a service rate of essentially 0.9/s (9 serves over the 10-second window)
	for i := 0; i < 9; i++ {
		red.Served(time.Now())
	}
	// allow the statistics to catch up before asserting
	time.Sleep(100 * time.Millisecond)
	// the service rate should be 0.9/s, and the arrival rate for this client should be 1/s
	// for this reason, it should always drop the message
	for i := 0; i < 100; i++ {
		assert.True(t, red.ShouldDrop(client))
	}
	// this client hasn't consumed any capacity yet, so its requests should not be dropped
	for i := 0; i < 10; i++ {
		assert.False(t, red.ShouldDrop(other))
	}
	// allow the congestion manager to consume and process the given messages
	time.Sleep(100 * time.Millisecond)
	red.Stop()
	assert.Equal(t, 10, len(*red.consumedByClient[client]))
	assert.Equal(t, float64(1), red.arrivalRateFor(red.consumedByClient[client]))
	assert.Equal(t, 0.0, red.arrivalRateFor(red.consumedByClient[other]))
	assert.Equal(t, 0.9, red.targetRate)
}
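
// A note on the arithmetic asserted above: with the 10-second window passed to
// NewREDCongestionManager, a rate is simply the number of events inside the
// window divided by the window length. windowRate is a hypothetical helper for
// illustration, not part of the package; e.g. windowRate(10, 10*time.Second) == 1.0
// and windowRate(9, 10*time.Second) == 0.9, matching the assertions above.
func windowRate(events int, window time.Duration) float64 {
	return float64(events) / window.Seconds()
}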

func TestREDCongestionManagerShouldntDrop(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	red := NewREDCongestionManager(time.Second*10, 10000)
	// calculate the target rate every request for most accurate results
	red.targetRateRefreshTicks = 1
	red.Start()

	// indicate an arrival rate of essentially 0.1/s (1 arrival over the 10-second window)
	red.Consumed(client, time.Now())

	// drive 10k serves, in batches of 500, with 100ms sleeps between batches
	for i := 0; i < 20; i++ {
		for j := 0; j < 500; j++ {
			red.Served(time.Now())
		}
		time.Sleep(100 * time.Millisecond)
	}
	// the service rate should be 1000/s, and the arrival rate for this client should be 0.1/s
	// for this reason, ShouldDrop should almost certainly return false (true only 1/100k times)
	for i := 0; i < 10; i++ {
		assert.False(t, red.ShouldDrop(client))
	}
	// allow the congestion manager to consume and process the given messages
	time.Sleep(1000 * time.Millisecond)
	red.Stop()
	assert.Equal(t, 1, len(*red.consumedByClient[client]))
	assert.Equal(t, 10000, len(red.serves))
	assert.Equal(t, 0.1, red.arrivalRateFor(red.consumedByClient[client]))
	assert.Equal(t, float64(1000), red.targetRate)
}

func TestREDCongestionManagerTargetRate(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	red := NewREDCongestionManager(time.Second*10, 10000)
	red.Start()
	red.Consumed(client, time.Now())
	red.Consumed(client, time.Now())
	red.Consumed(client, time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	// allow the congestion manager to consume and process the given messages
	time.Sleep(100 * time.Millisecond)
	red.Stop()
	assert.Equal(t, 0.3, red.arrivalRateFor(red.consumedByClient[client]))
	assert.Equal(t, 0.3, red.targetRate)
}

func TestREDCongestionManagerPrune(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	red := NewREDCongestionManager(time.Second*10, 10000)
	red.Start()
	// the first three arrivals predate the 10-second window and should be pruned,
	// leaving a single counted arrival
	red.Consumed(client, time.Now().Add(-11*time.Second))
	red.Consumed(client, time.Now().Add(-11*time.Second))
	red.Consumed(client, time.Now().Add(-11*time.Second))
	red.Consumed(client, time.Now())
	// likewise, only the final serve falls inside the window
	red.Served(time.Now().Add(-11 * time.Second))
	red.Served(time.Now().Add(-11 * time.Second))
	red.Served(time.Now().Add(-11 * time.Second))
	red.Served(time.Now())
	// allow the congestion manager to consume and process the given messages
	time.Sleep(100 * time.Millisecond)
	red.Stop()
	assert.Equal(t, 0.1, red.arrivalRateFor(red.consumedByClient[client]))
	assert.Equal(t, 0.1, red.targetRate)
}

func TestREDCongestionManagerStopStart(t *testing.T) {
	partitiontest.PartitionTest(t)
	client := mockClient("client")
	red := NewREDCongestionManager(time.Second*10, 10000)
	red.Start()
	red.Consumed(client, time.Now())
	red.Consumed(client, time.Now())
	red.Consumed(client, time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	// allow the congestion manager to consume and process the given messages
	time.Sleep(100 * time.Millisecond)
	red.Stop()
	assert.Equal(t, 0.3, red.arrivalRateFor(red.consumedByClient[client]))
	assert.Equal(t, 0.3, red.targetRate)
	// Do it all again, but with 2 consumes instead of 3, and 4 serves instead of 3
	red.Start()
	red.Consumed(client, time.Now())
	red.Consumed(client, time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	red.Served(time.Now())
	// allow the congestion manager to consume and process the given messages
	time.Sleep(100 * time.Millisecond)
	red.Stop()
	assert.Equal(t, 0.2, red.arrivalRateFor(red.consumedByClient[client]))
	assert.Equal(t, 0.4, red.targetRate)
}