-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmicrobatch_test.go
310 lines (266 loc) · 9.1 KB
/
microbatch_test.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
package microbatch
import (
"context"
"errors"
"reflect"
"testing"
"time"
)
// TestNewBatcher exercises constructor edge cases via a table of configs,
// including configurations expected to fail (which surface as panics).
func TestNewBatcher(t *testing.T) {
	cases := [...]struct {
		name         string
		config       *BatcherConfig
		nilProcessor bool
		wantErr      bool
	}{
		{`valid config`, &BatcherConfig{MaxSize: 10, FlushInterval: 50 * time.Millisecond, MaxConcurrency: 2}, false, false},
		{`nil config`, nil, false, false},
		{`max size disabled`, &BatcherConfig{MaxSize: -1}, false, false},
		{`flush interval disabled`, &BatcherConfig{FlushInterval: -1}, false, false},
		{`all flush options disabled`, &BatcherConfig{MaxSize: -1, FlushInterval: -1}, false, true},
		{`nil processor`, nil, true, true},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			defer checkNumGoroutines(time.Second * 3)(t) // should always clean up
			defer func() {
				// construction failures surface as panics; only an
				// unexpected one fails the test
				if r := recover(); r != nil && !tc.wantErr {
					t.Errorf(`unexpected panic: %v`, r)
				}
			}()
			var processor func(ctx context.Context, jobs []any) error
			if !tc.nilProcessor {
				processor = func(ctx context.Context, jobs []any) error {
					panic(`should not be called`)
				}
			}
			batcher := NewBatcher(tc.config, processor)
			if batcher == nil {
				t.Error(`batcher should never be nil`)
			} else {
				defer batcher.Close()
			}
			// reaching this line at all means construction did not panic
			if tc.wantErr {
				t.Error(`should have errored`)
			}
		})
	}
}
// Context cancellation should be checked first, for consistency of errors;
// verified here by calling Submit on a nil receiver, which only works if the
// guard runs before any receiver state is touched.
func TestBatcher_Submit_ctxCancelGuarded(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	result, err := (*Batcher[any])(nil).Submit(ctx, nil)
	if err != context.Canceled || result != nil {
		t.Fatal(result, err)
	}
}
// Like the above, the closed state should be checked early, for consistency
// of errors (no specific error, currently - Submit after Close surfaces as
// context.Canceled).
func TestBatcher_Submit_batcherClosedGuarded(t *testing.T) {
	processor := func(ctx context.Context, jobs []any) error {
		panic(`should not be called`)
	}
	batcher := NewBatcher(nil, processor)
	if err := batcher.Close(); err != nil {
		t.Fatal(err)
	}
	result, err := batcher.Submit(context.Background(), nil)
	if err != context.Canceled || result != nil {
		t.Fatal(result, err)
	}
}
// processorArgsAny captures the arguments of a single BatchProcessor
// invocation, so tests can assert on the context state and batch contents.
type processorArgsAny struct {
	ctx  context.Context
	jobs []any
}
// setupBlockedSubmit sets up two jobs in a batcher, with channels to control
// the BatchProcessor: the first job saturates MaxConcurrency (1) and blocks
// inside the processor; the second is accepted but not yet running.
// processorInCh receives the arguments of each BatchProcessor invocation,
// and processorOutCh supplies the error that invocation returns.
func setupBlockedSubmit(t *testing.T) (_ *Batcher[any], processorInCh <-chan processorArgsAny, processorOutCh chan<- error) {
	processorIn := make(chan processorArgsAny) // called BatchProcessor
	processorOut := make(chan error)           // unblock BatchProcessor
	batcher := NewBatcher(
		// MaxSize 1 so each job is its own batch; FlushInterval 1 (ns) so
		// flushes are effectively immediate; MaxConcurrency 1 so the second
		// batch must wait for the first
		&BatcherConfig{MaxSize: 1, FlushInterval: 1, MaxConcurrency: 1},
		func(ctx context.Context, jobs []any) error {
			processorIn <- processorArgsAny{ctx, jobs}
			return <-processorOut
		},
	)
	// submit a job so we reach max concurrency
	if result1, err := batcher.Submit(context.Background(), 1); err != nil || result1 == nil {
		t.Fatal(result1, err)
	}
	// ensure it started as expected
	<-processorIn
	// submit another job, which should block the control loop
	if result2, err := batcher.Submit(context.Background(), 2); err != nil || result2 == nil {
		t.Fatal(result2, err)
	}
	// ensure the second job isn't yet running, as expected
	// (sleep-based check is racey by nature, but good enough here)
	time.Sleep(time.Millisecond * 20)
	select {
	case <-processorIn:
		t.Fatal(`expected no second job to be running`)
	default:
	}
	return batcher, processorIn, processorOut
}
// test cancellation of a job during Submit, before it is added to the batch
func TestBatcher_Submit_ctxCancel(t *testing.T) {
	defer checkNumGoroutines(time.Second * 3)(t) // should always clean up
	batcher, processorIn, processorOut := setupBlockedSubmit(t)
	// submit a third job in the background - this is the thing we are actually testing
	done := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		defer close(done)
		defer cancel()
		// a canceled Submit should return context.Canceled and no result
		if result3, err := batcher.Submit(ctx, 3); err != context.Canceled || result3 != nil {
			t.Error(result3, err)
		}
	}()
	// wait for a bit then check to see if our third job was actually blocked on Submit
	time.Sleep(time.Millisecond * 30)
	select {
	case <-done:
		t.Fatal(`expected third job to be blocked on Submit`)
	default:
	}
	// cancel Submit, should unblock
	cancel()
	<-done
	// bail out before cleanup if the background assertion failed
	if t.Failed() {
		t.FailNow()
	}
	// test succeeded (probably), we just need to clean up:
	// unblock the first job, wait for the second to start, unblock it too
	processorOut <- nil
	<-processorIn
	processorOut <- nil
	if err := batcher.Shutdown(context.Background()); err != nil {
		t.Error(err)
	}
}
// testShutdownCloseJobInProgress is consolidated test logic for three variants
// of stopping (Shutdown, Shutdown canceled, Close):
//   - expectCanceled: whether the context seen by the second batch's
//     processor should already be canceled
//   - expectedResult: the error expected from stopBatcher
//   - stopBatcher: the stop variant under test
func testShutdownCloseJobInProgress(t *testing.T, expectCanceled bool, expectedResult error, stopBatcher func(batcher *Batcher[any]) error) {
	defer checkNumGoroutines(time.Second * 3)(t) // should always clean up
	batcher, processorIn, processorOut := setupBlockedSubmit(t)
	// submit a third job in the background
	done := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		defer close(done)
		defer cancel()
		// stopping the batcher should reject the still-blocked Submit
		if result3, err := batcher.Submit(ctx, 3); err != context.Canceled || result3 != nil {
			t.Error(result3, err)
		}
	}()
	// wait for a bit then check to see if our third job was actually blocked on Submit
	// note: long sleep because the expectCanceled assertion is racey by nature
	time.Sleep(time.Millisecond * 300)
	select {
	case <-done:
		t.Fatal(`expected third job to be blocked on Submit`)
	default:
	}
	// start the shutdown process in the background...
	out := make(chan error)
	go func() {
		out <- stopBatcher(batcher)
	}()
	// should immediately unblock our third job, which hasn't been submitted yet
	<-done
	// finish up with the first job, with an error just because
	processorOut <- errors.New(`some error`)
	// the context the second job receives should match the expected state
	{
		args := <-processorIn
		if (args.ctx.Err() != nil) != expectCanceled {
			t.Errorf(`expected context canceled = %v`, expectCanceled)
		}
		if !reflect.DeepEqual(args.jobs, []any{2}) {
			t.Errorf(`expected jobs to be [2], got %v`, args.jobs)
		}
	}
	// wait a bit and verify we are still waiting for shutdown to finish
	time.Sleep(time.Millisecond * 30)
	select {
	case <-out:
		t.Fatal(`expected shutdown to still be in progress`)
	default:
	}
	// another error, doesn't affect the shutdown process though
	processorOut <- errors.New(`some other error`)
	// we should be done
	if err := <-out; err != expectedResult {
		t.Error(err)
	}
}
// test Shutdown with a job in progress, including a blocked Submit + queued up batch
func TestBatcher_Shutdown_jobInProgress(t *testing.T) {
testShutdownCloseJobInProgress(t, false, nil, func(batcher *Batcher[any]) error {
return batcher.Shutdown(context.Background())
})
}
// test Shutdown with a job in progress, with cancellation, including a blocked Submit + queued up batch
func TestBatcher_Shutdown_jobInProgressCanceled(t *testing.T) {
testShutdownCloseJobInProgress(t, true, context.Canceled, func(batcher *Batcher[any]) error {
ctx, cancel := context.WithCancel(context.Background())
cancel()
return batcher.Shutdown(ctx)
})
}
// this is effectively identical to calling Shutdown with a canceled context
func TestBatcher_Close_jobInProgress(t *testing.T) {
testShutdownCloseJobInProgress(t, true, nil, func(batcher *Batcher[any]) error {
return batcher.Close()
})
}
// TestJobResult_Wait_contextCancel ensures Wait returns the context's error
// when given an already-canceled context.
func TestJobResult_Wait_contextCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	jobResult := JobResult[any]{batch: &batcherState[any]{}}
	if err := jobResult.Wait(ctx); err != context.Canceled {
		t.Errorf(`expected context canceled, got %v`, err)
	}
}
// TestBatcher_flushInterval is a basic test to ensure batches flush after the
// configured interval as expected (testing timing is painful). The failure
// message previously hard-coded "50ms" while flushInterval is 100ms; the
// message and the lower bound are now both derived from the constant.
func TestBatcher_flushInterval(t *testing.T) {
	defer checkNumGoroutines(time.Second * 3)(t) // should always clean up
	processorIn := make(chan processorArgsAny)
	processorOut := make(chan error)
	const flushInterval = 100 * time.Millisecond
	batcher := NewBatcher(
		// size/concurrency limits disabled, so only the interval triggers a flush
		&BatcherConfig{MaxSize: -1, FlushInterval: flushInterval, MaxConcurrency: -1},
		func(ctx context.Context, jobs []any) error {
			processorIn <- processorArgsAny{ctx, jobs}
			return <-processorOut
		},
	)
	// submit 5 jobs, then wait for them to be flushed as a single batch
	firstSubmitTime := time.Now()
	var jobs []*JobResult[any]
	for i := 0; i < 5; i++ {
		result, err := batcher.Submit(context.Background(), i)
		if err != nil || result == nil || result.Job != i {
			t.Fatal(result, err)
		}
		jobs = append(jobs, result)
		time.Sleep(time.Millisecond * 5) // just because
	}
	// wait for the batch
	if args := <-processorIn; len(args.jobs) != 5 {
		t.Errorf(`expected 5 jobs, got %d`, len(args.jobs))
	}
	// ensure it took at least the expected time (10% tolerance), but not too much longer
	if elapsed := time.Since(firstSubmitTime); elapsed < flushInterval-flushInterval/10 || elapsed > time.Second {
		t.Errorf(`expected flush interval to be %s, got %s`, flushInterval, elapsed)
	} else {
		t.Logf(`interval delta: %s`, elapsed-flushInterval)
	}
	err := errors.New(`expected error`)
	processorOut <- err
	// ensure all jobs are done, and have our expected error
	for _, job := range jobs {
		if e := job.Wait(context.Background()); e != err {
			t.Fatal(e)
		}
	}
	// close the batcher, for cleanup purposes
	if err := batcher.Close(); err != nil {
		t.Error(err)
	}
}