package retrypool

import (
	"context"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
)
// Worker interface for task processing
type Worker[T any] interface {
	Run(ctx context.Context, data T) error
}
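
// A minimal Worker implementation might look like the following
// (illustrative sketch; the type name and string payload are hypothetical):
//
//	type printWorker struct{}
//
//	func (w *printWorker) Run(ctx context.Context, data string) error {
//		select {
//		case <-ctx.Done():
//			return ctx.Err()
//		default:
//		}
//		fmt.Println("processing:", data)
//		return nil
//	}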
// DeadTask struct to hold failed task information
type DeadTask[T any] struct {
	Data          T
	Retries       int
	TotalDuration time.Duration
	Errors        []error
}
// TaskWrapper wraps a task with its retry metadata: scheduling, tried
// workers, per-attempt errors and durations, and timeout behavior.
type TaskWrapper[T any] struct {
	data           T
	retries        int
	totalDuration  time.Duration
	timeLimit      time.Duration   // Zero means no overall limit
	maxDuration    time.Duration   // Max duration per attempt
	scheduledTime  time.Time       // For delay between retries
	triedWorkers   map[int]bool    // Track workers that have tried this task
	errors         []error         // Track errors for each attempt
	durations      []time.Duration // Track duration for each attempt
	ctx            context.Context
	cancel         context.CancelFunc
	immediateRetry bool
	panicOnTimeout bool // Triggers a panic on timeout
}
func (t *TaskWrapper[T]) Data() T {
	return t.data
}

func (t *TaskWrapper[T]) Retries() int {
	return t.retries
}

func (t *TaskWrapper[T]) TotalDuration() time.Duration {
	return t.totalDuration
}

func (t *TaskWrapper[T]) TimeLimit() time.Duration {
	return t.timeLimit
}

func (t *TaskWrapper[T]) ScheduledTime() time.Time {
	return t.scheduledTime
}

func (t *TaskWrapper[T]) TriedWorkers() map[int]bool {
	return t.triedWorkers
}

func (t *TaskWrapper[T]) Errors() []error {
	return t.errors
}

func (t *TaskWrapper[T]) Durations() []time.Duration {
	return t.durations
}
// taskQueue stores pointers to TaskWrapper
type taskQueue[T any] struct {
	tasks []*TaskWrapper[T]
}

// Option type for configuring the Pool
type Option[T any] func(*Pool[T])

// TaskOption type for configuring individual tasks
type TaskOption[T any] func(*TaskWrapper[T])
// Config struct to hold retry configurations
type Config[T any] struct {
	attempts         int
	attemptsForError map[error]int
	delay            time.Duration
	maxDelay         time.Duration
	maxJitter        time.Duration
	onRetry          OnRetryFunc[T]
	retryIf          RetryIfFunc
	delayType        DelayTypeFunc[T]
	lastErrorOnly    bool
	context          context.Context
	timer            Timer
	onTaskSuccess    OnTaskSuccessFunc[T] // Callback when a task succeeds
	onTaskFailure    OnTaskFailureFunc[T] // Callback when a task fails
	onNewDeadTask    OnNewDeadTaskFunc[T]
	maxBackOffN      uint
	contextFunc      ContextFunc
	panicHandler     PanicHandlerFunc[T]
	panicWorker      PanicWorker
}
// workerState holds all per-worker data
type workerState[T any] struct {
	worker         Worker[T]
	stopChan       chan struct{}
	doneChan       chan struct{}
	cancel         context.CancelFunc
	ctx            context.Context
	forcePanicFlag *atomic.Bool
	currentTask    *TaskWrapper[T] // The task currently being processed, if any
	interrupted    bool            // Whether the worker has been interrupted
}
// Pool manages workers, task queues, and retry configuration, and supports
// dynamic worker management.
type Pool[T any] struct {
	workers         map[int]*workerState[T] // Map of workers with unique worker IDs
	nextWorkerID    int                     // Counter for assigning unique worker IDs
	workersToRemove map[int]bool
	taskQueues      map[int]taskQueue[T]
	processing      int
	mu              sync.Mutex
	cond            *sync.Cond
	wg              sync.WaitGroup
	stopped         bool
	closed          atomic.Bool
	ctx             context.Context
	deadTasks       []DeadTask[T]
	deadTasksMutex  sync.Mutex
	config          Config[T]
	timer           Timer
}
// New initializes the Pool with the given workers and options
func New[T any](ctx context.Context, workers []Worker[T], options ...Option[T]) *Pool[T] {
	pool := &Pool[T]{
		workers:         make(map[int]*workerState[T]),
		nextWorkerID:    0,
		workersToRemove: make(map[int]bool),
		taskQueues:      make(map[int]taskQueue[T]),
		config:          newDefaultConfig[T](),
		timer:           &timerImpl{},
		ctx:             ctx,
	}
	for _, option := range options {
		option(pool)
	}
	pool.cond = sync.NewCond(&pool.mu)
	// Initialize workers with unique IDs
	for _, worker := range workers {
		pool.AddWorker(worker)
	}
	return pool
}
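
// Putting it together (illustrative sketch; myWorker is hypothetical and
// Dispatch is defined later in this file):
//
//	ctx := context.Background()
//	pool := New(ctx, []Worker[string]{&myWorker{}, &myWorker{}})
//	if err := pool.Dispatch("job-1"); err != nil {
//		log.Fatal(err)
//	}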
// AddWorker adds a new worker to the pool dynamically
func (p *Pool[T]) AddWorker(worker Worker[T]) int {
	done := make(chan int)
	go func() {
		defer close(done)
		p.mu.Lock()
		defer p.mu.Unlock()
		workerID := p.nextWorkerID
		p.nextWorkerID++
		var workerCtx context.Context
		var workerCancel context.CancelFunc
		if p.config.contextFunc != nil {
			workerCtx = p.config.contextFunc()
		} else {
			workerCtx = p.ctx
		}
		workerCtx, workerCancel = context.WithCancel(workerCtx)
		state := &workerState[T]{
			worker:         worker,
			stopChan:       make(chan struct{}),
			doneChan:       make(chan struct{}),
			cancel:         workerCancel,
			ctx:            workerCtx,
			forcePanicFlag: new(atomic.Bool),
		}
		p.workers[workerID] = state
		p.wg.Add(1)
		go p.workerLoop(workerID)
		done <- workerID
	}()
	return <-done
}
// RemovalWorker marks a worker for removal; the worker is removed after it
// finishes its current task.
func (p *Pool[T]) RemovalWorker(workerID int) {
	p.mu.Lock()
	p.workersToRemove[workerID] = true
	p.mu.Unlock()
}
// WorkerController interface provides methods to control workers
type WorkerController[T any] interface {
	AddWorker(worker Worker[T]) int
	RemovalWorker(workerID int)
	RestartWorker(workerID int) error
	InterruptWorker(workerID int, options ...WorkerInterruptOption) error
}
// RestartWorker restarts a worker that was previously interrupted.
func (p *Pool[T]) RestartWorker(workerID int) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	state, exists := p.workers[workerID]
	if !exists {
		return fmt.Errorf("worker %d does not exist", workerID)
	}
	if !state.interrupted {
		return fmt.Errorf("worker %d is not interrupted and cannot be restarted", workerID)
	}
	// Create a new context for the worker
	workerCtx, workerCancel := context.WithCancel(p.ctx)
	state.ctx = workerCtx
	state.cancel = workerCancel
	state.forcePanicFlag.Store(false)
	state.interrupted = false
	// Create a new stopChan and doneChan
	state.stopChan = make(chan struct{})
	state.doneChan = make(chan struct{})
	// Start a new goroutine for the worker
	p.wg.Add(1)
	go p.workerLoop(workerID)
	fmt.Printf("Worker %d has been restarted\n", workerID)
	return nil
}
// RemoveWorker removes a worker from the pool
func (p *Pool[T]) RemoveWorker(workerID int) error {
	p.mu.Lock()
	state, exists := p.workers[workerID]
	if !exists {
		p.mu.Unlock()
		return fmt.Errorf("worker %d does not exist", workerID)
	}
	// Cancel the worker's context
	state.cancel()
	// Close the worker's stop channel
	close(state.stopChan)
	// Signal the worker to wake up if it's waiting
	p.cond.Broadcast()
	doneChan := state.doneChan
	p.mu.Unlock() // Release lock while waiting

	const workerStopTimeout = 5 * time.Second
	const workerForceStopTimeout = 5 * time.Second

	// Wait for the worker to finish, with a timeout
	select {
	case <-doneChan:
		// Worker has exited
	case <-time.After(workerStopTimeout):
		// Worker did not exit in time, force panic
		p.mu.Lock()
		state.forcePanicFlag.Store(true)
		p.mu.Unlock()
		// Signal the worker again
		p.cond.Broadcast()
		// Wait again for the worker to exit
		select {
		case <-doneChan:
			// Worker has exited
		case <-time.After(workerForceStopTimeout):
			// Even after forcing panic, the worker did not exit
			return fmt.Errorf("worker %d did not exit after forced panic", workerID)
		}
	}
	// Now safe to remove the worker from the pool
	p.mu.Lock()
	delete(p.workers, workerID)
	p.mu.Unlock()
	// Requeue any tasks assigned to this worker
	p.requeueTasksFromWorker(workerID)
	return nil
}
// requeueTasksFromWorker reassigns tasks from the removed worker to other workers
func (p *Pool[T]) requeueTasksFromWorker(workerID int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	// Remove the worker's entry from each task's triedWorkers set
	for retries, queue := range p.taskQueues {
		newTasks := make([]*TaskWrapper[T], 0, len(queue.tasks))
		for _, task := range queue.tasks {
			if task.triedWorkers != nil {
				delete(task.triedWorkers, workerID)
			}
			newTasks = append(newTasks, task)
		}
		p.taskQueues[retries] = taskQueue[T]{tasks: newTasks}
	}
	// Signal workers that the task queues have changed
	p.cond.Broadcast()
}
// workerLoop handles scheduledTime, triedWorkers, and worker interruption
func (p *Pool[T]) workerLoop(workerID int) {
	defer p.wg.Done()
	p.mu.Lock()
	state, exists := p.workers[workerID]
	if !exists {
		p.mu.Unlock()
		return
	}
	stopChan := state.stopChan
	doneChan := state.doneChan
	ctx := state.ctx
	p.mu.Unlock()

	defer func() {
		p.mu.Lock()
		defer p.mu.Unlock()
		if r := recover(); r != nil {
			// Capture the stack trace
			buf := make([]byte, 4096)
			n := runtime.Stack(buf, false)
			stackTrace := string(buf[:n])
			// Create a concise error message
			err := fmt.Errorf("panic occurred in worker %d: %v", workerID, r)
			if p.config.panicWorker != nil {
				// Call the panic handler with the worker ID, panic value, error, and stack trace
				p.config.panicWorker(workerID, r, err, stackTrace)
			}
		}
		select {
		case <-doneChan: // Channel is already closed
		default:
			close(doneChan)
		}
	}()

	for {
		select {
		case <-stopChan:
			return
		default:
		}
		p.mu.Lock()
		for p.isAllQueuesEmpty() && !p.stopped {
			p.cond.Wait()
			// Check if the context is canceled
			if ctx.Err() != nil {
				p.mu.Unlock()
				return
			}
		}
		if p.stopped && p.isAllQueuesEmpty() {
			p.mu.Unlock()
			return
		}
		// Check if the context is canceled before proceeding
		if ctx.Err() != nil {
			p.mu.Unlock()
			return
		}
		retries, idx, task, ok := p.getNextTask(workerID)
		if !ok {
			p.mu.Unlock()
			continue
		}
		now := time.Now()
		if now.Before(task.scheduledTime) {
			waitDuration := task.scheduledTime.Sub(now)
			p.mu.Unlock()
			select {
			case <-p.timer.After(waitDuration):
			case <-p.ctx.Done():
				return
			}
			continue
		}
		// Remove the task from the queue
		q := p.taskQueues[retries]
		q.tasks = append(q.tasks[:idx], q.tasks[idx+1:]...)
		if len(q.tasks) == 0 {
			delete(p.taskQueues, retries)
		} else {
			p.taskQueues[retries] = q
		}
		// Mark the task as tried by this worker
		if task.triedWorkers == nil {
			task.triedWorkers = make(map[int]bool)
		}
		task.triedWorkers[workerID] = true
		// Set the current task
		state.currentTask = task
		p.processing++
		p.mu.Unlock()

		// Check if the context is canceled before processing the task
		if ctx.Err() != nil {
			p.mu.Lock()
			p.processing--
			p.mu.Unlock()
			return
		}

		p.runWorkerWithFailsafe(workerID, task)

		p.mu.Lock()
		if p.workersToRemove[workerID] {
			delete(p.workersToRemove, workerID)
			p.mu.Unlock()
			go p.RemoveWorker(workerID)
			return
		}
		// Unset the current task
		state.currentTask = nil
		if state.interrupted {
			p.processing--
			p.mu.Unlock()
			return // Exit the loop if the worker was interrupted
		}
		p.processing--
		p.cond.Signal()
		p.mu.Unlock()
	}
}
// enforceTimeLimit cancels the attempt once the task's overall time limit is exhausted.
func (p *Pool[T]) enforceTimeLimit(cancelFunc context.CancelFunc, timeLimit, totalDuration time.Duration, ctx context.Context) {
	if timeLimit <= 0 {
		return
	}
	remainingTime := timeLimit - totalDuration
	if remainingTime <= 0 {
		cancelFunc()
		return
	}
	select {
	case <-p.timer.After(remainingTime):
		cancelFunc() // This cancels the attempt's context
	case <-ctx.Done():
	}
}
// isAllQueuesEmpty checks if all task queues are empty
func (p *Pool[T]) isAllQueuesEmpty() bool {
	for _, q := range p.taskQueues {
		if len(q.tasks) > 0 {
			return false
		}
	}
	return true
}
// getNextTask returns the next task that the worker hasn't tried
func (p *Pool[T]) getNextTask(workerID int) (int, int, *TaskWrapper[T], bool) {
	// First, check for immediate-retry tasks this worker hasn't tried
	for retries, q := range p.taskQueues {
		for idx, task := range q.tasks {
			if task.immediateRetry && !task.triedWorkers[workerID] {
				return retries, idx, task, true
			}
		}
	}
	// Then, check for any task this worker hasn't tried
	for retries, q := range p.taskQueues {
		for idx, task := range q.tasks {
			if task.triedWorkers == nil {
				task.triedWorkers = make(map[int]bool)
			}
			if !task.triedWorkers[workerID] {
				return retries, idx, task, true
			}
		}
	}
	// If all tasks have been tried by this worker, return no task
	return 0, 0, nil, false
}
// TaskStatus indicates whether a task is waiting in a queue or being processed.
type TaskStatus int

const (
	TaskStatusQueued TaskStatus = iota
	TaskStatusProcessing
)

// RangeTasks iterates over all tasks in the pool, including those currently
// being processed. The callback receives the task data, an identifier (the
// worker ID for tasks being processed, or the queue key for queued tasks),
// and the task's status. If the callback returns false, the iteration stops.
func (p *Pool[T]) RangeTasks(cb func(data T, workerID int, status TaskStatus) bool) bool {
	p.mu.Lock()
	defer p.mu.Unlock()
	// Iterate over tasks currently being processed
	for workerID, state := range p.workers {
		if state.currentTask != nil {
			if !cb(state.currentTask.data, workerID, TaskStatusProcessing) {
				return false
			}
		}
	}
	// Iterate over tasks in the queues
	for queueID, queue := range p.taskQueues {
		for _, task := range queue.tasks {
			if !cb(task.data, queueID, TaskStatusQueued) {
				return false
			}
		}
	}
	return true
}
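
// For instance, counting the queued tasks might look like this
// (illustrative sketch, for a Pool[string]):
//
//	queued := 0
//	pool.RangeTasks(func(data string, id int, status TaskStatus) bool {
//		if status == TaskStatusQueued {
//			queued++
//		}
//		return true // keep iterating
//	})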
type WorkerInterruptOption func(*WorkerInterruptConfig)

type WorkerInterruptConfig struct {
	RemoveWorker bool
	RemoveTask   bool
	ReassignTask bool
	ForcePanic   bool
	Restart      bool
}
// WithRestart restarts the worker after interruption.
func WithRestart() WorkerInterruptOption {
	return func(cfg *WorkerInterruptConfig) {
		cfg.Restart = true
	}
}

// WithRemoveWorker removes the worker after interruption.
func WithRemoveWorker() WorkerInterruptOption {
	return func(cfg *WorkerInterruptConfig) {
		cfg.RemoveWorker = true
	}
}

// WithRemoveTask removes the task the worker was processing.
func WithRemoveTask() WorkerInterruptOption {
	return func(cfg *WorkerInterruptConfig) {
		cfg.RemoveTask = true
	}
}

// WithReassignTask reassigns the task for retrying.
func WithReassignTask() WorkerInterruptOption {
	return func(cfg *WorkerInterruptConfig) {
		cfg.ReassignTask = true
	}
}

// WithForcePanic forces the worker to panic.
func WithForcePanic() WorkerInterruptOption {
	return func(cfg *WorkerInterruptConfig) {
		cfg.ForcePanic = true
	}
}
// InterruptWorker cancels a worker's current task and optionally removes the worker.
// It can also force the worker to panic.
func (p *Pool[T]) InterruptWorker(workerID int, options ...WorkerInterruptOption) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	state, exists := p.workers[workerID]
	if !exists {
		return fmt.Errorf("worker %d does not exist", workerID)
	}
	// Apply options
	cfg := &WorkerInterruptConfig{
		RemoveWorker: false,
		RemoveTask:   false,
		ReassignTask: false,
		ForcePanic:   false,
		Restart:      false,
	}
	for _, opt := range options {
		opt(cfg)
	}
	// Set the forcePanicFlag if requested
	if cfg.ForcePanic {
		state.forcePanicFlag.Store(true)
	}
	// Cancel the worker's context to trigger the interrupt
	state.cancel()
	// Cancel the current task's context if it's running
	task := state.currentTask
	if task != nil {
		task.cancel()
	}
	// Close the stopChan to signal the worker to stop
	close(state.stopChan)
	// Handle task options
	if task != nil {
		if cfg.RemoveTask {
			// Task is already canceled and will not be retried
		} else if cfg.ReassignTask {
			// Reset the task's context
			taskCtx, cancel := context.WithCancel(p.ctx)
			task.ctx = taskCtx
			task.cancel = cancel
			task.triedWorkers = make(map[int]bool)
			task.scheduledTime = time.Now()
			// Add the task back to the taskQueues for the current retry count
			q := p.taskQueues[task.retries]
			q.tasks = append(q.tasks, task)
			p.taskQueues[task.retries] = q
		}
	}
	if cfg.RemoveWorker {
		// RemoveWorker acquires p.mu itself, so release the lock around the call
		p.mu.Unlock()
		err := p.RemoveWorker(workerID)
		p.mu.Lock()
		if err != nil {
			return fmt.Errorf("failed to remove worker %d: %v", workerID, err)
		}
	} else {
		state.interrupted = true
		if cfg.Restart {
			// RestartWorker acquires p.mu itself, so release the lock around the call
			p.mu.Unlock()
			err := p.RestartWorker(workerID)
			p.mu.Lock()
			if err != nil {
				return fmt.Errorf("failed to restart worker %d: %v", workerID, err)
			}
		}
	}
	p.cond.Broadcast() // Signal all workers
	return nil
}
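
// Interrupting a stuck worker, retrying its task elsewhere, and restarting
// the worker might look like this (illustrative sketch):
//
//	if err := pool.InterruptWorker(id, WithReassignTask(), WithRestart()); err != nil {
//		log.Printf("interrupt failed: %v", err)
//	}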
// runWorkerWithFailsafe handles OnRetry, RetryIf, and the task callbacks
func (p *Pool[T]) runWorkerWithFailsafe(workerID int, task *TaskWrapper[T]) {
	// Create an attempt-specific context
	attemptCtx, attemptCancel := context.WithCancel(task.ctx)
	defer attemptCancel() // Ensure the context is canceled when done
	// Wrap the context with panicContext by default
	attemptCtx = &panicContext{Context: attemptCtx}
	// Wrap the context with panicOnTimeoutContext if enabled
	if task.panicOnTimeout {
		attemptCtx = &panicOnTimeoutContext{Context: attemptCtx}
	}
	// Reset attempt-specific duration tracking
	start := time.Now()
	// If maxDuration is set, enforce it by canceling the attempt's context when exceeded
	if task.maxDuration > 0 {
		go func() {
			select {
			case <-p.timer.After(task.maxDuration):
				attemptCancel() // Cancel the attempt's context when maxDuration is exceeded
			case <-attemptCtx.Done():
			}
		}()
	}
	// Enforce the time limit for the overall task duration
	if task.timeLimit > 0 {
		// Capture the current total duration to avoid data races
		p.mu.Lock()
		currentTotalDuration := task.totalDuration
		p.mu.Unlock()
		// Pass the necessary data to avoid accessing shared fields concurrently
		go func() {
			p.enforceTimeLimit(attemptCancel, task.timeLimit, currentTotalDuration, task.ctx)
		}()
	}
	var err error
	// Attempt to run the worker within a panic-catching function
	func() {
		defer func() {
			if r := recover(); r != nil {
				// Capture the stack trace
				buf := make([]byte, 4096)
				n := runtime.Stack(buf, false)
				stackTrace := string(buf[:n])
				// Create a concise error message
				err = fmt.Errorf("panic occurred in worker %d: %v", workerID, r)
				if p.config.panicHandler != nil {
					// Call the panic handler with the task data, panic value, and stack trace
					p.config.panicHandler(task.Data(), r, stackTrace)
				}
			}
		}()
		// Attempt to run the worker
		p.mu.Lock()
		state, exists := p.workers[workerID]
		p.mu.Unlock()
		if !exists {
			err = fmt.Errorf("worker %d does not exist", workerID)
			return
		}
		err = state.worker.Run(attemptCtx, task.data)
	}()
	// Check for panicOnTimeoutError
	var timeoutErr *panicOnTimeoutError
	if errors.As(err, &timeoutErr) {
		// Handle the timeout error
		err = fmt.Errorf("worker %d stopped due to timeout: %w", workerID, timeoutErr)
	}
	duration := time.Since(start)
	// Safely update shared fields
	p.mu.Lock()
	task.totalDuration += duration
	task.durations = append(task.durations, duration)
	p.mu.Unlock()
	if err != nil {
		p.mu.Lock()
		task.errors = append(task.errors, err)
		p.mu.Unlock()
		if IsUnrecoverable(err) {
			p.addToDeadTasks(task, err)
			return
		}
		// Check if the error is due to the time limit or max duration being exceeded
		if errors.Is(err, context.DeadlineExceeded) ||
			(task.maxDuration > 0 && errors.Is(err, context.Canceled) && duration >= task.maxDuration) {
			// Exceeded maxDuration for this attempt
			p.requeueTask(task, fmt.Errorf("task exceeded max duration of %v for attempt", task.maxDuration), false)
			return
		}
		var action DeadTaskAction = DeadTaskActionRetry // Default action
		if p.config.onTaskFailure != nil {
			p.mu.Lock()
			state, exists := p.workers[workerID]
			p.mu.Unlock()
			if exists {
				action = p.config.onTaskFailure(p, workerID, state.worker, task, err)
			}
		}
		switch action {
		case DeadTaskActionAddToDeadTasks:
			p.addToDeadTasks(task, err)
		case DeadTaskActionRetry:
			if !errors.Is(err, context.Canceled) && p.config.retryIf(err) && task.retries < p.config.attempts {
				p.config.onRetry(task.retries, err, task)
				p.requeueTask(task, err, false)
			} else {
				p.addToDeadTasks(task, err)
			}
		case DeadTaskActionForceRetry:
			p.config.onRetry(task.retries, err, task)
			p.requeueTask(task, err, true)
		case DeadTaskActionDoNothing:
			// Do nothing, as requested
		}
	} else {
		if p.config.onTaskSuccess != nil {
			p.mu.Lock()
			state, exists := p.workers[workerID]
			p.mu.Unlock()
			if exists {
				p.config.onTaskSuccess(p, workerID, state.worker, task)
			}
		}
	}
}
// IsUnrecoverable reports whether err was marked as unrecoverable.
func IsUnrecoverable(err error) bool {
	var unrecoverableErr unrecoverableError
	return errors.As(err, &unrecoverableErr)
}
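
// An unrecoverable error sends the task straight to the dead tasks list
// instead of being retried (illustrative sketch; it assumes the package's
// unrecoverableError wrapper is exposed through a helper such as
// Unrecoverable, which is not shown in this excerpt):
//
//	func (w *myWorker) Run(ctx context.Context, data string) error {
//		if data == "" {
//			return Unrecoverable(errors.New("empty payload: retrying cannot help"))
//		}
//		return nil
//	}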
// requeueTask handles retry delays and keeps triedWorkers intact
func (p *Pool[T]) requeueTask(task *TaskWrapper[T], err error, forceRetry bool) {
	p.mu.Lock()
	defer p.mu.Unlock()
	task.retries++
	// Check if the task has exceeded its time limit
	if task.timeLimit > 0 && task.totalDuration >= task.timeLimit {
		p.addToDeadTasks(task, err)
		return
	}
	// Reset the per-attempt durations for the next attempt
	task.durations = nil
	// Check if max attempts reached (unless unlimited retries)
	if !forceRetry && p.config.attempts != UnlimitedAttempts && task.retries >= p.config.attempts {
		p.addToDeadTasks(task, err)
		return
	}
	// Calculate the delay before the next retry
	delay := p.calculateDelay(task.retries, err)
	task.scheduledTime = time.Now().Add(delay)
	if !task.immediateRetry {
		// Randomly select a worker that hasn't tried this task
		availableWorkers := make([]int, 0)
		for workerID := range p.workers {
			if !task.triedWorkers[workerID] {
				availableWorkers = append(availableWorkers, workerID)
			}
		}
		var selectedWorkerID int
		if len(availableWorkers) > 0 {
			selectedWorkerID = availableWorkers[rand.Intn(len(availableWorkers))]
		} else {
			// If all workers have tried, reset triedWorkers and select a random worker
			task.triedWorkers = make(map[int]bool)
			workerIDs := make([]int, 0, len(p.workers))
			for workerID := range p.workers {
				workerIDs = append(workerIDs, workerID)
			}
			selectedWorkerID = workerIDs[rand.Intn(len(workerIDs))]
		}
		q := p.taskQueues[selectedWorkerID]
		q.tasks = append(q.tasks, task) // Put at the back of the queue
		p.taskQueues[selectedWorkerID] = q
	} else {
		// Immediate retry
		if len(task.triedWorkers) < len(p.workers) {
			// Find a worker that hasn't tried this task
			for workerID := range p.workers {
				if !task.triedWorkers[workerID] {
					q := p.taskQueues[workerID]
					q.tasks = append([]*TaskWrapper[T]{task}, q.tasks...) // Put at the front of the queue
					p.taskQueues[workerID] = q
					break
				}
			}
		} else {
			// All workers have tried; reset triedWorkers and put at the back of a random worker's queue
			task.triedWorkers = make(map[int]bool)
			workerIDs := make([]int, 0, len(p.workers))
			for workerID := range p.workers {
				workerIDs = append(workerIDs, workerID)
			}
			randomWorkerID := workerIDs[rand.Intn(len(workerIDs))]
			q := p.taskQueues[randomWorkerID]
			q.tasks = append(q.tasks, task) // Put at the back of the queue
			p.taskQueues[randomWorkerID] = q
		}
	}
	p.cond.Broadcast() // Signal all workers
}
// calculateDelay calculates the delay based on DelayType
func (p *Pool[T]) calculateDelay(n int, err error) time.Duration {
	delayTime := p.config.delayType(n, err, &p.config)
	if p.config.maxDelay > 0 && delayTime > p.config.maxDelay {
		delayTime = p.config.maxDelay
	}
	return delayTime
}
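
// Judging by the call above, a custom delay function has the shape
// func(n int, err error, config *Config[T]) time.Duration. A fixed-delay
// variant might look like this (illustrative sketch; how it is wired into
// Config is defined elsewhere in the package):
//
//	func fixedDelay[T any](n int, err error, config *Config[T]) time.Duration {
//		return 250 * time.Millisecond // same delay for every retry
//	}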
// addToDeadTasks adds a task to the dead tasks list
func (p *Pool[T]) addToDeadTasks(task *TaskWrapper[T], finalError error) {
	p.deadTasksMutex.Lock()
	defer p.deadTasksMutex.Unlock()
	totalDuration := task.totalDuration
	for _, duration := range task.durations {
		totalDuration += duration
	}
	errs := make([]error, len(task.errors))
	copy(errs, task.errors)
	if finalError != nil && (len(errs) == 0 || finalError.Error() != errs[len(errs)-1].Error()) {
		errs = append(errs, finalError)
	}
	deadTask := DeadTask[T]{
		Data:          task.data,
		Retries:       task.retries,
		TotalDuration: totalDuration,
		Errors:        errs,
	}
	p.deadTasks = append(p.deadTasks, deadTask)
	if p.config.onNewDeadTask != nil {
		p.config.onNewDeadTask(&deadTask)
	}
}
// PullDeadTask removes and returns a dead task from the pool
func (p *Pool[T]) PullDeadTask(idx int) (*DeadTask[T], error) {
	p.deadTasksMutex.Lock()
	defer p.deadTasksMutex.Unlock()
	if idx < 0 || idx >= len(p.deadTasks) {
		return nil, fmt.Errorf("invalid dead task index: %d", idx)
	}
	deadTask := p.deadTasks[idx]
	p.deadTasks = append(p.deadTasks[:idx], p.deadTasks[idx+1:]...)
	return &deadTask, nil
}
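
// Draining dead tasks for inspection might look like this (illustrative
// sketch; repeatedly pulling index 0 empties the list):
//
//	for {
//		dead, err := pool.PullDeadTask(0)
//		if err != nil {
//			break // no dead tasks left
//		}
//		log.Printf("gave up on %v after %d retries: %v", dead.Data, dead.Retries, dead.Errors)
//	}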
// Dispatch submits a task to the pool, accepting TaskOptions
func (p *Pool[T]) Dispatch(data T, options ...TaskOption[T]) error {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.stopped {
		return errors.New("pool is closed")
	}
	taskCtx, cancel := context.WithCancel(p.ctx)
	task := &TaskWrapper[T]{
		data:           data,
		retries:        0,
		triedWorkers:   make(map[int]bool),
		errors:         make([]error, 0),
		durations:      make([]time.Duration, 0),
		ctx:            taskCtx,
		cancel:         cancel,
		immediateRetry: false,