package faktory_worker

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"runtime"
	"sort"
	"time"

	faktory "github.com/contribsys/faktory/client"
)

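// lifecycleEventType identifies a phase in the worker lifecycle:
// startup, quiet (finish in-progress work but fetch no more), or shutdown.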
type lifecycleEventType int

const (
	Startup  lifecycleEventType = 1
	Quiet    lifecycleEventType = 2
	Shutdown lifecycleEventType = 3
)

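// NoHandlerError is returned when a fetched job has a type for which
// no handler was registered; the job is reported back to Faktory as a
// failure.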
type NoHandlerError struct {
	JobType string
}

func (s *NoHandlerError) Error() string {
	return fmt.Sprintf("No handler registered for job type %s", s.JobType)
}

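// heartbeat pings the Faktory server every 10 seconds so the server
// knows this worker process is still alive. The BEAT response may carry
// a state directive (such as "quiet" or "terminate"), which is passed
// on to the manager's lifecycle event handling.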
func heartbeat(mgr *Manager) {
	mgr.shutdownWaiter.Add(1)
	ticker := time.NewTicker(10 * time.Second)
	for {
		select {
		case <-ticker.C:
			// we don't care about errors, assume any network
			// errors will heal eventually
			err := mgr.with(func(c *faktory.Client) error {
				data, err := c.Beat(mgr.state)
				if err != nil || data == "" {
					return err
				}
				var hash map[string]string
				err = json.Unmarshal([]byte(data), &hash)
				if err != nil {
					return err
				}
				if state, ok := hash["state"]; ok && state != "" {
					mgr.handleEvent(state)
				}
				return nil
			})
			if err != nil {
				mgr.Logger.Error(fmt.Sprintf("heartbeat error: %v", err))
			}
		case <-mgr.done:
			ticker.Stop()
			mgr.shutdownWaiter.Done()
			return
		}
	}
}

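// process is the fetch/execute loop for a single worker goroutine.
// It runs until the manager is quieted or shut down.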
func process(mgr *Manager, idx int) {
	mgr.shutdownWaiter.Add(1)
	// delay initial fetch randomly to prevent thundering herd.
	// this will pause between 0 and 2B nanoseconds, i.e. 0-2 seconds
	time.Sleep(time.Duration(rand.Int31()))
	defer mgr.shutdownWaiter.Done()

	for {
		if mgr.state != "" {
			return
		}

		// check for shutdown
		select {
		case <-mgr.done:
			return
		default:
		}

		err := processOne(mgr)
		if err != nil {
			mgr.Logger.Error(err)
			if _, ok := err.(*NoHandlerError); !ok {
				time.Sleep(1 * time.Second)
			}
		}
	}
}

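// processOne fetches a single job, dispatches it through the middleware
// chain to its registered handler, and reports the outcome (ACK on
// success, FAIL on error) back to Faktory.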
func processOne(mgr *Manager) error {
	var job *faktory.Job

	// explicit scopes to limit variable visibility
	{
		var e error
		err := mgr.with(func(c *faktory.Client) error {
			job, e = c.Fetch(mgr.queueList()...)
			if e != nil {
				return e
			}
			return nil
		})
		if err != nil {
			return err
		}
		if job == nil {
			return nil
		}
	}

	perform := mgr.jobHandlers[job.Type]
	if perform == nil {
		je := &NoHandlerError{JobType: job.Type}
		err := mgr.with(func(c *faktory.Client) error {
			return c.Fail(job.Jid, je, nil)
		})
		if err != nil {
			return err
		}
		return je
	}

	joberr := dispatch(mgr.middleware, jobContext(mgr.Pool, job), job, perform)
	if joberr != nil {
		// job errors are normal and expected, we don't return early from them
		mgr.Logger.Errorf("Error running %s job %s: %v", job.Type, job.Jid, joberr)
	}

	for {
		// we want to report the result back to Faktory.
		// we stay in this loop until we successfully report.
		err := mgr.with(func(c *faktory.Client) error {
			if joberr != nil {
				return c.Fail(job.Jid, joberr, nil)
			} else {
				return c.Ack(job.Jid)
			}
		})
		if err == nil {
			return nil
		}
		mgr.Logger.Error(err)
		time.Sleep(1 * time.Second)
	}
}

// expandWeightedQueues builds a slice in which each queue name appears
// a number of times equal to its weight.
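// For example, map[string]int{"critical": 2, "default": 1} expands to
// []string{"critical", "critical", "default"}.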
func expandWeightedQueues(queueWeights map[string]int) []string {
	weightsTotal := 0
	for _, queueWeight := range queueWeights {
		weightsTotal += queueWeight
	}

	weightedQueues := make([]string, weightsTotal)
	fillIndex := 0

	for queue, nTimes := range queueWeights {
		// Fill weightedQueues with queue n times
		for idx := 0; idx < nTimes; idx++ {
			weightedQueues[fillIndex] = queue
			fillIndex++
		}
	}

	// weightedQueues has to be stable so we can write tests
	sort.Strings(weightedQueues)
	return weightedQueues
}

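// queueKeys returns the queue names from the weight map in sorted order.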
func queueKeys(queues map[string]int) []string {
	keys := make([]string, len(queues))
	i := 0
	for k := range queues {
		keys[i] = k
		i++
	}
	// the ordering has to be stable so we can write tests
	sort.Strings(keys)
	return keys
}

// shuffleQueues returns a copy of the slice with the elements shuffled.
func shuffleQueues(queues []string) []string {
	wq := make([]string, len(queues))
	copy(wq, queues)

	rand.Shuffle(len(wq), func(i, j int) {
		wq[i], wq[j] = wq[j], wq[i]
	})

	return wq
}

// uniqQueues returns a slice of the given length containing the unique
// queue names in order of first appearance.
// The underlying array is reused to avoid allocating another one.
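// For example, uniqQueues(2, []string{"a", "b", "a"}) returns
// []string{"a", "b"}.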
func uniqQueues(length int, queues []string) []string {
	// Record each unique value and its position of first appearance.
	pos := 0
	uniqMap := make(map[string]int)
	for _, v := range queues {
		if _, ok := uniqMap[v]; !ok {
			uniqMap[v] = pos
			pos++
		}
	}

	// Reuse the copied array, moving each unique value to its position.
	for queue, position := range uniqMap {
		queues[position] = queue
	}

	// Slice only what we need.
	return queues[:length]
}

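// dumpThreads logs the stack of every goroutine in the process, which
// is useful for diagnosing a stuck worker.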
func dumpThreads(logg Logger) {
	buf := make([]byte, 64*1024)
	n := runtime.Stack(buf, true)
	logg.Info("FULL PROCESS THREAD DUMP:")
	logg.Info(string(buf[:n]))
}