Restart zero worker if there is still work to do (#18658)

* Restart zero worker if there is still work to do

It is possible for the zero worker to time out before all of the work is finished.
When that happens, the remaining work may take a long time to complete, because a new
worker will only be started when something else is pushed to the queue.

Also ensure that the requested count is reset after pull and push mirror sync requests are queued, and add some more trace logging to the queue push.

Fix #18607

Signed-off-by: Andrew Thornton <art27@cantab.net>
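
Below is a minimal sketch of the reboost idea that the workerpool.go change implements. It is not the actual Gitea WorkerPool: the pool type, field names, and timeouts here are simplified stand-ins. The point is the deferred check when the last worker exits: if items remain in the queue and the pool is not shutting down, start another temporary boost worker instead of waiting for the next push.

// Minimal sketch (not the Gitea WorkerPool): reboost a zero-worker pool
// when its last temporary worker exits while work is still queued.
package main

import (
	"context"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

type pool struct {
	lock       sync.Mutex
	baseCtx    context.Context
	numWorkers int
	numInQueue int64
	work       chan string
}

// Push queues an item and boosts a worker if none are running.
func (p *pool) Push(item string) {
	atomic.AddInt64(&p.numInQueue, 1)
	p.work <- item
	p.lock.Lock()
	if p.numWorkers == 0 {
		p.boost(500 * time.Millisecond)
	}
	p.lock.Unlock()
}

// boost starts one temporary worker that exits after timeout.
// The caller must hold p.lock.
func (p *pool) boost(timeout time.Duration) {
	ctx, cancel := context.WithTimeout(p.baseCtx, timeout)
	p.numWorkers++
	go func() {
		defer cancel()
		p.worker(ctx)
	}()
}

// worker drains items until ctx expires, then decides whether to reboost.
func (p *pool) worker(ctx context.Context) {
	defer func() {
		p.lock.Lock()
		defer p.lock.Unlock()
		p.numWorkers--
		select {
		case <-p.baseCtx.Done():
			// the pool is shutting down: don't reboost
		default:
			if p.numWorkers == 0 && atomic.LoadInt64(&p.numInQueue) > 0 {
				// no workers left but work remains -> reboost (this is the fix)
				p.boost(500 * time.Millisecond)
			}
		}
	}()
	for {
		select {
		case <-ctx.Done():
			return
		case item := <-p.work:
			atomic.AddInt64(&p.numInQueue, -1)
			fmt.Println("handled", item)
		}
	}
}

func main() {
	p := &pool{baseCtx: context.Background(), work: make(chan string, 100)}
	for i := 0; i < 5; i++ {
		p.Push(fmt.Sprintf("item-%d", i))
	}
	time.Sleep(2 * time.Second) // give the boosted worker time to drain the queue
}

In the real commit this check sits at the end of the worker goroutine in addWorkers and calls zeroBoost, which (as its comment in the diff notes) unlocks p.lock itself.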
Branch: tokarchuk/v1.17
Author: zeripath (committed by GitHub)
Parent: 4d939845d2
Commit: df44017328
Changed files:
  modules/queue/workerpool.go (14 changed lines)
  services/mirror/mirror.go (28 changed lines)

modules/queue/workerpool.go:

@@ -115,6 +115,9 @@ func (p *WorkerPool) hasNoWorkerScaling() bool {
 	return p.numberOfWorkers == 0 && (p.boostTimeout == 0 || p.boostWorkers == 0 || p.maxNumberOfWorkers == 0)
 }
+// zeroBoost will add a temporary boost worker for a no worker queue
+// p.lock must be locked at the start of this function BUT it will be unlocked by the end of this function
+// (This is because addWorkers has to be called whilst unlocked)
 func (p *WorkerPool) zeroBoost() {
 	ctx, cancel := context.WithTimeout(p.baseCtx, p.boostTimeout)
 	mq := GetManager().GetManagedQueue(p.qid)

@@ -316,6 +319,17 @@ func (p *WorkerPool) addWorkers(ctx context.Context, cancel context.CancelFunc,
 				}
 				p.pause()
 			}
+			select {
+			case <-p.baseCtx.Done():
+				// this worker queue is shut-down don't reboost
+			default:
+				if p.numberOfWorkers == 0 && atomic.LoadInt64(&p.numInQueue) > 0 {
+					// OK there are no workers but... there's still work to be done -> Reboost
+					p.zeroBoost()
+					// p.lock will be unlocked by zeroBoost
+					return
+				}
+			}
 			p.lock.Unlock()
 		}()
 	}

services/mirror/mirror.go:

@@ -59,11 +59,13 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 	handler := func(idx int, bean interface{}, limit int) error {
 		var item SyncRequest
+		var repo *repo_model.Repository
 		if m, ok := bean.(*repo_model.Mirror); ok {
 			if m.Repo == nil {
 				log.Error("Disconnected mirror found: %d", m.ID)
 				return nil
 			}
+			repo = m.Repo
 			item = SyncRequest{
 				Type:   PullMirrorType,
 				RepoID: m.RepoID,

@@ -73,6 +75,7 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 				log.Error("Disconnected push-mirror found: %d", m.ID)
 				return nil
 			}
+			repo = m.Repo
 			item = SyncRequest{
 				Type:   PushMirrorType,
 				RepoID: m.RepoID,

@@ -89,17 +92,16 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 		default:
 		}
-		// Check if this request is already in the queue
-		has, err := mirrorQueue.Has(&item)
-		if err != nil {
-			return err
-		}
-		if has {
-			return nil
-		}
 		// Push to the Queue
 		if err := mirrorQueue.Push(&item); err != nil {
+			if err == queue.ErrAlreadyInQueue {
+				if item.Type == PushMirrorType {
+					log.Trace("PushMirrors for %-v already queued for sync", repo)
+				} else {
+					log.Trace("PullMirrors for %-v already queued for sync", repo)
+				}
+				return nil
+			}
 			return err
 		}

@@ -110,23 +112,29 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 		return nil
 	}
+	pullMirrorsRequested := 0
 	if pullLimit != 0 {
-		requested = 0
 		if err := repo_model.MirrorsIterate(func(idx int, bean interface{}) error {
 			return handler(idx, bean, pullLimit)
 		}); err != nil && err != errLimit {
 			log.Error("MirrorsIterate: %v", err)
 			return err
 		}
+		pullMirrorsRequested, requested = requested, 0
 	}
+	pushMirrorsRequested := 0
 	if pushLimit != 0 {
-		requested = 0
 		if err := repo_model.PushMirrorsIterate(func(idx int, bean interface{}) error {
 			return handler(idx, bean, pushLimit)
 		}); err != nil && err != errLimit {
 			log.Error("PushMirrorsIterate: %v", err)
 			return err
 		}
+		pushMirrorsRequested, requested = requested, 0
 	}
-	log.Trace("Finished: Update")
+	log.Trace("Finished: Update: %d pull mirrors and %d push mirrors queued", pullMirrorsRequested, pushMirrorsRequested)
 	return nil
 }
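
The mirror.go change replaces the explicit Has() check before Push() with handling of queue.ErrAlreadyInQueue returned by Push() itself, which avoids a second queue round trip and the window between the two calls. A minimal sketch of that pattern follows; the uniqueQueue type and queueSync helper are hypothetical stand-ins, and ErrAlreadyInQueue here mirrors the sentinel error in Gitea's queue package.

// Minimal sketch: treat "already queued" from Push as a non-error instead of
// checking Has() first. uniqueQueue and queueSync are hypothetical stand-ins.
package main

import (
	"errors"
	"fmt"
)

// ErrAlreadyInQueue stands in for queue.ErrAlreadyInQueue in Gitea's queue package.
var ErrAlreadyInQueue = errors.New("already in queue")

type uniqueQueue struct {
	seen map[string]bool
}

// Push rejects duplicate keys with ErrAlreadyInQueue.
func (q *uniqueQueue) Push(key string) error {
	if q.seen[key] {
		return ErrAlreadyInQueue
	}
	q.seen[key] = true
	return nil
}

// queueSync pushes a sync request and downgrades duplicates to a trace-style log.
func queueSync(q *uniqueQueue, key string) error {
	if err := q.Push(key); err != nil {
		if errors.Is(err, ErrAlreadyInQueue) {
			fmt.Printf("%s already queued for sync\n", key)
			return nil // a duplicate request is not an error
		}
		return err
	}
	fmt.Printf("%s queued for sync\n", key)
	return nil
}

func main() {
	q := &uniqueQueue{seen: map[string]bool{}}
	_ = queueSync(q, "repo-1")
	_ = queueSync(q, "repo-1") // second push reports "already queued" and is skipped
}

As in the handler above, the duplicate push is logged at trace level and treated as success rather than propagated as an error.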
