Restart zero worker if there is still work to do (#18658) (#18672)

* Restart zero worker if there is still work to do (#18658)

Backport #18658

It is possible for the temporary worker of a zero-worker queue to time out before all of the work is finished.
This can mean that the remaining work takes a long time to complete, because a new worker
is only started when something is pushed to the queue again.

Also ensure that the requested count is reset after queueing pull and push mirror sync requests, and add some more trace logging to the queue push.

Fix #18607

Signed-off-by: Andrew Thornton <art27@cantab.net>

* Update modules/queue/workerpool.go

zeripath authored 2022-02-08 21:28:21 +00:00, committed by GitHub
commit 38fc6c75f3, parent 8671602ba9
2 changed files with 47 additions and 10 deletions
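
Before the diff itself, a minimal sketch of the behaviour the commit message describes may help: when the last (boost) worker of a zero-worker queue exits while items are still queued, the pool starts another temporary worker instead of waiting for the next push. This is not Gitea's WorkerPool; the type below is cut down to the handful of fields the check needs and zeroBoost is stubbed out, while the real change in modules/queue/workerpool.go additionally skips the check during shutdown and hands lock ownership to zeroBoost.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// pool is a cut-down stand-in for Gitea's WorkerPool: just enough state to
// show the re-boost decision that is taken when the last worker exits.
type pool struct {
	lock               sync.Mutex
	numberOfWorkers    int
	numInQueue         int64 // updated atomically on push/pop
	boostWorkers       int
	maxNumberOfWorkers int
}

// hasNoWorkerScaling mirrors the predicate added in the diff: the queue has
// no workers and is configured so that it can never boost any.
func (p *pool) hasNoWorkerScaling() bool {
	return p.numberOfWorkers == 0 && (p.boostWorkers == 0 || p.maxNumberOfWorkers == 0)
}

// zeroBoost stands in for starting a temporary boost worker.
func (p *pool) zeroBoost() {
	fmt.Println("re-boost: starting a temporary worker for the remaining work")
}

// workerExited models the interesting moment: when a worker finishes, instead
// of silently leaving a zero-worker pool with queued items, either warn about
// a non-scaling configuration or start another boost worker.
func (p *pool) workerExited() {
	p.lock.Lock()
	defer p.lock.Unlock()
	p.numberOfWorkers--
	if p.hasNoWorkerScaling() {
		fmt.Println("warning: non-scaling queue has no workers")
	} else if p.numberOfWorkers == 0 && atomic.LoadInt64(&p.numInQueue) > 0 {
		p.zeroBoost()
	}
}

func main() {
	p := &pool{numberOfWorkers: 1, boostWorkers: 1, maxNumberOfWorkers: 1}
	atomic.AddInt64(&p.numInQueue, 3) // three items pushed and still unhandled
	p.workerExited()                  // the boost worker timed out -> re-boost
}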

modules/queue/workerpool.go

@@ -87,6 +87,20 @@ func (p *WorkerPool) Push(data Data) {
 	}
 }
 
+// HasNoWorkerScaling will return true if the queue has no workers, and has no worker boosting
+func (p *WorkerPool) HasNoWorkerScaling() bool {
+	p.lock.Lock()
+	defer p.lock.Unlock()
+	return p.hasNoWorkerScaling()
+}
+
+func (p *WorkerPool) hasNoWorkerScaling() bool {
+	return p.numberOfWorkers == 0 && (p.boostTimeout == 0 || p.boostWorkers == 0 || p.maxNumberOfWorkers == 0)
+}
+
+// zeroBoost will add a temporary boost worker for a no worker queue
+// p.lock must be locked at the start of this function BUT it will be unlocked by the end of this function
+// (This is because addWorkers has to be called whilst unlocked)
 func (p *WorkerPool) zeroBoost() {
 	ctx, cancel := context.WithTimeout(p.baseCtx, p.boostTimeout)
 	mq := GetManager().GetManagedQueue(p.qid)
@@ -277,6 +291,21 @@ func (p *WorkerPool) addWorkers(ctx context.Context, cancel context.CancelFunc,
 				p.cond.Broadcast()
 				cancel()
 			}
+			select {
+			case <-p.baseCtx.Done():
+				// Don't warn if the baseCtx is shutdown
+			default:
+				if p.hasNoWorkerScaling() {
+					log.Warn(
+						"Queue: %d is configured to be non-scaling and has no workers - this configuration is likely incorrect.", p.qid)
+				} else if p.numberOfWorkers == 0 && atomic.LoadInt64(&p.numInQueue) > 0 {
+					// OK there are no workers but... there's still work to be done -> Reboost
+					p.zeroBoost()
+					// p.lock will be unlocked by zeroBoost
+					return
+				}
+			}
 			p.lock.Unlock()
 		}()
 	}

services/mirror/mirror.go

@@ -59,11 +59,13 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 	handler := func(idx int, bean interface{}, limit int) error {
 		var item SyncRequest
+		var repo *repo_model.Repository
 		if m, ok := bean.(*repo_model.Mirror); ok {
 			if m.Repo == nil {
 				log.Error("Disconnected mirror found: %d", m.ID)
 				return nil
 			}
+			repo = m.Repo
 			item = SyncRequest{
 				Type:   PullMirrorType,
 				RepoID: m.RepoID,
@@ -73,6 +75,7 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 				log.Error("Disconnected push-mirror found: %d", m.ID)
 				return nil
 			}
+			repo = m.Repo
 			item = SyncRequest{
 				Type:   PushMirrorType,
 				RepoID: m.RepoID,
@@ -89,17 +92,16 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 		default:
 		}
 
-		// Check if this request is already in the queue
-		has, err := mirrorQueue.Has(&item)
-		if err != nil {
-			return err
-		}
-		if has {
-			return nil
-		}
-
 		// Push to the Queue
 		if err := mirrorQueue.Push(&item); err != nil {
+			if err == queue.ErrAlreadyInQueue {
+				if item.Type == PushMirrorType {
+					log.Trace("PushMirrors for %-v already queued for sync", repo)
+				} else {
+					log.Trace("PullMirrors for %-v already queued for sync", repo)
+				}
+				return nil
+			}
 			return err
 		}
@@ -110,23 +112,29 @@ func Update(ctx context.Context, pullLimit, pushLimit int) error {
 		return nil
 	}
 
+	pullMirrorsRequested := 0
 	if pullLimit != 0 {
+		requested = 0
 		if err := repo_model.MirrorsIterate(func(idx int, bean interface{}) error {
 			return handler(idx, bean, pullLimit)
 		}); err != nil && err != errLimit {
 			log.Error("MirrorsIterate: %v", err)
 			return err
 		}
+		pullMirrorsRequested, requested = requested, 0
 	}
 
+	pushMirrorsRequested := 0
 	if pushLimit != 0 {
+		requested = 0
 		if err := repo_model.PushMirrorsIterate(func(idx int, bean interface{}) error {
 			return handler(idx, bean, pushLimit)
 		}); err != nil && err != errLimit {
 			log.Error("PushMirrorsIterate: %v", err)
 			return err
 		}
+		pushMirrorsRequested, requested = requested, 0
 	}
-	log.Trace("Finished: Update")
+	log.Trace("Finished: Update: %d pull mirrors and %d push mirrors queued", pullMirrorsRequested, pushMirrorsRequested)
 	return nil
 }
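
As a footnote on the "requested count is reset" part of the commit message: Update uses one shared requested counter that the handler closure increments, and the new code captures and clears it per phase so the final trace line can report pull and push mirrors separately. Below is a tiny, self-contained sketch of that counting pattern only; the loops and handler are illustrative stand-ins, not Gitea's API.

package main

import "fmt"

func main() {
	// One shared counter, incremented by the handler closure for every item
	// that actually gets queued.
	requested := 0
	handler := func() { requested++ }

	// Pull-mirror phase: reset the counter, run the handlers, then capture
	// the count while clearing the shared counter for the next phase.
	pullMirrorsRequested := 0
	requested = 0
	for i := 0; i < 3; i++ {
		handler()
	}
	pullMirrorsRequested, requested = requested, 0

	// Push-mirror phase: same shared counter, captured separately.
	pushMirrorsRequested := 0
	requested = 0
	for i := 0; i < 2; i++ {
		handler()
	}
	pushMirrorsRequested, requested = requested, 0

	fmt.Printf("Finished: Update: %d pull mirrors and %d push mirrors queued\n",
		pullMirrorsRequested, pushMirrorsRequested)
}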