// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package graceful

import (
	"context"
	"runtime/pprof"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/process"
	"code.gitea.io/gitea/modules/setting"
)

type state uint8

const (
	stateInit state = iota
	stateRunning
	stateShuttingDown
	stateTerminate
)

type RunCanceler interface {
	Run()
	Cancel()
}

// There are some places that could inherit sockets:
//
// * HTTP or HTTPS main listener
// * HTTP or HTTPS install listener
// * HTTP redirection fallback
// * Builtin SSH listener
//
// If you add a new place you must increment this number
// and add a function to call manager.InformCleanup if it's not going to be used
const numberOfServersToCreate = 4

// Manager represents the graceful server manager interface
var manager *Manager

var initOnce = sync.Once{}

// GetManager returns the Manager
func GetManager() *Manager {
	InitManager(context.Background())
	return manager
}

// InitManager creates the graceful manager in the provided context
func InitManager(ctx context.Context) {
	initOnce.Do(func() {
		manager = newGracefulManager(ctx)

		// Set the process default context to the HammerContext
		process.DefaultContext = manager.HammerContext()
	})
}
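
// Typical bootstrap usage (an illustrative sketch only; rootCtx and the
// surrounding command setup are assumptions, not part of this package):
//
//	graceful.InitManager(rootCtx) // create the manager once, early in startup
//	mgr := graceful.GetManager()  // later callers just fetch the singleton
//	<-mgr.Done()                  // block until the manager has fully terminated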

// RunWithCancel helps to run a function with a custom context; the Cancel function will be called at shutdown.
// The Cancel function should stop the Run function in predictable time.
func (g *Manager) RunWithCancel(rc RunCanceler) {
	g.RunAtShutdown(context.Background(), rc.Cancel)
	g.runningServerWaitGroup.Add(1)
	defer g.runningServerWaitGroup.Done()
	defer func() {
		if err := recover(); err != nil {
			log.Critical("PANIC during RunWithCancel: %v\nStacktrace: %s", err, log.Stack(2))
			g.doShutdown()
		}
	}()
	rc.Run()
}
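
// Minimal sketch of a RunCanceler (a hypothetical "poller" type, not part of
// this package) driven by RunWithCancel: Run blocks until Cancel is called at shutdown.
//
//	type poller struct{ stop chan struct{} }
//
//	func (p *poller) Run()    { <-p.stop }      // block until cancelled
//	func (p *poller) Cancel() { close(p.stop) } // called by the manager at shutdown
//
//	graceful.GetManager().RunWithCancel(&poller{stop: make(chan struct{})})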

// RunWithShutdownContext takes a function that has a context to watch for shutdown.
// After the provided context is Done(), the main function must return once shutdown is complete.
// (Optionally the HammerContext may be obtained and waited for; however, this should be avoided if possible.)
func (g *Manager) RunWithShutdownContext(run func(context.Context)) {
	g.runningServerWaitGroup.Add(1)
	defer g.runningServerWaitGroup.Done()
	defer func() {
		if err := recover(); err != nil {
			log.Critical("PANIC during RunWithShutdownContext: %v\nStacktrace: %s", err, log.Stack(2))
			g.doShutdown()
		}
	}()
	ctx := g.ShutdownContext()
	pprof.SetGoroutineLabels(ctx) // We don't have a label to restore back to, but I think this is fine
	run(ctx)
}
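
// Illustrative caller: the run function loops until the shutdown context is
// done (workQueue and handle are assumed placeholders, not part of this package):
//
//	graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
//		for {
//			select {
//			case <-ctx.Done():
//				return // shutdown requested: finish up and return
//			case item := <-workQueue: // hypothetical source of work
//				handle(item)
//			}
//		}
//	})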

// RunAtTerminate adds to the terminate wait group and creates a go-routine to run the provided function at termination
func (g *Manager) RunAtTerminate(terminate func()) {
	g.terminateWaitGroup.Add(1)
	g.lock.Lock()
	defer g.lock.Unlock()
	g.toRunAtTerminate = append(g.toRunAtTerminate,
		func() {
			defer g.terminateWaitGroup.Done()
			defer func() {
				if err := recover(); err != nil {
					log.Critical("PANIC during RunAtTerminate: %v\nStacktrace: %s", err, log.Stack(2))
				}
			}()
			terminate()
		})
}
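
// Sketch of a terminate hook, e.g. to flush or close a resource once every
// running server has stopped (the db handle is an assumed example, not real):
//
//	graceful.GetManager().RunAtTerminate(func() {
//		if err := db.Close(); err != nil {
//			log.Error("failed to close database: %v", err)
//		}
//	})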

// RunAtShutdown creates a go-routine to run the provided function at shutdown
func (g *Manager) RunAtShutdown(ctx context.Context, shutdown func()) {
	g.lock.Lock()
	defer g.lock.Unlock()
	g.toRunAtShutdown = append(g.toRunAtShutdown,
		func() {
			defer func() {
				if err := recover(); err != nil {
					log.Critical("PANIC during RunAtShutdown: %v\nStacktrace: %s", err, log.Stack(2))
				}
			}()
			select {
			case <-ctx.Done():
				return
			default:
				shutdown()
			}
		})
}

func (g *Manager) doShutdown() {
	if !g.setStateTransition(stateRunning, stateShuttingDown) {
		g.DoImmediateHammer()
		return
	}
	g.lock.Lock()
	g.shutdownCtxCancel()
	atShutdownCtx := pprof.WithLabels(g.hammerCtx, pprof.Labels("graceful-lifecycle", "post-shutdown"))
	pprof.SetGoroutineLabels(atShutdownCtx)
	for _, fn := range g.toRunAtShutdown {
		go fn()
	}
	g.lock.Unlock()

	if setting.GracefulHammerTime >= 0 {
		go g.doHammerTime(setting.GracefulHammerTime)
	}
	go func() {
		g.WaitForServers()
		// Mop up any remaining unclosed events.
		g.doHammerTime(0)
		<-time.After(1 * time.Second)
		g.doTerminate()
		g.WaitForTerminate()
		g.lock.Lock()
		g.managerCtxCancel()
		g.lock.Unlock()
	}()
}

func (g *Manager) doHammerTime(d time.Duration) {
	time.Sleep(d)
	g.lock.Lock()
	select {
	case <-g.hammerCtx.Done():
	default:
		log.Warn("Setting Hammer condition")
		g.hammerCtxCancel()
		atHammerCtx := pprof.WithLabels(g.terminateCtx, pprof.Labels("graceful-lifecycle", "post-hammer"))
		pprof.SetGoroutineLabels(atHammerCtx)
	}
	g.lock.Unlock()
}

func (g *Manager) doTerminate() {
	if !g.setStateTransition(stateShuttingDown, stateTerminate) {
		return
	}
	g.lock.Lock()
	select {
	case <-g.terminateCtx.Done():
	default:
		log.Warn("Terminating")
		g.terminateCtxCancel()
		atTerminateCtx := pprof.WithLabels(g.managerCtx, pprof.Labels("graceful-lifecycle", "post-terminate"))
		pprof.SetGoroutineLabels(atTerminateCtx)

		for _, fn := range g.toRunAtTerminate {
			go fn()
		}
	}
	g.lock.Unlock()
}

// IsChild returns whether the current process is a child of a previous Gitea process
func (g *Manager) IsChild() bool {
	return g.isChild
}

// IsShutdown returns a channel which will be closed at shutdown.
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
func (g *Manager) IsShutdown() <-chan struct{} {
	return g.shutdownCtx.Done()
}

// IsHammer returns a channel which will be closed at hammer
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
// Servers running within the running server wait group should respond to IsHammer
// if not shutdown already
func (g *Manager) IsHammer() <-chan struct{} {
	return g.hammerCtx.Done()
}

// IsTerminate returns a channel which will be closed at terminate
// The order of closure is IsShutdown, IsHammer (potentially), IsTerminate
// IsTerminate will only close once all running servers have stopped
func (g *Manager) IsTerminate() <-chan struct{} {
	return g.terminateCtx.Done()
}
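
// Sketch of a long-running worker that observes the lifecycle channels: it
// stops taking new work on IsShutdown and gives up immediately on IsHammer
// (jobs and process are assumed placeholders):
//
//	mgr := graceful.GetManager()
//	for {
//		select {
//		case <-mgr.IsShutdown():
//			return // stop taking new work
//		case <-mgr.IsHammer():
//			return // give up immediately
//		case job := <-jobs:
//			process(job)
//		}
//	}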

// ServerDone declares a running server done and subtracts one from the
// running server wait group. Users probably do not want to call this
// and should use one of the RunWithShutdown* functions
func (g *Manager) ServerDone() {
	g.runningServerWaitGroup.Done()
}

// WaitForServers waits for all running servers to finish. Users should probably
// instead use AtTerminate or IsTerminate
func (g *Manager) WaitForServers() {
	g.runningServerWaitGroup.Wait()
}

// WaitForTerminate waits for all terminating actions to finish.
// Only the main go-routine should use this
func (g *Manager) WaitForTerminate() {
	g.terminateWaitGroup.Wait()
}

func (g *Manager) getState() state {
	g.lock.RLock()
	defer g.lock.RUnlock()
	return g.state
}

func (g *Manager) setStateTransition(old, new state) bool {
	if old != g.getState() {
		return false
	}
	g.lock.Lock()
	if g.state != old {
		g.lock.Unlock()
		return false
	}
	g.state = new
	g.lock.Unlock()
	return true
}

func (g *Manager) setState(st state) {
	g.lock.Lock()
	defer g.lock.Unlock()

	g.state = st
}

// InformCleanup tells the cleanup wait group that we have either taken a listener or will not be taking a listener.
// At the moment the total number of servers (numberOfServersToCreate) is pre-defined as a const before global init,
// so this function MUST be called if a server is not used.
func (g *Manager) InformCleanup() {
	g.createServerWaitGroup.Done()
}
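
// Sketch of the required pattern when one of the numberOfServersToCreate
// listeners is intentionally not started (the useBuiltinSSH condition is an
// assumed example, not a real setting):
//
//	if !useBuiltinSSH {
//		graceful.GetManager().InformCleanup() // release the unused listener slot
//	}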

// Done allows the manager to be viewed as a context.Context; it returns a channel that is closed when the server has finished terminating
func (g *Manager) Done() <-chan struct{} {
	return g.managerCtx.Done()
}

// Err allows the manager to be viewed as a context.Context done at Terminate
func (g *Manager) Err() error {
	return g.managerCtx.Err()
}

// Value allows the manager to be viewed as a context.Context done at Terminate
func (g *Manager) Value(key interface{}) interface{} {
	return g.managerCtx.Value(key)
}

// Deadline returns nil as there is no fixed Deadline for the manager; it allows the manager to be viewed as a context.Context
func (g *Manager) Deadline() (deadline time.Time, ok bool) {
	return g.managerCtx.Deadline()
}
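
// Because Manager implements context.Context, it can be passed anywhere a
// context is expected, tying that work to the manager's lifetime (a sketch,
// with doSomething as an assumed placeholder):
//
//	mgr := graceful.GetManager()
//	if err := doSomething(mgr); err != nil { // any func(ctx context.Context) error
//		log.Error("doSomething failed: %v", err)
//	}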