Add Goroutine stack inspector to admin/monitor (#19207)
Continues on from #19202. Following the addition of pprof labels, we can now more easily understand the relationship between a goroutine and the requests that spawned it. This PR takes advantage of those labels, adds a few more, and then provides a mechanism for the monitoring page to query the pprof goroutine profile. The resulting binary profile is piped straight into the google/pprof library for parsing, and stack traces are then formed for the goroutines. If a goroutine runs within a process context, or was created from a goroutine within one, it acquires the description labels of that process. The goroutines are mapped to their associated PIDs, and any without an associated PID are placed in an "unbound" group at the bottom. This should make it easier to examine goroutines that have become stuck. A manager command, `gitea manager processes`, is also provided that can export the processes (with or without stacktraces) to the command line.

Signed-off-by: Andrew Thornton <art27@cantab.net>
parent: 9c349a4277
commit: c88547ce71

48 changed files with 1479 additions and 595 deletions
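The mechanism the commit message describes can be sketched in a few lines of Go. This is a minimal illustration, not the PR's actual code; the PR wires these same steps into the monitoring page and the `gitea manager processes` command. github.com/google/pprof/profile is the "google library" added to go.mod below.

package main

import (
	"bytes"
	"fmt"
	"runtime/pprof"

	"github.com/google/pprof/profile"
)

func main() {
	// debug=0 writes the goroutine profile in the binary protobuf
	// format that profile.Parse understands.
	buf := &bytes.Buffer{}
	if err := pprof.Lookup("goroutine").WriteTo(buf, 0); err != nil {
		panic(err)
	}

	prof, err := profile.Parse(buf)
	if err != nil {
		panic(err)
	}

	// Each sample is a group of goroutines sharing a stack. Labels set
	// via pprof.SetGoroutineLabels (e.g. a process description) survive
	// here, which is what lets stacks be bound to their spawning process.
	for _, sample := range prof.Sample {
		fmt.Println(sample.Value, sample.Label)
		for _, loc := range sample.Location {
			for _, line := range loc.Line {
				if line.Function != nil {
					fmt.Println("\t", line.Function.Name)
				}
			}
		}
	}
}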
377	cmd/manager.go

@@ -10,7 +10,6 @@ import (
 	"os"
 	"time"
 
-	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/private"
 
 	"github.com/urfave/cli"
@@ -27,6 +26,7 @@ var (
 			subcmdRestart,
 			subcmdFlushQueues,
 			subcmdLogging,
+			subCmdProcesses,
 		},
 	}
 	subcmdShutdown = cli.Command{
@@ -68,326 +68,38 @@ var (
 			},
 		},
 	}
-	defaultLoggingFlags = []cli.Flag{
[… the remainder of the removed block (the defaultLoggingFlags slice, the whole subcmdLogging command tree, and the runRemoveLogger, runAddSMTPLogger, runAddConnLogger, runAddFileLogger, runAddConsoleLogger and commonAddLogger functions) moves verbatim into the new cmd/manager_logging.go, reproduced in full below …]
+	subCmdProcesses = cli.Command{
+		Name:   "processes",
+		Usage:  "Display running processes within the current process",
+		Action: runProcesses,
+		Flags: []cli.Flag{
+			cli.BoolFlag{
+				Name: "debug",
+			},
+			cli.BoolFlag{
+				Name:  "flat",
+				Usage: "Show processes as flat table rather than as tree",
+			},
+			cli.BoolFlag{
+				Name:  "no-system",
+				Usage: "Do not show system processes",
+			},
+			cli.BoolFlag{
+				Name:  "stacktraces",
+				Usage: "Show stacktraces",
+			},
+			cli.BoolFlag{
+				Name:  "json",
+				Usage: "Output as json",
+			},
+			cli.StringFlag{
+				Name:  "cancel",
+				Usage: "Process PID to cancel. (Only available for non-system processes.)",
+			},
+		},
+	}
 )
 
 func runShutdown(c *cli.Context) error {
 	ctx, cancel := installSignals()
 	defer cancel()
@@ -433,47 +145,16 @@ func runFlushQueues(c *cli.Context) error {
 	return nil
 }
 
-func runPauseLogging(c *cli.Context) error {
+func runProcesses(c *cli.Context) error {
 	ctx, cancel := installSignals()
 	defer cancel()
 
 	setup("manager", c.Bool("debug"))
-	statusCode, msg := private.PauseLogging(ctx)
+	statusCode, msg := private.Processes(ctx, os.Stdout, c.Bool("flat"), c.Bool("no-system"), c.Bool("stacktraces"), c.Bool("json"), c.String("cancel"))
 	switch statusCode {
 	case http.StatusInternalServerError:
 		return fail("InternalServerError", msg)
 	}
-
-	fmt.Fprintln(os.Stdout, msg)
-	return nil
-}
-
[… the removed runResumeLogging and runReleaseReopenLogging functions likewise move verbatim into cmd/manager_logging.go …]
 	return nil
 }
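With the changes above, the new subcommand can be exercised directly from the command line: `gitea manager processes` prints the process tree, `gitea manager processes --flat --no-system --stacktraces` lists non-system processes as a flat table together with the goroutine stacktraces bound to each, and `gitea manager processes --cancel <PID>` sends a cancel to the given non-system process. (Invocations assembled from the flags defined above; output depends on the running instance.)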
382	cmd/manager_logging.go (new file)

// Copyright 2022 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package cmd

import (
	"fmt"
	"net/http"
	"os"

	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/private"

	"github.com/urfave/cli"
)

var (
	defaultLoggingFlags = []cli.Flag{
		cli.StringFlag{
			Name:  "group, g",
			Usage: "Group to add logger to - will default to \"default\"",
		}, cli.StringFlag{
			Name:  "name, n",
			Usage: "Name of the new logger - will default to mode",
		}, cli.StringFlag{
			Name:  "level, l",
			Usage: "Logging level for the new logger",
		}, cli.StringFlag{
			Name:  "stacktrace-level, L",
			Usage: "Stacktrace logging level",
		}, cli.StringFlag{
			Name:  "flags, F",
			Usage: "Flags for the logger",
		}, cli.StringFlag{
			Name:  "expression, e",
			Usage: "Matching expression for the logger",
		}, cli.StringFlag{
			Name:  "prefix, p",
			Usage: "Prefix for the logger",
		}, cli.BoolFlag{
			Name:  "color",
			Usage: "Use color in the logs",
		}, cli.BoolFlag{
			Name: "debug",
		},
	}

	subcmdLogging = cli.Command{
		Name:  "logging",
		Usage: "Adjust logging commands",
		Subcommands: []cli.Command{
			{
				Name:  "pause",
				Usage: "Pause logging (Gitea will buffer logs up to a certain point and will drop them after that point)",
				Flags: []cli.Flag{
					cli.BoolFlag{
						Name: "debug",
					},
				},
				Action: runPauseLogging,
			}, {
				Name:  "resume",
				Usage: "Resume logging",
				Flags: []cli.Flag{
					cli.BoolFlag{
						Name: "debug",
					},
				},
				Action: runResumeLogging,
			}, {
				Name:  "release-and-reopen",
				Usage: "Cause Gitea to release and re-open files used for logging",
				Flags: []cli.Flag{
					cli.BoolFlag{
						Name: "debug",
					},
				},
				Action: runReleaseReopenLogging,
			}, {
				Name:      "remove",
				Usage:     "Remove a logger",
				ArgsUsage: "[name] Name of logger to remove",
				Flags: []cli.Flag{
					cli.BoolFlag{
						Name: "debug",
					}, cli.StringFlag{
						Name:  "group, g",
						Usage: "Group to add logger to - will default to \"default\"",
					},
				},
				Action: runRemoveLogger,
			}, {
				Name:  "add",
				Usage: "Add a logger",
				Subcommands: []cli.Command{
					{
						Name:  "console",
						Usage: "Add a console logger",
						Flags: append(defaultLoggingFlags,
							cli.BoolFlag{
								Name:  "stderr",
								Usage: "Output console logs to stderr - only relevant for console",
							}),
						Action: runAddConsoleLogger,
					}, {
						Name:  "file",
						Usage: "Add a file logger",
						Flags: append(defaultLoggingFlags, []cli.Flag{
							cli.StringFlag{
								Name:  "filename, f",
								Usage: "Filename for the logger - this must be set.",
							}, cli.BoolTFlag{
								Name:  "rotate, r",
								Usage: "Rotate logs",
							}, cli.Int64Flag{
								Name:  "max-size, s",
								Usage: "Maximum size in bytes before rotation",
							}, cli.BoolTFlag{
								Name:  "daily, d",
								Usage: "Rotate logs daily",
							}, cli.IntFlag{
								Name:  "max-days, D",
								Usage: "Maximum number of daily logs to keep",
							}, cli.BoolTFlag{
								Name:  "compress, z",
								Usage: "Compress rotated logs",
							}, cli.IntFlag{
								Name:  "compression-level, Z",
								Usage: "Compression level to use",
							},
						}...),
						Action: runAddFileLogger,
					}, {
						Name:  "conn",
						Usage: "Add a net conn logger",
						Flags: append(defaultLoggingFlags, []cli.Flag{
							cli.BoolFlag{
								Name:  "reconnect-on-message, R",
								Usage: "Reconnect to host for every message",
							}, cli.BoolFlag{
								Name:  "reconnect, r",
								Usage: "Reconnect to host when connection is dropped",
							}, cli.StringFlag{
								Name:  "protocol, P",
								Usage: "Set protocol to use: tcp, unix, or udp (defaults to tcp)",
							}, cli.StringFlag{
								Name:  "address, a",
								Usage: "Host address and port to connect to (defaults to :7020)",
							},
						}...),
						Action: runAddConnLogger,
					}, {
						Name:  "smtp",
						Usage: "Add an SMTP logger",
						Flags: append(defaultLoggingFlags, []cli.Flag{
							cli.StringFlag{
								Name:  "username, u",
								Usage: "Mail server username",
							}, cli.StringFlag{
								Name:  "password, P",
								Usage: "Mail server password",
							}, cli.StringFlag{
								Name:  "host, H",
								Usage: "Mail server host (defaults to: 127.0.0.1:25)",
							}, cli.StringSliceFlag{
								Name:  "send-to, s",
								Usage: "Email address(es) to send to",
							}, cli.StringFlag{
								Name:  "subject, S",
								Usage: "Subject header of sent emails",
							},
						}...),
						Action: runAddSMTPLogger,
					},
				},
			},
		},
	}
)

func runRemoveLogger(c *cli.Context) error {
	setup("manager", c.Bool("debug"))
	group := c.String("group")
	if len(group) == 0 {
		group = log.DEFAULT
	}
	name := c.Args().First()
	ctx, cancel := installSignals()
	defer cancel()

	statusCode, msg := private.RemoveLogger(ctx, group, name)
	switch statusCode {
	case http.StatusInternalServerError:
		return fail("InternalServerError", msg)
	}

	fmt.Fprintln(os.Stdout, msg)
	return nil
}

func runAddSMTPLogger(c *cli.Context) error {
	setup("manager", c.Bool("debug"))
	vals := map[string]interface{}{}
	mode := "smtp"
	if c.IsSet("host") {
		vals["host"] = c.String("host")
	} else {
		vals["host"] = "127.0.0.1:25"
	}

	if c.IsSet("username") {
		vals["username"] = c.String("username")
	}
	if c.IsSet("password") {
		vals["password"] = c.String("password")
	}

	if !c.IsSet("send-to") {
		return fmt.Errorf("Some recipients must be provided")
	}
	vals["sendTos"] = c.StringSlice("send-to")

	if c.IsSet("subject") {
		vals["subject"] = c.String("subject")
	} else {
		vals["subject"] = "Diagnostic message from Gitea"
	}

	return commonAddLogger(c, mode, vals)
}

func runAddConnLogger(c *cli.Context) error {
	setup("manager", c.Bool("debug"))
	vals := map[string]interface{}{}
	mode := "conn"
	vals["net"] = "tcp"
	if c.IsSet("protocol") {
		switch c.String("protocol") {
		case "udp":
			vals["net"] = "udp"
		case "unix":
			vals["net"] = "unix"
		}
	}
	if c.IsSet("address") {
		vals["address"] = c.String("address")
	} else {
		vals["address"] = ":7020"
	}
	if c.IsSet("reconnect") {
		vals["reconnect"] = c.Bool("reconnect")
	}
	if c.IsSet("reconnect-on-message") {
		vals["reconnectOnMsg"] = c.Bool("reconnect-on-message")
	}
	return commonAddLogger(c, mode, vals)
}

func runAddFileLogger(c *cli.Context) error {
	setup("manager", c.Bool("debug"))
	vals := map[string]interface{}{}
	mode := "file"
	if c.IsSet("filename") {
		vals["filename"] = c.String("filename")
	} else {
		return fmt.Errorf("filename must be set when creating a file logger")
	}
	if c.IsSet("rotate") {
		vals["rotate"] = c.Bool("rotate")
	}
	if c.IsSet("max-size") {
		vals["maxsize"] = c.Int64("max-size")
	}
	if c.IsSet("daily") {
		vals["daily"] = c.Bool("daily")
	}
	if c.IsSet("max-days") {
		vals["maxdays"] = c.Int("max-days")
	}
	if c.IsSet("compress") {
		vals["compress"] = c.Bool("compress")
	}
	if c.IsSet("compression-level") {
		vals["compressionLevel"] = c.Int("compression-level")
	}
	return commonAddLogger(c, mode, vals)
}

func runAddConsoleLogger(c *cli.Context) error {
	setup("manager", c.Bool("debug"))
	vals := map[string]interface{}{}
	mode := "console"
	if c.IsSet("stderr") && c.Bool("stderr") {
		vals["stderr"] = c.Bool("stderr")
	}
	return commonAddLogger(c, mode, vals)
}

func commonAddLogger(c *cli.Context, mode string, vals map[string]interface{}) error {
	if len(c.String("level")) > 0 {
		vals["level"] = log.FromString(c.String("level")).String()
	}
	if len(c.String("stacktrace-level")) > 0 {
		vals["stacktraceLevel"] = log.FromString(c.String("stacktrace-level")).String()
	}
	if len(c.String("expression")) > 0 {
		vals["expression"] = c.String("expression")
	}
	if len(c.String("prefix")) > 0 {
		vals["prefix"] = c.String("prefix")
	}
	if len(c.String("flags")) > 0 {
		vals["flags"] = log.FlagsFromString(c.String("flags"))
	}
	if c.IsSet("color") {
		vals["colorize"] = c.Bool("color")
	}
	group := "default"
	if c.IsSet("group") {
		group = c.String("group")
	}
	name := mode
	if c.IsSet("name") {
		name = c.String("name")
	}
	ctx, cancel := installSignals()
	defer cancel()

	statusCode, msg := private.AddLogger(ctx, group, name, mode, vals)
	switch statusCode {
	case http.StatusInternalServerError:
		return fail("InternalServerError", msg)
	}

	fmt.Fprintln(os.Stdout, msg)
	return nil
}

func runPauseLogging(c *cli.Context) error {
	ctx, cancel := installSignals()
	defer cancel()

	setup("manager", c.Bool("debug"))
	statusCode, msg := private.PauseLogging(ctx)
	switch statusCode {
	case http.StatusInternalServerError:
		return fail("InternalServerError", msg)
	}

	fmt.Fprintln(os.Stdout, msg)
	return nil
}

func runResumeLogging(c *cli.Context) error {
	ctx, cancel := installSignals()
	defer cancel()

	setup("manager", c.Bool("debug"))
	statusCode, msg := private.ResumeLogging(ctx)
	switch statusCode {
	case http.StatusInternalServerError:
		return fail("InternalServerError", msg)
	}

	fmt.Fprintln(os.Stdout, msg)
	return nil
}

func runReleaseReopenLogging(c *cli.Context) error {
	ctx, cancel := installSignals()
	defer cancel()

	setup("manager", c.Bool("debug"))
	statusCode, msg := private.ReleaseReopenLogging(ctx)
	switch statusCode {
	case http.StatusInternalServerError:
		return fail("InternalServerError", msg)
	}

	fmt.Fprintln(os.Stdout, msg)
	return nil
}
cmd/web.go

@@ -16,6 +16,7 @@ import (
 
 	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/routers"
 	"code.gitea.io/gitea/routers/install"
@@ -59,6 +60,9 @@ and it takes care of all the other things for you`,
 }
 
 func runHTTPRedirector() {
+	_, _, finished := process.GetManager().AddTypedContext(graceful.GetManager().HammerContext(), "Web: HTTP Redirector", process.SystemProcessType, true)
+	defer finished()
+
 	source := fmt.Sprintf("%s:%s", setting.HTTPAddr, setting.PortToRedirect)
 	dest := strings.TrimSuffix(setting.AppURL, "/")
 	log.Info("Redirecting: %s to %s", source, dest)
@@ -141,8 +145,10 @@ func runWeb(ctx *cli.Context) error {
 
 	if setting.EnablePprof {
 		go func() {
+			_, _, finished := process.GetManager().AddTypedContext(context.Background(), "Web: PProf Server", process.SystemProcessType, true)
 			log.Info("Starting pprof server on localhost:6060")
 			log.Info("%v", http.ListenAndServe("localhost:6060", nil))
+			finished()
 		}()
 	}
 
@@ -204,6 +210,8 @@ func listen(m http.Handler, handleRedirector bool) error {
 	if setting.Protocol != setting.HTTPUnix && setting.Protocol != setting.FCGIUnix {
 		listenAddr = net.JoinHostPort(listenAddr, setting.HTTPPort)
 	}
+	_, _, finished := process.GetManager().AddTypedContext(graceful.GetManager().HammerContext(), "Web: Gitea Server", process.SystemProcessType, true)
+	defer finished()
 	log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubURL)
 	// This can be useful for users, many users do wrong to their config and get strange behaviors behind a reverse-proxy.
 	// A user may fix the configuration mistake when he sees this log.
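The hunks above are instances of one pattern that recurs through this PR: register a described, typed process around a long-lived goroutine so it appears in admin/monitor and carries pprof labels. A minimal sketch of the pattern (the worker name is hypothetical; the call signature follows the call sites in this diff):

package example

import (
	"context"
	"time"

	"code.gitea.io/gitea/modules/process"
)

func runExampleWorker(parent context.Context) {
	// Register a described system process; the returned ctx carries
	// pprof labels tying goroutines that adopt it back to this entry.
	ctx, _, finished := process.GetManager().AddTypedContext(
		parent, "Service: ExampleWorker", process.SystemProcessType, true)
	defer finished() // remove the entry from the process table on exit

	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(time.Minute):
			// periodic work, visible as "Service: ExampleWorker" in
			// admin/monitor and in `gitea manager processes`
		}
	}
}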
cmd/web_acme.go

@@ -15,6 +15,7 @@ import (
 
 	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"
 
 	"github.com/caddyserver/certmagic"
@@ -107,6 +108,9 @@ func runACME(listenAddr string, m http.Handler) error {
 
 	if enableHTTPChallenge {
 		go func() {
+			_, _, finished := process.GetManager().AddTypedContext(graceful.GetManager().HammerContext(), "Web: ACME HTTP challenge server", process.SystemProcessType, true)
+			defer finished()
+
 			log.Info("Running Let's Encrypt handler on %s", setting.HTTPAddr+":"+setting.PortToRedirect)
 			// all traffic coming into HTTP will be redirect to HTTPS automatically (LE HTTP-01 validation happens here)
 			err := runHTTP("tcp", setting.HTTPAddr+":"+setting.PortToRedirect, "Let's Encrypt HTTP Challenge", myACME.HTTPChallengeHandler(http.HandlerFunc(runLetsEncryptFallbackHandler)))
docs/content/doc/advanced/command-line.en-us.md

@@ -503,6 +503,13 @@ Manage running server operations:
   - `--host value`, `-H value`: Mail server host (defaults to: 127.0.0.1:25)
   - `--send-to value`, `-s value`: Email address(es) to send to
   - `--subject value`, `-S value`: Subject header of sent emails
+- `processes`: Display Gitea processes and goroutine information
+  - Options:
+    - `--flat`: Show processes as flat table rather than as tree
+    - `--no-system`: Do not show system processes
+    - `--stacktraces`: Show stacktraces for goroutines associated with processes
+    - `--json`: Output as json
+    - `--cancel PID`: Send cancel to process with PID. (Only for non-system processes.)
 
 ### dump-repo
1	go.mod

@@ -42,6 +42,7 @@ require (
 	github.com/gogs/go-gogs-client v0.0.0-20210131175652-1d7215cd8d85
 	github.com/golang-jwt/jwt/v4 v4.3.0
 	github.com/google/go-github/v39 v39.2.0
+	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/feeds v1.1.1
 	github.com/gorilla/sessions v1.2.1
1	go.sum

@@ -761,6 +761,7 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
modules/context/private.go

@@ -79,6 +79,6 @@ func PrivateContexter() func(http.Handler) http.Handler {
 // the underlying request has timed out from the ssh/http push
 func OverrideContext(ctx *PrivateContext) (cancel context.CancelFunc) {
 	// We now need to override the request context as the base for our work because even if the request is cancelled we have to continue this work
-	ctx.Override, _, cancel = process.GetManager().AddContext(graceful.GetManager().HammerContext(), fmt.Sprintf("PrivateContext: %s", ctx.Req.RequestURI))
+	ctx.Override, _, cancel = process.GetManager().AddTypedContext(graceful.GetManager().HammerContext(), fmt.Sprintf("PrivateContext: %s", ctx.Req.RequestURI), process.RequestProcessType, true)
 	return
 }
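OverrideContext illustrates the other process type introduced by this work: request-scoped work registers with process.RequestProcessType rather than process.SystemProcessType, which is presumably what lets the monitoring page separate request-bound goroutines from system ones and group the leftovers as "unbound".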
modules/eventsource/manager_run.go

@@ -11,6 +11,7 @@ import (
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/timeutil"
 )
@@ -25,6 +26,9 @@ func (m *Manager) Init() {
 
 // Run runs the manager within a provided context
 func (m *Manager) Run(ctx context.Context) {
+	ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: EventSource", process.SystemProcessType, true)
+	defer finished()
+
 	then := timeutil.TimeStampNow().Add(-2)
 	timer := time.NewTicker(setting.UI.Notification.EventSourceUpdateTime)
 loop:
modules/graceful/manager_unix.go

@@ -18,6 +18,7 @@ import (
 	"time"
 
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"
 )
 
@@ -73,7 +74,7 @@ func (g *Manager) start(ctx context.Context) {
 
 	// Set the running state & handle signals
 	g.setState(stateRunning)
-	go g.handleSignals(ctx)
+	go g.handleSignals(g.managerCtx)
 
 	// Handle clean up of unused provided listeners and delayed start-up
 	startupDone := make(chan struct{})
@@ -112,6 +113,9 @@ func (g *Manager) start(ctx context.Context) {
 }
 
 func (g *Manager) handleSignals(ctx context.Context) {
+	ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Graceful: HandleSignals", process.SystemProcessType, true)
+	defer finished()
+
 	signalChannel := make(chan os.Signal, 1)
 
 	signal.Notify(
modules/indexer/code/indexer.go

@@ -7,6 +7,7 @@ package code
 import (
 	"context"
 	"os"
+	"runtime/pprof"
 	"strconv"
 	"strings"
 	"time"
@@ -15,6 +16,7 @@ import (
 	repo_model "code.gitea.io/gitea/models/repo"
 	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/queue"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/timeutil"
@@ -116,7 +118,7 @@ func Init() {
 		return
 	}
 
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel, finished := process.GetManager().AddTypedContext(context.Background(), "Service: CodeIndexer", process.SystemProcessType, false)
 
 	graceful.GetManager().RunAtTerminate(func() {
 		select {
@@ -128,6 +130,7 @@ func Init() {
 		log.Debug("Closing repository indexer")
 		indexer.Close()
 		log.Info("PID: %d Repository Indexer closed", os.Getpid())
+		finished()
 	})
 
 	waitChannel := make(chan time.Duration)
@@ -172,6 +175,7 @@ func Init() {
 	}
 
 	go func() {
+		pprof.SetGoroutineLabels(ctx)
 		start := time.Now()
 		var (
 			rIndexer Indexer
@@ -247,6 +251,7 @@ func Init() {
 
 	if setting.Indexer.StartupTimeout > 0 {
 		go func() {
+			pprof.SetGoroutineLabels(ctx)
 			timeout := setting.Indexer.StartupTimeout
 			if graceful.GetManager().IsChild() && setting.GracefulHammerTime > 0 {
 				timeout += setting.GracefulHammerTime
modules/indexer/issues/indexer.go

@@ -8,6 +8,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"runtime/pprof"
 	"sync"
 	"time"
 
@@ -16,6 +17,7 @@ import (
 	repo_model "code.gitea.io/gitea/models/repo"
 	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/queue"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/util"
@@ -100,6 +102,8 @@ var (
 // InitIssueIndexer initialize issue indexer, syncReindex is true then reindex until
 // all issue index done.
 func InitIssueIndexer(syncReindex bool) {
+	ctx, _, finished := process.GetManager().AddTypedContext(context.Background(), "Service: IssueIndexer", process.SystemProcessType, false)
+
 	waitChannel := make(chan time.Duration)
 
 	// Create the Queue
@@ -165,6 +169,7 @@ func InitIssueIndexer(syncReindex bool) {
 
 	// Create the Indexer
 	go func() {
+		pprof.SetGoroutineLabels(ctx)
 		start := time.Now()
 		log.Info("PID %d: Initializing Issue Indexer: %s", os.Getpid(), setting.Indexer.IssueType)
 		var populate bool
@@ -193,11 +198,13 @@ func InitIssueIndexer(syncReindex bool) {
 				if issueIndexer != nil {
 					issueIndexer.Close()
 				}
+				finished()
 				log.Info("PID: %d Issue Indexer closed", os.Getpid())
 			})
 			log.Debug("Created Bleve Indexer")
 		case "elasticsearch":
 			graceful.GetManager().RunWithShutdownFns(func(_, atTerminate func(func())) {
+				pprof.SetGoroutineLabels(ctx)
 				issueIndexer, err := NewElasticSearchIndexer(setting.Indexer.IssueConnStr, setting.Indexer.IssueIndexerName)
 				if err != nil {
 					log.Fatal("Unable to initialize Elastic Search Issue Indexer at connection: %s Error: %v", setting.Indexer.IssueConnStr, err)
@@ -208,10 +215,12 @@ func InitIssueIndexer(syncReindex bool) {
 				}
 				populate = !exist
 				holder.set(issueIndexer)
+				atTerminate(finished)
 			})
 		case "db":
 			issueIndexer := &DBIndexer{}
 			holder.set(issueIndexer)
+			graceful.GetManager().RunAtTerminate(finished)
 		default:
 			holder.cancel()
 			log.Fatal("Unknown issue indexer type: %s", setting.Indexer.IssueType)
@@ -251,6 +260,7 @@ func InitIssueIndexer(syncReindex bool) {
 		}
 	} else if setting.Indexer.StartupTimeout > 0 {
 		go func() {
+			pprof.SetGoroutineLabels(ctx)
 			timeout := setting.Indexer.StartupTimeout
 			if graceful.GetManager().IsChild() && setting.GracefulHammerTime > 0 {
 				timeout += setting.GracefulHammerTime
@@ -272,6 +282,8 @@ func InitIssueIndexer(syncReindex bool) {
 
 // populateIssueIndexer populate the issue indexer with issue data
 func populateIssueIndexer(ctx context.Context) {
+	ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: PopulateIssueIndexer", process.SystemProcessType, true)
+	defer finished()
 	for page := 1; ; page++ {
 		select {
 		case <-ctx.Done():
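A note on the repeated pprof.SetGoroutineLabels(ctx) calls above: a `go` statement copies the labels of the goroutine that executes it, but here the process labels live in the context returned by AddTypedContext, not necessarily on the spawning goroutine, so each new goroutine adopts them from the context explicitly before doing any work. A sketch of the idiom (illustrative helper; the PR inlines this at each call site):

package example

import (
	"context"
	"runtime/pprof"
)

// spawnLabelled runs work on a new goroutine after copying the pprof
// labels stored in ctx onto it, so the goroutine profile can attribute
// its stack to the process described by ctx.
func spawnLabelled(ctx context.Context, work func(context.Context)) {
	go func() {
		pprof.SetGoroutineLabels(ctx)
		work(ctx)
	}()
}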
modules/log/event.go

@@ -5,9 +5,13 @@
 package log
 
 import (
+	"context"
 	"fmt"
+	"runtime/pprof"
 	"sync"
 	"time"
+
+	"code.gitea.io/gitea/modules/process"
 )
 
 // Event represents a logging event
@@ -34,6 +38,8 @@ type EventLogger interface {
 
 // ChannelledLog represents a cached channel to a LoggerProvider
 type ChannelledLog struct {
+	ctx      context.Context
+	finished context.CancelFunc
 	name     string
 	provider string
 	queue    chan *Event
@@ -44,8 +50,9 @@ type ChannelledLog struct {
 }
 
 // NewChannelledLog a new logger instance with given logger provider and config.
-func NewChannelledLog(name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
+func NewChannelledLog(parent context.Context, name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
 	if log, ok := providers[provider]; ok {
 
 		l := &ChannelledLog{
 			queue: make(chan *Event, bufferLength),
 			flush: make(chan bool),
@@ -58,6 +65,7 @@ func NewChannelledLog(parent context.Context, name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
 		}
 		l.name = name
 		l.provider = provider
+		l.ctx, _, l.finished = process.GetManager().AddTypedContext(parent, fmt.Sprintf("Logger: %s(%s)", l.name, l.provider), process.SystemProcessType, false)
 		go l.Start()
 		return l, nil
 	}
@@ -66,6 +74,8 @@ func NewChannelledLog(parent context.Context, name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
 
 // Start processing the ChannelledLog
 func (l *ChannelledLog) Start() {
+	pprof.SetGoroutineLabels(l.ctx)
+	defer l.finished()
 	for {
 		select {
 		case event, ok := <-l.queue:
@@ -140,6 +150,8 @@ func (l *ChannelledLog) GetName() string {
 
 // MultiChannelledLog represents a cached channel to a LoggerProvider
 type MultiChannelledLog struct {
+	ctx          context.Context
+	finished     context.CancelFunc
 	name         string
 	bufferLength int64
 	queue        chan *Event
@@ -156,7 +168,11 @@ type MultiChannelledLog struct {
 
 // NewMultiChannelledLog a new logger instance with given logger provider and config.
 func NewMultiChannelledLog(name string, bufferLength int64) *MultiChannelledLog {
+	ctx, _, finished := process.GetManager().AddTypedContext(context.Background(), fmt.Sprintf("Logger: %s", name), process.SystemProcessType, false)
+
 	m := &MultiChannelledLog{
+		ctx:      ctx,
+		finished: finished,
 		name:     name,
 		queue:    make(chan *Event, bufferLength),
 		flush:    make(chan bool),
@@ -277,6 +293,9 @@ func (m *MultiChannelledLog) Start() {
 		m.rwmutex.Unlock()
 		return
 	}
+	pprof.SetGoroutineLabels(m.ctx)
+	defer m.finished()
+
 	m.started = true
 	m.rwmutex.Unlock()
 	paused := false
modules/log/log.go

@@ -31,7 +31,7 @@ func newLogger(name string, buffer int64) *MultiChannelledLogger {
 
 // SetLogger sets new logger instance with given logger provider and config.
 func (l *MultiChannelledLogger) SetLogger(name, provider, config string) error {
-	eventLogger, err := NewChannelledLog(name, provider, config, l.bufferLength)
+	eventLogger, err := NewChannelledLog(l.ctx, name, provider, config, l.bufferLength)
 	if err != nil {
 		return fmt.Errorf("Failed to create sublogger (%s): %v", name, err)
 	}
modules/nosql/manager.go

@@ -5,10 +5,12 @@
 package nosql
 
 import (
+	"context"
 	"strconv"
 	"sync"
 	"time"
 
+	"code.gitea.io/gitea/modules/process"
 	"github.com/go-redis/redis/v8"
 	"github.com/syndtr/goleveldb/leveldb"
 )
@@ -17,6 +19,8 @@ var manager *Manager
 
 // Manager is the nosql connection manager
 type Manager struct {
+	ctx      context.Context
+	finished context.CancelFunc
 	mutex sync.Mutex
 
 	RedisConnections   map[string]*redisClientHolder
@@ -46,7 +50,10 @@ func init() {
 // GetManager returns a Manager and initializes one as singleton is there's none yet
 func GetManager() *Manager {
 	if manager == nil {
+		ctx, _, finished := process.GetManager().AddTypedContext(context.Background(), "Service: NoSQL", process.SystemProcessType, false)
 		manager = &Manager{
+			ctx:                ctx,
+			finished:           finished,
 			RedisConnections:   make(map[string]*redisClientHolder),
 			LevelDBConnections: make(map[string]*levelDBHolder),
 		}
@@ -7,6 +7,7 @@ package nosql
 import (
 	"fmt"
 	"path"
+	"runtime/pprof"
 	"strconv"
 	"strings"
 
@@ -50,7 +51,31 @@ func (m *Manager) CloseLevelDB(connection string) error {
 }
 
 // GetLevelDB gets a levelDB for a particular connection
-func (m *Manager) GetLevelDB(connection string) (*leveldb.DB, error) {
+func (m *Manager) GetLevelDB(connection string) (db *leveldb.DB, err error) {
+	// Because we want to associate any goroutines created by this call with the main nosqldb context we need to
+	// wrap this in a goroutine labelled with the nosqldb context
+	done := make(chan struct{})
+	var recovered interface{}
+	go func() {
+		defer func() {
+			recovered = recover()
+			if recovered != nil {
+				log.Critical("PANIC during GetLevelDB: %v\nStacktrace: %s", recovered, log.Stack(2))
+			}
+			close(done)
+		}()
+		pprof.SetGoroutineLabels(m.ctx)
+
+		db, err = m.getLevelDB(connection)
+	}()
+	<-done
+	if recovered != nil {
+		panic(recovered)
+	}
+	return
+}
+
+func (m *Manager) getLevelDB(connection string) (*leveldb.DB, error) {
 	// Convert the provided connection description to the common format
 	uri := ToLevelDBURI(connection)
 
@@ -168,15 +193,18 @@ func (m *Manager) GetLevelDB(connection string) (*leveldb.DB, error) {
 	if err != nil {
 		if !errors.IsCorrupted(err) {
 			if strings.Contains(err.Error(), "resource temporarily unavailable") {
-				return nil, fmt.Errorf("unable to lock level db at %s: %w", dataDir, err)
-			}
-
-			return nil, fmt.Errorf("unable to open level db at %s: %w", dataDir, err)
-		}
-		db.db, err = leveldb.RecoverFile(dataDir, opts)
-		if err != nil {
+				err = fmt.Errorf("unable to lock level db at %s: %w", dataDir, err)
+				return nil, err
+			}
+
+			err = fmt.Errorf("unable to open level db at %s: %w", dataDir, err)
 			return nil, err
 		}
+		db.db, err = leveldb.RecoverFile(dataDir, opts)
+	}
+
+	if err != nil {
+		return nil, err
 	}
 
 	for _, name := range db.name {
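The wrapper above is the pattern this PR uses whenever work started from an unlabelled caller should be attributed to a service context: run the work on a fresh goroutine carrying the service's pprof labels, then re-raise any panic in the caller. A minimal standalone sketch of the same idea (runLabelled and the label values here are illustrative, not part of the PR):

package main

import (
	"context"
	"fmt"
	"runtime/pprof"
)

// runLabelled runs fn on a goroutine that carries the pprof labels of ctx,
// re-panicking in the caller if fn panics. The real code also logs the
// panic with a stacktrace before re-raising it.
func runLabelled(ctx context.Context, fn func()) {
	done := make(chan struct{})
	var recovered interface{}
	go func() {
		defer func() {
			recovered = recover()
			close(done)
		}()
		pprof.SetGoroutineLabels(ctx)
		fn()
	}()
	<-done
	if recovered != nil {
		panic(recovered)
	}
}

func main() {
	// Assumed label value for illustration only.
	ctx := pprof.WithLabels(context.Background(), pprof.Labels("process-description", "Service: NoSQL"))
	runLabelled(ctx, func() { fmt.Println("running with nosqldb labels") })
}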
@@ -8,6 +8,7 @@ import (
 	"crypto/tls"
 	"net/url"
 	"path"
+	"runtime/pprof"
 	"strconv"
 	"strings"
 
@@ -43,7 +44,31 @@ func (m *Manager) CloseRedisClient(connection string) error {
 }
 
 // GetRedisClient gets a redis client for a particular connection
-func (m *Manager) GetRedisClient(connection string) redis.UniversalClient {
+func (m *Manager) GetRedisClient(connection string) (client redis.UniversalClient) {
+	// Because we want to associate any goroutines created by this call with the main nosqldb context we need to
+	// wrap this in a goroutine labelled with the nosqldb context
+	done := make(chan struct{})
+	var recovered interface{}
+	go func() {
+		defer func() {
+			recovered = recover()
+			if recovered != nil {
+				log.Critical("PANIC during GetRedisClient: %v\nStacktrace: %s", recovered, log.Stack(2))
+			}
+			close(done)
+		}()
+		pprof.SetGoroutineLabels(m.ctx)
+
+		client = m.getRedisClient(connection)
+	}()
+	<-done
+	if recovered != nil {
+		panic(recovered)
+	}
+	return
+}
+
+func (m *Manager) getRedisClient(connection string) redis.UniversalClient {
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
 	client, ok := m.RedisConnections[connection]
@@ -7,6 +7,7 @@ package private
 import (
 	"context"
 	"fmt"
+	"io"
 	"net/http"
 	"net/url"
 	"time"
@@ -189,3 +190,25 @@ func RemoveLogger(ctx context.Context, group, name string) (int, string) {
 
 	return http.StatusOK, "Removed"
 }
+
+// Processes returns the current processes from this gitea instance
+func Processes(ctx context.Context, out io.Writer, flat, noSystem, stacktraces, json bool, cancel string) (int, string) {
+	reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/processes?flat=%t&no-system=%t&stacktraces=%t&json=%t&cancel-pid=%s", flat, noSystem, stacktraces, json, url.QueryEscape(cancel))
+
+	req := newInternalRequest(ctx, reqURL, "GET")
+	resp, err := req.Response()
+	if err != nil {
+		return http.StatusInternalServerError, fmt.Sprintf("Unable to contact gitea: %v", err.Error())
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return resp.StatusCode, decodeJSONError(resp).Err
+	}
+
+	_, err = io.Copy(out, resp.Body)
+	if err != nil {
+		return http.StatusInternalServerError, err.Error()
+	}
+	return http.StatusOK, ""
+}
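A sketch of how a CLI action might drive this helper, assuming the usual urfave/cli plumbing around it (runProcessesSketch is illustrative, not the PR's command):

package cmd

import (
	"context"
	"fmt"
	"net/http"
	"os"

	"code.gitea.io/gitea/modules/private"
)

// runProcessesSketch shows the call shape; flag parsing is elided.
func runProcessesSketch(ctx context.Context) error {
	statusCode, msg := private.Processes(ctx, os.Stdout,
		false, // flat: render as a tree
		true,  // noSystem: hide system processes
		true,  // stacktraces
		false, // json
		"")    // cancel: no PID to cancel
	if statusCode != http.StatusOK {
		return fmt.Errorf("failed to get processes: %s", msg)
	}
	return nil
}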
26 modules/process/error.go Normal file

@@ -0,0 +1,26 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package process
+
+import "fmt"
+
+// Error is a wrapped error describing the error results of Process Execution
+type Error struct {
+	PID         IDType
+	Description string
+	Err         error
+	CtxErr      error
+	Stdout      string
+	Stderr      string
+}
+
+func (err *Error) Error() string {
+	return fmt.Sprintf("exec(%s:%s) failed: %v(%v) stdout: %s stderr: %s", err.PID, err.Description, err.Err, err.CtxErr, err.Stdout, err.Stderr)
+}
+
+// Unwrap implements the unwrappable implicit interface for go1.13 Unwrap()
+func (err *Error) Unwrap() error {
+	return err.Err
+}
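Because Error implements Unwrap, callers can inspect failures with the standard errors helpers. A hedged sketch (the command and description are made up):

package main

import (
	"errors"
	"fmt"

	"code.gitea.io/gitea/modules/process"
)

func main() {
	// Run a command through the process manager; Exec is defined in the
	// new manager_exec.go below.
	_, _, err := process.GetManager().Exec("example: list files", "ls", "-l")
	var pErr *process.Error
	if errors.As(err, &pErr) {
		// The wrapped error keeps the PID, stderr, and the context error
		// (e.g. context.DeadlineExceeded on timeout) for diagnosis.
		fmt.Println("stderr was:", pErr.Stderr)
	}
}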
@@ -6,13 +6,8 @@
 package process
 
 import (
-	"bytes"
 	"context"
-	"fmt"
-	"io"
-	"os/exec"
 	"runtime/pprof"
-	"sort"
 	"strconv"
 	"sync"
 	"time"
@@ -30,6 +25,18 @@ var (
 	DefaultContext = context.Background()
 )
 
+// DescriptionPProfLabel is a label set on goroutines that have a process attached
+const DescriptionPProfLabel = "process-description"
+
+// PIDPProfLabel is a label set on goroutines that have a process attached
+const PIDPProfLabel = "pid"
+
+// PPIDPProfLabel is a label set on goroutines that have a process attached
+const PPIDPProfLabel = "ppid"
+
+// ProcessTypePProfLabel is a label set on goroutines that have a process attached
+const ProcessTypePProfLabel = "process-type"
+
 // IDType is a pid type
 type IDType string
 
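These four labels are ordinary pprof labels, so they can be attached and read back with the standard runtime/pprof API. A small sketch using the same label keys (the values here are made up):

package main

import (
	"context"
	"fmt"
	"runtime/pprof"
)

func main() {
	// Attach the same labels the process manager uses to a context...
	ctx := pprof.WithLabels(context.Background(), pprof.Labels(
		"process-description", "GET: /user/events",
		"pid", "1024", "ppid", "", "process-type", "request",
	))
	// ...and adopt them on the current goroutine so they show up in the
	// "goroutine" profile.
	pprof.SetGoroutineLabels(ctx)

	// Labels can also be read back from the context.
	if desc, ok := pprof.Label(ctx, "process-description"); ok {
		fmt.Println("description:", desc)
	}
}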
@@ -44,14 +51,14 @@ type Manager struct {
 	next     int64
 	lastTime int64
 
-	processes map[IDType]*Process
+	processMap map[IDType]*process
 }
 
 // GetManager returns a Manager and initializes one as singleton if there's none yet
 func GetManager() *Manager {
 	managerInit.Do(func() {
 		manager = &Manager{
-			processes: make(map[IDType]*Process),
+			processMap: make(map[IDType]*process),
 			next: 1,
 		}
 	})
@@ -69,12 +76,25 @@ func GetManager() *Manager {
 func (pm *Manager) AddContext(parent context.Context, description string) (ctx context.Context, cancel context.CancelFunc, finished FinishedFunc) {
 	ctx, cancel = context.WithCancel(parent)
 
-	ctx, pid, finished := pm.Add(ctx, description, cancel)
-
-	return &Context{
-		Context: ctx,
-		pid:     pid,
-	}, cancel, finished
+	ctx, _, finished = pm.Add(ctx, description, cancel, NormalProcessType, true)
+
+	return ctx, cancel, finished
+}
+
+// AddTypedContext creates a new context and adds it as a process. Once the process is finished, finished must be called
+// to remove the process from the process table. It should not be called until the process is finished but must always be called.
+//
+// cancel should be used to cancel the returned context, however it will not remove the process from the process table.
+// finished will cancel the returned context and remove it from the process table.
+//
+// Most processes will not need to use the cancel function but there will be cases whereby you want to cancel the process but not immediately remove it from the
+// process table.
+func (pm *Manager) AddTypedContext(parent context.Context, description, processType string, currentlyRunning bool) (ctx context.Context, cancel context.CancelFunc, finished FinishedFunc) {
+	ctx, cancel = context.WithCancel(parent)
+
+	ctx, _, finished = pm.Add(ctx, description, cancel, processType, currentlyRunning)
+
+	return ctx, cancel, finished
 }
 
 // AddContextTimeout creates a new context and add it as a process. Once the process is finished, finished must be called
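A hedged usage sketch of AddTypedContext as documented above; ExampleService and runService are hypothetical names, not part of the PR:

package main

import (
	"context"

	"code.gitea.io/gitea/modules/process"
)

func main() {
	// Register a long-running system service in the process table. The
	// returned context carries the pprof labels; finished both cancels it
	// and removes the entry from the table.
	ctx, _, finished := process.GetManager().AddTypedContext(
		context.Background(), "Service: ExampleService",
		process.SystemProcessType, true /* this goroutine runs the service */)
	defer finished()

	runService(ctx) // hypothetical service loop that honours ctx.Done()
}

func runService(ctx context.Context) { <-ctx.Done() }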
@@ -90,52 +110,61 @@ func (pm *Manager) AddContextTimeout(parent context.Context, timeout time.Durati
 		// it's meaningless to use timeout <= 0, and it must be a bug! so we must panic here to tell developers to make the timeout correct
 		panic("the timeout must be greater than zero, otherwise the context will be cancelled immediately")
 	}
 
 	ctx, cancel = context.WithTimeout(parent, timeout)
 
-	ctx, pid, finished := pm.Add(ctx, description, cancel)
-
-	return &Context{
-		Context: ctx,
-		pid:     pid,
-	}, cancel, finished
+	ctx, _, finished = pm.Add(ctx, description, cancel, NormalProcessType, true)
+
+	return ctx, cancel, finished
 }
 
 // Add create a new process
-func (pm *Manager) Add(ctx context.Context, description string, cancel context.CancelFunc) (context.Context, IDType, FinishedFunc) {
+func (pm *Manager) Add(ctx context.Context, description string, cancel context.CancelFunc, processType string, currentlyRunning bool) (context.Context, IDType, FinishedFunc) {
 	parentPID := GetParentPID(ctx)
 
 	pm.mutex.Lock()
 	start, pid := pm.nextPID()
 
-	parent := pm.processes[parentPID]
+	parent := pm.processMap[parentPID]
 	if parent == nil {
 		parentPID = ""
 	}
 
-	process := &Process{
+	process := &process{
 		PID:         pid,
 		ParentPID:   parentPID,
 		Description: description,
 		Start:       start,
 		Cancel:      cancel,
+		Type:        processType,
 	}
 
-	finished := func() {
-		cancel()
-		pm.remove(process)
-		pprof.SetGoroutineLabels(ctx)
-	}
-
-	if parent != nil {
-		parent.AddChild(process)
-	}
-	pm.processes[pid] = process
+	var finished FinishedFunc
+	if currentlyRunning {
+		finished = func() {
+			cancel()
+			pm.remove(process)
+			pprof.SetGoroutineLabels(ctx)
+		}
+	} else {
+		finished = func() {
+			cancel()
+			pm.remove(process)
+		}
+	}
+
+	pm.processMap[pid] = process
 	pm.mutex.Unlock()
 
-	pprofCtx := pprof.WithLabels(ctx, pprof.Labels("process-description", description, "ppid", string(parentPID), "pid", string(pid)))
-	pprof.SetGoroutineLabels(pprofCtx)
+	pprofCtx := pprof.WithLabels(ctx, pprof.Labels(DescriptionPProfLabel, description, PPIDPProfLabel, string(parentPID), PIDPProfLabel, string(pid), ProcessTypePProfLabel, processType))
+	if currentlyRunning {
+		pprof.SetGoroutineLabels(pprofCtx)
+	}
 
-	return pprofCtx, pid, finished
+	return &Context{
+		Context: pprofCtx,
+		pid:     pid,
+	}, pid, finished
 }
 
 // nextPID will return the next available PID. pm.mutex should already be locked.
@@ -160,142 +189,24 @@ func (pm *Manager) nextPID() (start time.Time, pid IDType) {
 // Remove a process from the ProcessManager.
 func (pm *Manager) Remove(pid IDType) {
 	pm.mutex.Lock()
-	delete(pm.processes, pid)
+	delete(pm.processMap, pid)
 	pm.mutex.Unlock()
 }
 
-func (pm *Manager) remove(process *Process) {
+func (pm *Manager) remove(process *process) {
 	pm.mutex.Lock()
-	if p := pm.processes[process.PID]; p == process {
-		delete(pm.processes, process.PID)
-	}
-	parent := pm.processes[process.ParentPID]
-	pm.mutex.Unlock()
-
-	if parent == nil {
-		return
-	}
-
-	parent.RemoveChild(process)
+	defer pm.mutex.Unlock()
+	if p := pm.processMap[process.PID]; p == process {
+		delete(pm.processMap, process.PID)
+	}
 }
 
 // Cancel a process in the ProcessManager.
 func (pm *Manager) Cancel(pid IDType) {
 	pm.mutex.Lock()
-	process, ok := pm.processes[pid]
+	process, ok := pm.processMap[pid]
 	pm.mutex.Unlock()
-	if ok {
+	if ok && process.Type != SystemProcessType {
 		process.Cancel()
 	}
 }
-
-// Processes gets the processes in a thread safe manner
-func (pm *Manager) Processes(onlyRoots bool) []*Process {
-	pm.mutex.Lock()
-	processes := make([]*Process, 0, len(pm.processes))
-	if onlyRoots {
-		for _, process := range pm.processes {
-			if _, has := pm.processes[process.ParentPID]; !has {
-				processes = append(processes, process)
-			}
-		}
-	} else {
-		for _, process := range pm.processes {
-			processes = append(processes, process)
-		}
-	}
-	pm.mutex.Unlock()
-
-	sort.Slice(processes, func(i, j int) bool {
-		left, right := processes[i], processes[j]
-
-		return left.Start.Before(right.Start)
-	})
-
-	return processes
-}
-
-// Exec a command and use the default timeout.
-func (pm *Manager) Exec(desc, cmdName string, args ...string) (string, string, error) {
-	return pm.ExecDir(DefaultContext, -1, "", desc, cmdName, args...)
-}
-
-// ExecTimeout a command and use a specific timeout duration.
-func (pm *Manager) ExecTimeout(timeout time.Duration, desc, cmdName string, args ...string) (string, string, error) {
-	return pm.ExecDir(DefaultContext, timeout, "", desc, cmdName, args...)
-}
-
-// ExecDir a command and use the default timeout.
-func (pm *Manager) ExecDir(ctx context.Context, timeout time.Duration, dir, desc, cmdName string, args ...string) (string, string, error) {
-	return pm.ExecDirEnv(ctx, timeout, dir, desc, nil, cmdName, args...)
-}
-
-// ExecDirEnv runs a command in given path and environment variables, and waits for its completion
-// up to the given timeout (or DefaultTimeout if -1 is given).
-// Returns its complete stdout and stderr
-// outputs and an error, if any (including timeout)
-func (pm *Manager) ExecDirEnv(ctx context.Context, timeout time.Duration, dir, desc string, env []string, cmdName string, args ...string) (string, string, error) {
-	return pm.ExecDirEnvStdIn(ctx, timeout, dir, desc, env, nil, cmdName, args...)
-}
-
-// ExecDirEnvStdIn runs a command in given path and environment variables with provided stdIN, and waits for its completion
-// up to the given timeout (or DefaultTimeout if -1 is given).
-// Returns its complete stdout and stderr
-// outputs and an error, if any (including timeout)
-func (pm *Manager) ExecDirEnvStdIn(ctx context.Context, timeout time.Duration, dir, desc string, env []string, stdIn io.Reader, cmdName string, args ...string) (string, string, error) {
-	if timeout <= 0 {
-		timeout = 60 * time.Second
-	}
-
-	stdOut := new(bytes.Buffer)
-	stdErr := new(bytes.Buffer)
-
-	ctx, _, finished := pm.AddContextTimeout(ctx, timeout, desc)
-	defer finished()
-
-	cmd := exec.CommandContext(ctx, cmdName, args...)
-	cmd.Dir = dir
-	cmd.Env = env
-	cmd.Stdout = stdOut
-	cmd.Stderr = stdErr
-	if stdIn != nil {
-		cmd.Stdin = stdIn
-	}
-
-	if err := cmd.Start(); err != nil {
-		return "", "", err
-	}
-
-	err := cmd.Wait()
-	if err != nil {
-		err = &Error{
-			PID:         GetPID(ctx),
-			Description: desc,
-			Err:         err,
-			CtxErr:      ctx.Err(),
-			Stdout:      stdOut.String(),
-			Stderr:      stdErr.String(),
-		}
-	}
-
-	return stdOut.String(), stdErr.String(), err
-}
-
-// Error is a wrapped error describing the error results of Process Execution
-type Error struct {
-	PID         IDType
-	Description string
-	Err         error
-	CtxErr      error
-	Stdout      string
-	Stderr      string
-}
-
-func (err *Error) Error() string {
-	return fmt.Sprintf("exec(%s:%s) failed: %v(%v) stdout: %s stderr: %s", err.PID, err.Description, err.Err, err.CtxErr, err.Stdout, err.Stderr)
-}
-
-// Unwrap implements the unwrappable implicit interface for go1.13 Unwrap()
-func (err *Error) Unwrap() error {
-	return err.Err
-}
79 modules/process/manager_exec.go Normal file

@@ -0,0 +1,79 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package process
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"os/exec"
+	"time"
+)
+
+// Exec a command and use the default timeout.
+func (pm *Manager) Exec(desc, cmdName string, args ...string) (string, string, error) {
+	return pm.ExecDir(DefaultContext, -1, "", desc, cmdName, args...)
+}
+
+// ExecTimeout a command and use a specific timeout duration.
+func (pm *Manager) ExecTimeout(timeout time.Duration, desc, cmdName string, args ...string) (string, string, error) {
+	return pm.ExecDir(DefaultContext, timeout, "", desc, cmdName, args...)
+}
+
+// ExecDir a command and use the default timeout.
+func (pm *Manager) ExecDir(ctx context.Context, timeout time.Duration, dir, desc, cmdName string, args ...string) (string, string, error) {
+	return pm.ExecDirEnv(ctx, timeout, dir, desc, nil, cmdName, args...)
+}
+
+// ExecDirEnv runs a command in given path and environment variables, and waits for its completion
+// up to the given timeout (or DefaultTimeout if -1 is given).
+// Returns its complete stdout and stderr
+// outputs and an error, if any (including timeout)
+func (pm *Manager) ExecDirEnv(ctx context.Context, timeout time.Duration, dir, desc string, env []string, cmdName string, args ...string) (string, string, error) {
+	return pm.ExecDirEnvStdIn(ctx, timeout, dir, desc, env, nil, cmdName, args...)
+}
+
+// ExecDirEnvStdIn runs a command in given path and environment variables with provided stdIN, and waits for its completion
+// up to the given timeout (or DefaultTimeout if timeout <= 0 is given).
+// Returns its complete stdout and stderr
+// outputs and an error, if any (including timeout)
+func (pm *Manager) ExecDirEnvStdIn(ctx context.Context, timeout time.Duration, dir, desc string, env []string, stdIn io.Reader, cmdName string, args ...string) (string, string, error) {
+	if timeout <= 0 {
+		timeout = 60 * time.Second
+	}
+
+	stdOut := new(bytes.Buffer)
+	stdErr := new(bytes.Buffer)
+
+	ctx, _, finished := pm.AddContextTimeout(ctx, timeout, desc)
+	defer finished()
+
+	cmd := exec.CommandContext(ctx, cmdName, args...)
+	cmd.Dir = dir
+	cmd.Env = env
+	cmd.Stdout = stdOut
+	cmd.Stderr = stdErr
+	if stdIn != nil {
+		cmd.Stdin = stdIn
+	}
+
+	if err := cmd.Start(); err != nil {
+		return "", "", err
+	}
+
+	err := cmd.Wait()
+	if err != nil {
+		err = &Error{
+			PID:         GetPID(ctx),
+			Description: desc,
+			Err:         err,
+			CtxErr:      ctx.Err(),
+			Stdout:      stdOut.String(),
+			Stderr:      stdErr.String(),
+		}
+	}
+
+	return stdOut.String(), stdErr.String(), err
+}
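A short usage sketch of the relocated helpers (the commands and descriptions are illustrative):

package main

import (
	"fmt"
	"time"

	"code.gitea.io/gitea/modules/process"
)

func main() {
	pm := process.GetManager()

	// Run with the default 60 second timeout.
	stdout, stderr, err := pm.Exec("example: git version", "git", "version")
	fmt.Println(stdout, stderr, err)

	// Or bound the command explicitly; on timeout the returned error is a
	// *process.Error whose CtxErr reports context.DeadlineExceeded.
	_, _, err = pm.ExecTimeout(5*time.Second, "example: sleep", "sleep", "10")
	fmt.Println(err)
}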
355 modules/process/manager_stacktraces.go Normal file

@@ -0,0 +1,355 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package process
+
+import (
+	"fmt"
+	"io"
+	"runtime/pprof"
+	"sort"
+	"time"
+
+	"github.com/google/pprof/profile"
+)
+
+// StackEntry is an entry on a stacktrace
+type StackEntry struct {
+	Function string
+	File     string
+	Line     int
+}
+
+// Label represents a pprof label assigned to goroutine stack
+type Label struct {
+	Name  string
+	Value string
+}
+
+// Stack is a stacktrace relating to a goroutine. (Multiple goroutines may have the same stacktrace)
+type Stack struct {
+	Count       int64 // Number of goroutines with this stack trace
+	Description string
+	Labels      []*Label      `json:",omitempty"`
+	Entry       []*StackEntry `json:",omitempty"`
+}
+
+// A Process is a combined representation of a Process and a Stacktrace for the goroutines associated with it
+type Process struct {
+	PID         IDType
+	ParentPID   IDType
+	Description string
+	Start       time.Time
+	Type        string
+
+	Children []*Process `json:",omitempty"`
+	Stacks   []*Stack   `json:",omitempty"`
+}
+
+// Processes gets the processes in a thread safe manner
+func (pm *Manager) Processes(flat, noSystem bool) ([]*Process, int) {
+	pm.mutex.Lock()
+	processCount := len(pm.processMap)
+	processes := make([]*Process, 0, len(pm.processMap))
+	if flat {
+		for _, process := range pm.processMap {
+			if noSystem && process.Type == SystemProcessType {
+				continue
+			}
+			processes = append(processes, process.toProcess())
+		}
+	} else {
+		// We need our own processMap
+		processMap := map[IDType]*Process{}
+		for _, internalProcess := range pm.processMap {
+			process, ok := processMap[internalProcess.PID]
+			if !ok {
+				process = internalProcess.toProcess()
+				processMap[process.PID] = process
+			}
+
+			// Check its parent
+			if process.ParentPID == "" {
+				processes = append(processes, process)
+				continue
+			}
+
+			internalParentProcess, ok := pm.processMap[internalProcess.ParentPID]
+			if ok {
+				parentProcess, ok := processMap[process.ParentPID]
+				if !ok {
+					parentProcess = internalParentProcess.toProcess()
+					processMap[parentProcess.PID] = parentProcess
+				}
+				parentProcess.Children = append(parentProcess.Children, process)
+				continue
+			}
+
+			processes = append(processes, process)
+		}
+	}
+	pm.mutex.Unlock()
+
+	if !flat && noSystem {
+		for i := 0; i < len(processes); i++ {
+			process := processes[i]
+			if process.Type != SystemProcessType {
+				continue
+			}
+			processes[len(processes)-1], processes[i] = processes[i], processes[len(processes)-1]
+			processes = append(processes[:len(processes)-1], process.Children...)
+			i--
+		}
+	}
+
+	// Sort by process' start time. Oldest process appears first.
+	sort.Slice(processes, func(i, j int) bool {
+		left, right := processes[i], processes[j]
+
+		return left.Start.Before(right.Start)
+	})
+
+	return processes, processCount
+}
+
+// ProcessStacktraces gets the processes and stacktraces in a thread safe manner
+func (pm *Manager) ProcessStacktraces(flat, noSystem bool) ([]*Process, int, int64, error) {
+	var stacks *profile.Profile
+	var err error
+
+	// We cannot use the pm.ProcessMap here because we will release the mutex ...
+	processMap := map[IDType]*Process{}
+	processCount := 0
+
+	// Lock the manager
+	pm.mutex.Lock()
+	processCount = len(pm.processMap)
+
+	// Add a defer to unlock in case there is a panic
+	unlocked := false
+	defer func() {
+		if !unlocked {
+			pm.mutex.Unlock()
+		}
+	}()
+
+	processes := make([]*Process, 0, len(pm.processMap))
+	if flat {
+		for _, internalProcess := range pm.processMap {
+			process := internalProcess.toProcess()
+			processMap[process.PID] = process
+			if noSystem && internalProcess.Type == SystemProcessType {
+				continue
+			}
+			processes = append(processes, process)
+		}
+	} else {
+		for _, internalProcess := range pm.processMap {
+			process, ok := processMap[internalProcess.PID]
+			if !ok {
+				process = internalProcess.toProcess()
+				processMap[process.PID] = process
+			}
+
+			// Check its parent
+			if process.ParentPID == "" {
+				processes = append(processes, process)
+				continue
+			}
+
+			internalParentProcess, ok := pm.processMap[internalProcess.ParentPID]
+			if ok {
+				parentProcess, ok := processMap[process.ParentPID]
+				if !ok {
+					parentProcess = internalParentProcess.toProcess()
+					processMap[parentProcess.PID] = parentProcess
+				}
+				parentProcess.Children = append(parentProcess.Children, process)
+				continue
+			}
+
+			processes = append(processes, process)
+		}
+	}
+
+	// Now from within the lock we need to get the goroutines.
+	// Why? If we release the lock then between filling the above map and getting
+	// the stacktraces another process could be created which would then look like a dead process below
+	reader, writer := io.Pipe()
+	defer reader.Close()
+	go func() {
+		err := pprof.Lookup("goroutine").WriteTo(writer, 0)
+		_ = writer.CloseWithError(err)
+	}()
+	stacks, err = profile.Parse(reader)
+	if err != nil {
+		return nil, 0, 0, err
+	}
+
+	// Unlock the mutex
+	pm.mutex.Unlock()
+	unlocked = true
+
+	goroutineCount := int64(0)
+
+	// Now walk through the "Sample" slice in the goroutines stack
+	for _, sample := range stacks.Sample {
+		// In the "goroutine" pprof profile each sample represents one or more goroutines
+		// with the same labels and stacktraces.
+
+		// We will represent each goroutine by a `Stack`
+		stack := &Stack{}
+
+		// Add the non-process associated labels from the goroutine sample to the Stack
+		for name, value := range sample.Label {
+			if name == DescriptionPProfLabel || name == PIDPProfLabel || (!flat && name == PPIDPProfLabel) || name == ProcessTypePProfLabel {
+				continue
+			}
+
+			// Labels from the "goroutine" pprof profile only have one value.
+			// This is because the underlying representation is a map[string]string
+			if len(value) != 1 {
+				// Unexpected...
+				return nil, 0, 0, fmt.Errorf("label: %s in goroutine stack with unexpected number of values: %v", name, value)
+			}
+
+			stack.Labels = append(stack.Labels, &Label{Name: name, Value: value[0]})
+		}
+
+		// The number of goroutines that this sample represents is the `stack.Value[0]`
+		stack.Count = sample.Value[0]
+		goroutineCount += stack.Count
+
+		// Now we want to associate this Stack with a Process.
+		var process *Process
+
+		// Try to get the PID from the goroutine labels
+		if pidvalue, ok := sample.Label[PIDPProfLabel]; ok && len(pidvalue) == 1 {
+			pid := IDType(pidvalue[0])
+
+			// Now try to get the process from our map
+			process, ok = processMap[pid]
+			if !ok && pid != "" {
+				// This means that no process has been found in the process map - but there was a process PID
+				// Therefore this goroutine belongs to a dead process and it has escaped control of the process as it
+				// should have died with the process context cancellation.
+
+				// We need to create a dead process holder for this process and label it appropriately
+
+				// get the parent PID
+				ppid := IDType("")
+				if value, ok := sample.Label[PPIDPProfLabel]; ok && len(value) == 1 {
+					ppid = IDType(value[0])
+				}
+
+				// format the description
+				description := "(dead process)"
+				if value, ok := sample.Label[DescriptionPProfLabel]; ok && len(value) == 1 {
+					description = value[0] + " " + description
+				}
+
+				// override the type of the process to "none" but add the old type as a label on the first stack
+				ptype := NoneProcessType
+				if value, ok := sample.Label[ProcessTypePProfLabel]; ok && len(value) == 1 {
+					stack.Labels = append(stack.Labels, &Label{Name: ProcessTypePProfLabel, Value: value[0]})
+				}
+				process = &Process{
+					PID:         pid,
+					ParentPID:   ppid,
+					Description: description,
+					Type:        ptype,
+				}
+
+				// Now add the dead process back to the map and tree so we don't go back through this again.
+				processMap[process.PID] = process
+				added := false
+				if process.ParentPID != "" && !flat {
+					if parent, ok := processMap[process.ParentPID]; ok {
+						parent.Children = append(parent.Children, process)
+						added = true
+					}
+				}
+				if !added {
+					processes = append(processes, process)
+				}
+			}
+		}
+
+		if process == nil {
+			// This means that the sample we're looking at has no PID label
+			var ok bool
+			process, ok = processMap[""]
+			if !ok {
+				// this is the first time we've come across an unassociated goroutine so create a "process" to hold them
+				process = &Process{
+					Description: "(unassociated)",
+					Type:        NoneProcessType,
+				}
+				processMap[process.PID] = process
+				processes = append(processes, process)
+			}
+		}
+
+		// The sample.Location represents a stack trace for this goroutine,
+		// however each Location can represent multiple lines (mostly due to inlining)
+		// so we need to walk the lines too
+		for _, location := range sample.Location {
+			for _, line := range location.Line {
+				entry := &StackEntry{
+					Function: line.Function.Name,
+					File:     line.Function.Filename,
+					Line:     int(line.Line),
+				}
+				stack.Entry = append(stack.Entry, entry)
+			}
+		}
+
+		// Now we need a short descriptive name to call the stack trace when it is folded and,
+		// assuming the stack trace has some lines, we'll choose the bottom of the stack (i.e. the
+		// initial function that started the stack trace.) The top of the stack is unlikely to
+		// be very helpful as a lot of the time it will be runtime.select or some other call into
+		// a std library.
+		stack.Description = "(unknown)"
+		if len(stack.Entry) > 0 {
+			stack.Description = stack.Entry[len(stack.Entry)-1].Function
+		}
+
+		process.Stacks = append(process.Stacks, stack)
+	}
+
+	// restrict to not show system processes
+	if noSystem {
+		for i := 0; i < len(processes); i++ {
+			process := processes[i]
+			if process.Type != SystemProcessType && process.Type != NoneProcessType {
+				continue
+			}
+			processes[len(processes)-1], processes[i] = processes[i], processes[len(processes)-1]
+			processes = append(processes[:len(processes)-1], process.Children...)
+			i--
+		}
+	}
+
+	// Now finally re-sort the processes. Newest process appears first
+	after := func(processes []*Process) func(i, j int) bool {
+		return func(i, j int) bool {
+			left, right := processes[i], processes[j]
+			return left.Start.After(right.Start)
+		}
+	}
+	sort.Slice(processes, after(processes))
+	if !flat {
+		var sortChildren func(process *Process)
+		sortChildren = func(process *Process) {
+			sort.Slice(process.Children, after(process.Children))
+			for _, child := range process.Children {
+				sortChildren(child)
+			}
+		}
+		for _, process := range processes {
+			sortChildren(process)
+		}
+	}
+
+	return processes, processCount, goroutineCount, err
+}
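The profile-piping trick in ProcessStacktraces above can be reproduced standalone; this sketch parses the current process's goroutine profile and prints the per-sample goroutine counts and labels:

package main

import (
	"fmt"
	"io"
	"runtime/pprof"

	"github.com/google/pprof/profile"
)

func main() {
	// Pipe the binary "goroutine" profile straight into the google/pprof
	// parser, as the manager does: no temporary buffer is needed because
	// WriteTo runs on its own goroutine.
	reader, writer := io.Pipe()
	go func() {
		err := pprof.Lookup("goroutine").WriteTo(writer, 0)
		_ = writer.CloseWithError(err)
	}()
	prof, err := profile.Parse(reader)
	if err != nil {
		panic(err)
	}

	// Each sample counts the goroutines sharing one label set and stack.
	for _, sample := range prof.Sample {
		fmt.Printf("%d goroutine(s), labels=%v\n", sample.Value[0], sample.Label)
	}
}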
@@ -22,7 +22,7 @@ func TestGetManager(t *testing.T) {
 }
 
 func TestManager_AddContext(t *testing.T) {
-	pm := Manager{processes: make(map[IDType]*Process), next: 1}
+	pm := Manager{processMap: make(map[IDType]*process), next: 1}
 
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -41,7 +41,7 @@ func TestManager_AddContext(t *testing.T) {
 }
 
 func TestManager_Cancel(t *testing.T) {
-	pm := Manager{processes: make(map[IDType]*Process), next: 1}
+	pm := Manager{processMap: make(map[IDType]*process), next: 1}
 
 	ctx, _, finished := pm.AddContext(context.Background(), "foo")
 	defer finished()
@@ -69,7 +69,7 @@ func TestManager_Cancel(t *testing.T) {
 }
 
 func TestManager_Remove(t *testing.T) {
-	pm := Manager{processes: make(map[IDType]*Process), next: 1}
+	pm := Manager{processMap: make(map[IDType]*process), next: 1}
 
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -85,7 +85,7 @@ func TestManager_Remove(t *testing.T) {
 
 	pm.Remove(GetPID(p2Ctx))
 
-	_, exists := pm.processes[GetPID(p2Ctx)]
+	_, exists := pm.processMap[GetPID(p2Ctx)]
 	assert.False(t, exists, "PID %d is in the list but shouldn't", GetPID(p2Ctx))
 }
@@ -6,61 +6,34 @@ package process
 
 import (
 	"context"
-	"sync"
 	"time"
 )
 
-// Process represents a working process inheriting from Gitea.
-type Process struct {
+var (
+	SystemProcessType  = "system"
+	RequestProcessType = "request"
+	NormalProcessType  = "normal"
+	NoneProcessType    = "none"
+)
+
+// process represents a working process inheriting from Gitea.
+type process struct {
 	PID         IDType // Process ID, not system one.
 	ParentPID   IDType
 	Description string
 	Start       time.Time
 	Cancel      context.CancelFunc
-
-	lock     sync.Mutex
-	children []*Process
+	Type        string
 }
 
-// Children gets the children of the process
-// Note: this function will behave nicely even if p is nil
-func (p *Process) Children() (children []*Process) {
-	if p == nil {
-		return
-	}
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	children = make([]*Process, len(p.children))
-	copy(children, p.children)
-	return children
-}
-
-// AddChild adds a child process
-// Note: this function will behave nicely even if p is nil
-func (p *Process) AddChild(child *Process) {
-	if p == nil {
-		return
-	}
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	p.children = append(p.children, child)
-}
-
-// RemoveChild removes a child process
-// Note: this function will behave nicely even if p is nil
-func (p *Process) RemoveChild(process *Process) {
-	if p == nil {
-		return
-	}
-
-	p.lock.Lock()
-	defer p.lock.Unlock()
-	for i, child := range p.children {
-		if child == process {
-			p.children = append(p.children[:i], p.children[i+1:]...)
-			return
-		}
-	}
+// toProcess converts a process to an externally usable Process
+func (p *process) toProcess() *Process {
+	process := &Process{
+		PID:         p.PID,
+		ParentPID:   p.ParentPID,
+		Description: p.Description,
+		Start:       p.Start,
+		Type:        p.Type,
+	}
+	return process
 }
@@ -7,6 +7,7 @@ package queue
 import (
 	"context"
 	"fmt"
+	"runtime/pprof"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -20,7 +21,6 @@ import (
 type ByteFIFOQueueConfiguration struct {
 	WorkerPoolConfiguration
 	Workers     int
-	Name        string
 	WaitOnEmpty bool
 }
@@ -153,6 +153,7 @@ func (q *ByteFIFOQueue) Flush(timeout time.Duration) error {
 
 // Run runs the bytefifo queue
 func (q *ByteFIFOQueue) Run(atShutdown, atTerminate func(func())) {
+	pprof.SetGoroutineLabels(q.baseCtx)
 	atShutdown(q.Shutdown)
 	atTerminate(q.Terminate)
 	log.Debug("%s: %s Starting", q.typ, q.name)
@@ -355,6 +356,7 @@ func (q *ByteFIFOQueue) Terminate() {
 	if err := q.byteFIFO.Close(); err != nil {
 		log.Error("Error whilst closing internal byte fifo in %s: %s: %v", q.typ, q.name, err)
 	}
+	q.baseCtxFinished()
 	log.Debug("%s: %s Terminated", q.typ, q.name)
 }
@@ -7,6 +7,7 @@ package queue
 import (
 	"context"
 	"fmt"
+	"runtime/pprof"
 	"sync/atomic"
 	"time"
 
@@ -20,7 +21,6 @@ const ChannelQueueType Type = "channel"
 type ChannelQueueConfiguration struct {
 	WorkerPoolConfiguration
 	Workers int
-	Name    string
 }
 
 // ChannelQueue implements Queue
@@ -84,6 +84,7 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro
 
 // Run starts to run the queue
 func (q *ChannelQueue) Run(atShutdown, atTerminate func(func())) {
+	pprof.SetGoroutineLabels(q.baseCtx)
 	atShutdown(q.Shutdown)
 	atTerminate(q.Terminate)
 	log.Debug("ChannelQueue: %s Starting", q.name)
@@ -169,6 +170,7 @@ func (q *ChannelQueue) Terminate() {
 	default:
 	}
 	q.terminateCtxCancel()
+	q.baseCtxFinished()
 	log.Debug("ChannelQueue: %s Terminated", q.name)
 }
@@ -34,9 +34,9 @@ func TestChannelQueue(t *testing.T) {
 			BlockTimeout: 1 * time.Second,
 			BoostTimeout: 5 * time.Minute,
 			BoostWorkers: 5,
+			Name:         "TestChannelQueue",
 		},
 		Workers: 0,
-		Name:    "TestChannelQueue",
 	}, &testData{})
 	assert.NoError(t, err)
@@ -7,6 +7,7 @@ package queue
 import (
 	"context"
 	"fmt"
+	"runtime/pprof"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -72,9 +73,9 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 				BoostTimeout: config.BoostTimeout,
 				BoostWorkers: config.BoostWorkers,
 				MaxWorkers:   config.MaxWorkers,
+				Name:         config.Name + "-channel",
 			},
 			Workers: config.Workers,
-			Name:    config.Name + "-channel",
 		}, exemplar)
 	if err != nil {
 		return nil, err
@@ -90,9 +91,9 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 				BoostTimeout: 5 * time.Minute,
 				BoostWorkers: 1,
 				MaxWorkers:   5,
+				Name:         config.Name + "-level",
 			},
 			Workers: 0,
-			Name:    config.Name + "-level",
 		},
 		DataDir: config.DataDir,
 	}
@@ -154,6 +155,7 @@ func (q *PersistableChannelQueue) PushBack(data Data) error {
 
 // Run starts to run the queue
 func (q *PersistableChannelQueue) Run(atShutdown, atTerminate func(func())) {
+	pprof.SetGoroutineLabels(q.channelQueue.baseCtx)
 	log.Debug("PersistableChannelQueue: %s Starting", q.delayedStarter.name)
 	_ = q.channelQueue.AddWorkers(q.channelQueue.workers, 0)
@@ -7,6 +7,7 @@ package queue
 import (
 	"context"
 	"fmt"
+	"runtime/pprof"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -97,6 +98,7 @@ func NewChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue
 
 // Run starts to run the queue
 func (q *ChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) {
+	pprof.SetGoroutineLabels(q.baseCtx)
 	atShutdown(q.Shutdown)
 	atTerminate(q.Terminate)
 	log.Debug("ChannelUniqueQueue: %s Starting", q.name)
@@ -226,6 +228,7 @@ func (q *ChannelUniqueQueue) Terminate() {
 	default:
 	}
 	q.terminateCtxCancel()
+	q.baseCtxFinished()
 	log.Debug("ChannelUniqueQueue: %s Terminated", q.name)
 }
@@ -32,9 +32,9 @@ func TestChannelUniqueQueue(t *testing.T) {
 			BlockTimeout: 1 * time.Second,
 			BoostTimeout: 5 * time.Minute,
 			BoostWorkers: 5,
+			Name:         "TestChannelQueue",
 		},
 		Workers: 0,
-		Name:    "TestChannelQueue",
 	}, &testData{})
 	assert.NoError(t, err)
@@ -6,6 +6,7 @@ package queue
 
 import (
 	"context"
+	"runtime/pprof"
 	"sync"
 	"time"
 
@@ -72,9 +73,9 @@ func NewPersistableChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interfac
 				BoostTimeout: config.BoostTimeout,
 				BoostWorkers: config.BoostWorkers,
 				MaxWorkers:   config.MaxWorkers,
+				Name:         config.Name + "-channel",
 			},
 			Workers: config.Workers,
-			Name:    config.Name + "-channel",
 		}, exemplar)
 	if err != nil {
 		return nil, err
@@ -90,9 +91,9 @@ func NewPersistableChannelUniqueQueue(handle HandlerFunc, cfg, exemplar interfac
 				BoostTimeout: 5 * time.Minute,
 				BoostWorkers: 1,
 				MaxWorkers:   5,
+				Name:         config.Name + "-level",
 			},
 			Workers: 0,
-			Name:    config.Name + "-level",
 		},
 		DataDir: config.DataDir,
 	}
@@ -183,6 +184,7 @@ func (q *PersistableChannelUniqueQueue) Has(data Data) (bool, error) {
 
 // Run starts to run the queue
 func (q *PersistableChannelUniqueQueue) Run(atShutdown, atTerminate func(func())) {
+	pprof.SetGoroutineLabels(q.channelQueue.baseCtx)
 	log.Debug("PersistableChannelUniqueQueue: %s Starting", q.delayedStarter.name)
 
 	q.lock.Lock()
@@ -301,6 +303,7 @@ func (q *PersistableChannelUniqueQueue) Terminate() {
 	if q.internal != nil {
 		q.internal.(*LevelUniqueQueue).Terminate()
 	}
+	q.channelQueue.baseCtxFinished()
 	log.Debug("PersistableChannelUniqueQueue: %s Terminated", q.delayedStarter.name)
 }
@@ -6,11 +6,14 @@ package queue
 
 import (
 	"context"
+	"fmt"
+	"runtime/pprof"
 	"sync"
 	"sync/atomic"
 	"time"
 
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/util"
 )
 
@@ -22,6 +25,7 @@ type WorkerPool struct {
 	lock sync.Mutex
 	baseCtx context.Context
 	baseCtxCancel context.CancelFunc
+	baseCtxFinished process.FinishedFunc
 	paused  chan struct{}
 	resumed chan struct{}
 	cond    *sync.Cond
@@ -44,6 +48,7 @@ var (
 
 // WorkerPoolConfiguration is the basic configuration for a WorkerPool
 type WorkerPoolConfiguration struct {
+	Name         string
 	QueueLength  int
 	BatchLength  int
 	BlockTimeout time.Duration
@@ -54,12 +59,13 @@ type WorkerPoolConfiguration struct {
 
 // NewWorkerPool creates a new worker pool
 func NewWorkerPool(handle HandlerFunc, config WorkerPoolConfiguration) *WorkerPool {
-	ctx, cancel := context.WithCancel(context.Background())
+	ctx, cancel, finished := process.GetManager().AddTypedContext(context.Background(), fmt.Sprintf("Queue: %s", config.Name), process.SystemProcessType, false)
 
 	dataChan := make(chan Data, config.QueueLength)
 	pool := &WorkerPool{
 		baseCtx:       ctx,
 		baseCtxCancel: cancel,
+		baseCtxFinished: finished,
 		batchLength:   config.BatchLength,
 		dataChan:      dataChan,
 		resumed:       closedChan,
@@ -299,6 +305,7 @@ func (p *WorkerPool) addWorkers(ctx context.Context, cancel context.CancelFunc,
 		p.numberOfWorkers++
 		p.lock.Unlock()
 		go func() {
+			pprof.SetGoroutineLabels(ctx)
 			p.doWork(ctx)
 
 			p.lock.Lock()
@@ -476,6 +483,7 @@ func (p *WorkerPool) FlushWithContext(ctx context.Context) error {
 }
 
 func (p *WorkerPool) doWork(ctx context.Context) {
+	pprof.SetGoroutineLabels(ctx)
 	delay := time.Millisecond * 300
 
 	// Create a common timer - we will use this elsewhere
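Note the split here: the pool registers its context with currentlyRunning=false because the constructor's goroutine is not a worker, and each worker adopts the labels itself when it starts. A sketch of that pattern for a generic pool (startWorkers is illustrative, not the PR's WorkerPool):

package main

import (
	"context"
	"runtime/pprof"
	"sync"
)

// startWorkers launches n workers that each adopt the pool context's pprof
// labels, so they show up under the pool's entry in the goroutine profile.
func startWorkers(ctx context.Context, n int, work func(context.Context)) *sync.WaitGroup {
	var wg sync.WaitGroup
	for i := 0; i < n; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			pprof.SetGoroutineLabels(ctx)
			work(ctx)
		}()
	}
	return &wg
}

func main() {
	// Assumed label value for illustration only.
	ctx := pprof.WithLabels(context.Background(), pprof.Labels("process-description", "Queue: example"))
	startWorkers(ctx, 3, func(ctx context.Context) { /* drain queue until ctx.Done() */ }).Wait()
}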
@@ -23,7 +23,9 @@ import (
 	"syscall"
 
 	asymkey_model "code.gitea.io/gitea/models/asymkey"
+	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/process"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/util"
 
@@ -317,7 +319,11 @@ func Listen(host string, port int, ciphers, keyExchanges, macs []string) {
 		}
 	}
 
-	go listen(&srv)
+	go func() {
+		_, _, finished := process.GetManager().AddTypedContext(graceful.GetManager().HammerContext(), "Service: Built-in SSH server", process.SystemProcessType, true)
+		defer finished()
+		listen(&srv)
+	}()
 }
 
 // GenKeyPair make a pair of public and private keys for SSH access.
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/process"
 )
 
 // Event indicates when the printer is triggered
@@ -40,7 +41,9 @@ type requestRecordsManager struct {
 }
 
 func (manager *requestRecordsManager) startSlowQueryDetector(threshold time.Duration) {
-	go graceful.GetManager().RunWithShutdownContext(func(baseCtx context.Context) {
+	go graceful.GetManager().RunWithShutdownContext(func(ctx context.Context) {
+		ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: SlowQueryDetector", process.SystemProcessType, true)
+		defer finished()
 		// This go-routine checks all active requests every second.
 		// If a request has been running for a long time (eg: /user/events), we also print a log with "still-executing" message
 		// After the "still-executing" log is printed, the record will be removed from the map to prevent duplicated logs in future
@@ -49,7 +52,7 @@ func (manager *requestRecordsManager) startSlowQueryDetector(threshold time.Dura
 		t := time.NewTicker(time.Second)
 		for {
 			select {
-			case <-baseCtx.Done():
+			case <-ctx.Done():
 				return
 			case <-t.C:
 				now := time.Now()
@ -2831,6 +2831,8 @@ monitor.next = Next Time
|
||||||
monitor.previous = Previous Time
|
monitor.previous = Previous Time
|
||||||
monitor.execute_times = Executions
|
monitor.execute_times = Executions
|
||||||
monitor.process = Running Processes
|
monitor.process = Running Processes
|
||||||
|
monitor.stacktrace = Stacktraces
|
||||||
|
monitor.goroutines = %d Goroutines
|
||||||
monitor.desc = Description
|
monitor.desc = Description
|
||||||
monitor.start = Start Time
|
monitor.start = Start Time
|
||||||
monitor.execute_time = Execution Time
|
monitor.execute_time = Execution Time
|
||||||
|
|
|
@ -27,7 +27,7 @@ func Middlewares() []func(http.Handler) http.Handler {
|
||||||
// First of all escape the URL RawPath to ensure that all routing is done using a correctly escaped URL
|
// First of all escape the URL RawPath to ensure that all routing is done using a correctly escaped URL
|
||||||
req.URL.RawPath = req.URL.EscapedPath()
|
req.URL.RawPath = req.URL.EscapedPath()
|
||||||
|
|
||||||
ctx, _, finished := process.GetManager().AddContext(req.Context(), fmt.Sprintf("%s: %s", req.Method, req.RequestURI))
|
ctx, _, finished := process.GetManager().AddTypedContext(req.Context(), fmt.Sprintf("%s: %s", req.Method, req.RequestURI), process.RequestProcessType, true)
|
||||||
defer finished()
|
defer finished()
|
||||||
next.ServeHTTP(context.NewResponse(resp), req.WithContext(ctx))
|
next.ServeHTTP(context.NewResponse(resp), req.WithContext(ctx))
|
||||||
})
|
})
|
||||||
|
|
|
@ -141,7 +141,7 @@ func GlobalInitInstalled(ctx context.Context) {
|
||||||
models.NewRepoContext()
|
models.NewRepoContext()
|
||||||
|
|
||||||
// Booting long running goroutines.
|
// Booting long running goroutines.
|
||||||
cron.NewContext()
|
cron.NewContext(ctx)
|
||||||
issue_indexer.InitIssueIndexer(false)
|
issue_indexer.InitIssueIndexer(false)
|
||||||
code_indexer.Init()
|
code_indexer.Init()
|
||||||
mustInit(stats_indexer.Init)
|
mustInit(stats_indexer.Init)
|
||||||
|
|
|
@ -70,6 +70,7 @@ func Routes() *web.Route {
|
||||||
r.Post("/manager/release-and-reopen-logging", ReleaseReopenLogging)
|
r.Post("/manager/release-and-reopen-logging", ReleaseReopenLogging)
|
||||||
r.Post("/manager/add-logger", bind(private.LoggerOptions{}), AddLogger)
|
r.Post("/manager/add-logger", bind(private.LoggerOptions{}), AddLogger)
|
||||||
r.Post("/manager/remove-logger/{group}/{name}", RemoveLogger)
|
r.Post("/manager/remove-logger/{group}/{name}", RemoveLogger)
|
||||||
|
r.Get("/manager/processes", Processes)
|
||||||
r.Post("/mail/send", SendEmail)
|
r.Post("/mail/send", SendEmail)
|
||||||
r.Post("/restore_repo", RestoreRepo)
|
r.Post("/restore_repo", RestoreRepo)
|
||||||
|
|
||||||
|
|
161
routers/private/manager_process.go
Normal file
161
routers/private/manager_process.go
Normal file
|
@ -0,0 +1,161 @@
|
||||||
|
// Copyright 2022 The Gitea Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a MIT-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package private
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"runtime"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"code.gitea.io/gitea/modules/context"
|
||||||
|
"code.gitea.io/gitea/modules/log"
|
||||||
|
"code.gitea.io/gitea/modules/private"
|
||||||
|
process_module "code.gitea.io/gitea/modules/process"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Processes prints out the processes
|
||||||
|
func Processes(ctx *context.PrivateContext) {
|
||||||
|
pid := ctx.FormString("cancel-pid")
|
||||||
|
if pid != "" {
|
||||||
|
process_module.GetManager().Cancel(process_module.IDType(pid))
|
||||||
|
runtime.Gosched()
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
}
|
||||||
|
|
||||||
|
flat := ctx.FormBool("flat")
|
||||||
|
noSystem := ctx.FormBool("no-system")
|
||||||
|
stacktraces := ctx.FormBool("stacktraces")
|
||||||
|
json := ctx.FormBool("json")
|
||||||
|
|
||||||
|
var processes []*process_module.Process
|
||||||
|
goroutineCount := int64(0)
|
||||||
|
processCount := 0
|
||||||
|
var err error
|
||||||
|
if stacktraces {
|
||||||
|
processes, processCount, goroutineCount, err = process_module.GetManager().ProcessStacktraces(flat, noSystem)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("Unable to get stacktrace: %v", err)
|
||||||
|
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||||
|
Err: fmt.Sprintf("Failed to get stacktraces: %v", err),
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
processes, processCount = process_module.GetManager().Processes(flat, noSystem)
|
||||||
|
}
|
||||||
|
|
||||||
|
if json {
|
||||||
|
ctx.JSON(http.StatusOK, map[string]interface{}{
|
||||||
|
"TotalNumberOfGoroutines": goroutineCount,
|
||||||
|
"TotalNumberOfProcesses": processCount,
|
||||||
|
"Processes": processes,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.Resp.Header().Set("Content-Type", "text/plain;charset=utf-8")
|
||||||
|
ctx.Resp.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
if err := writeProcesses(ctx.Resp, processes, processCount, goroutineCount, "", flat); err != nil {
|
||||||
|
log.Error("Unable to write out process stacktrace: %v", err)
|
||||||
|
if !ctx.Written() {
|
||||||
|
ctx.JSON(http.StatusInternalServerError, private.Response{
|
||||||
|
Err: fmt.Sprintf("Failed to get stacktraces: %v", err),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeProcesses(out io.Writer, processes []*process_module.Process, processCount int, goroutineCount int64, indent string, flat bool) error {
|
||||||
|
if goroutineCount > 0 {
|
||||||
|
if _, err := fmt.Fprintf(out, "%sTotal Number of Goroutines: %d\n", indent, goroutineCount); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, err := fmt.Fprintf(out, "%sTotal Number of Processes: %d\n", indent, processCount); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(processes) > 0 {
|
||||||
|
if err := writeProcess(out, processes[0], " ", flat); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(processes) > 1 {
|
||||||
|
for _, process := range processes[1:] {
|
||||||
|
if _, err := fmt.Fprintf(out, "%s | \n", indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := writeProcess(out, process, " ", flat); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeProcess(out io.Writer, process *process_module.Process, indent string, flat bool) error {
|
||||||
|
sb := &bytes.Buffer{}
|
||||||
|
if flat {
|
||||||
|
if process.ParentPID != "" {
|
||||||
|
_, _ = fmt.Fprintf(sb, "%s+ PID: %s\t\tType: %s\n", indent, process.PID, process.Type)
|
||||||
|
} else {
|
||||||
|
_, _ = fmt.Fprintf(sb, "%s+ PID: %s:%s\tType: %s\n", indent, process.ParentPID, process.PID, process.Type)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
_, _ = fmt.Fprintf(sb, "%s+ PID: %s\tType: %s\n", indent, process.PID, process.Type)
|
||||||
|
}
|
||||||
|
indent += "| "
|
||||||
|
|
||||||
|
_, _ = fmt.Fprintf(sb, "%sDescription: %s\n", indent, process.Description)
|
||||||
|
_, _ = fmt.Fprintf(sb, "%sStart: %s\n", indent, process.Start)
|
||||||
|
|
||||||
|
if len(process.Stacks) > 0 {
|
||||||
|
_, _ = fmt.Fprintf(sb, "%sGoroutines:\n", indent)
|
||||||
|
for _, stack := range process.Stacks {
|
||||||
|
indent := indent + " "
|
||||||
|
_, _ = fmt.Fprintf(sb, "%s+ Description: %s", indent, stack.Description)
|
||||||
|
if stack.Count > 1 {
|
||||||
|
_, _ = fmt.Fprintf(sb, "* %d", stack.Count)
|
||||||
|
}
|
||||||
|
_, _ = fmt.Fprintf(sb, "\n")
|
||||||
|
indent += "| "
|
||||||
|
if len(stack.Labels) > 0 {
|
||||||
|
_, _ = fmt.Fprintf(sb, "%sLabels: %q:%q", indent, stack.Labels[0].Name, stack.Labels[0].Value)
|
||||||
|
|
||||||
|
if len(stack.Labels) > 1 {
|
||||||
|
for _, label := range stack.Labels[1:] {
|
||||||
|
_, _ = fmt.Fprintf(sb, ", %q:%q", label.Name, label.Value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_, _ = fmt.Fprintf(sb, "\n")
|
||||||
|
}
|
||||||
|
_, _ = fmt.Fprintf(sb, "%sStack:\n", indent)
|
||||||
|
indent += " "
|
||||||
|
for _, entry := range stack.Entry {
|
||||||
|
_, _ = fmt.Fprintf(sb, "%s+ %s\n", indent, entry.Function)
|
||||||
|
_, _ = fmt.Fprintf(sb, "%s| %s:%d\n", indent, entry.File, entry.Line)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, err := out.Write(sb.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
sb.Reset()
|
||||||
|
if len(process.Children) > 0 {
|
||||||
|
if _, err := fmt.Fprintf(out, "%sChildren:\n", indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, child := range process.Children {
|
||||||
|
if err := writeProcess(out, child, indent+" ", flat); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
|
@ -38,6 +38,7 @@ const (
|
||||||
tplDashboard base.TplName = "admin/dashboard"
|
tplDashboard base.TplName = "admin/dashboard"
|
||||||
tplConfig base.TplName = "admin/config"
|
tplConfig base.TplName = "admin/config"
|
||||||
tplMonitor base.TplName = "admin/monitor"
|
tplMonitor base.TplName = "admin/monitor"
|
||||||
|
tplStacktrace base.TplName = "admin/stacktrace"
|
||||||
tplQueue base.TplName = "admin/queue"
|
tplQueue base.TplName = "admin/queue"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -326,12 +327,33 @@ func Monitor(ctx *context.Context) {
|
||||||
ctx.Data["Title"] = ctx.Tr("admin.monitor")
|
ctx.Data["Title"] = ctx.Tr("admin.monitor")
|
||||||
ctx.Data["PageIsAdmin"] = true
|
ctx.Data["PageIsAdmin"] = true
|
||||||
ctx.Data["PageIsAdminMonitor"] = true
|
ctx.Data["PageIsAdminMonitor"] = true
|
||||||
ctx.Data["Processes"] = process.GetManager().Processes(true)
|
ctx.Data["Processes"], ctx.Data["ProcessCount"] = process.GetManager().Processes(false, true)
|
||||||
ctx.Data["Entries"] = cron.ListTasks()
|
ctx.Data["Entries"] = cron.ListTasks()
|
||||||
ctx.Data["Queues"] = queue.GetManager().ManagedQueues()
|
ctx.Data["Queues"] = queue.GetManager().ManagedQueues()
|
||||||
|
|
||||||
ctx.HTML(http.StatusOK, tplMonitor)
|
ctx.HTML(http.StatusOK, tplMonitor)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GoroutineStacktrace show admin monitor goroutines page
|
||||||
|
func GoroutineStacktrace(ctx *context.Context) {
|
||||||
|
ctx.Data["Title"] = ctx.Tr("admin.monitor")
|
||||||
|
ctx.Data["PageIsAdmin"] = true
|
||||||
|
ctx.Data["PageIsAdminMonitor"] = true
|
||||||
|
|
||||||
|
processStacks, processCount, goroutineCount, err := process.GetManager().ProcessStacktraces(false, false)
|
||||||
|
if err != nil {
|
||||||
|
ctx.ServerError("GoroutineStacktrace", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx.Data["ProcessStacks"] = processStacks
|
||||||
|
|
||||||
|
ctx.Data["GoroutineCount"] = goroutineCount
|
||||||
|
ctx.Data["ProcessCount"] = processCount
|
||||||
|
|
||||||
|
ctx.HTML(http.StatusOK, tplStacktrace)
|
||||||
|
}
|
||||||
|
|
||||||
// MonitorCancel cancels a process
|
// MonitorCancel cancels a process
|
||||||
func MonitorCancel(ctx *context.Context) {
|
func MonitorCancel(ctx *context.Context) {
|
||||||
pid := ctx.Params("pid")
|
pid := ctx.Params("pid")
|
||||||
|
|
|
@ -436,6 +436,7 @@ func RegisterRoutes(m *web.Route) {
|
||||||
m.Post("/config/test_mail", admin.SendTestMail)
|
m.Post("/config/test_mail", admin.SendTestMail)
|
||||||
m.Group("/monitor", func() {
|
m.Group("/monitor", func() {
|
||||||
m.Get("", admin.Monitor)
|
m.Get("", admin.Monitor)
|
||||||
|
m.Get("/stacktrace", admin.GoroutineStacktrace)
|
||||||
m.Post("/cancel/{pid}", admin.MonitorCancel)
|
m.Post("/cancel/{pid}", admin.MonitorCancel)
|
||||||
m.Group("/queue/{qid}", func() {
|
m.Group("/queue/{qid}", func() {
|
||||||
m.Get("", admin.Queue)
|
m.Get("", admin.Queue)
|
||||||
|
|
|
@ -7,9 +7,11 @@ package cron
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"runtime/pprof"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"code.gitea.io/gitea/modules/graceful"
|
"code.gitea.io/gitea/modules/graceful"
|
||||||
|
"code.gitea.io/gitea/modules/process"
|
||||||
"code.gitea.io/gitea/modules/sync"
|
"code.gitea.io/gitea/modules/sync"
|
||||||
|
|
||||||
"github.com/gogs/cron"
|
"github.com/gogs/cron"
|
||||||
|
@ -23,7 +25,9 @@ var taskStatusTable = sync.NewStatusTable()
|
||||||
// NewContext begins cron tasks
|
// NewContext begins cron tasks
|
||||||
// Each cron task is run within the shutdown context as a running server
|
// Each cron task is run within the shutdown context as a running server
|
||||||
// AtShutdown the cron server is stopped
|
// AtShutdown the cron server is stopped
|
||||||
func NewContext() {
|
func NewContext(original context.Context) {
|
||||||
|
defer pprof.SetGoroutineLabels(original)
|
||||||
|
_, _, finished := process.GetManager().AddTypedContext(graceful.GetManager().ShutdownContext(), "Service: Cron", process.SystemProcessType, true)
|
||||||
initBasicTasks()
|
initBasicTasks()
|
||||||
initExtendedTasks()
|
initExtendedTasks()
|
||||||
|
|
||||||
|
@ -42,6 +46,7 @@ func NewContext() {
|
||||||
lock.Lock()
|
lock.Lock()
|
||||||
started = false
|
started = false
|
||||||
lock.Unlock()
|
lock.Unlock()
|
||||||
|
finished()
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -32,9 +32,9 @@ func TestPullRequest_AddToTaskQueue(t *testing.T) {
|
||||||
WorkerPoolConfiguration: queue.WorkerPoolConfiguration{
|
WorkerPoolConfiguration: queue.WorkerPoolConfiguration{
|
||||||
QueueLength: 10,
|
QueueLength: 10,
|
||||||
BatchLength: 1,
|
BatchLength: 1,
|
||||||
|
Name: "temporary-queue",
|
||||||
},
|
},
|
||||||
Workers: 1,
|
Workers: 1,
|
||||||
Name: "temporary-queue",
|
|
||||||
}, "")
|
}, "")
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
|
|
|
@ -24,6 +24,7 @@ import (
|
||||||
"code.gitea.io/gitea/modules/graceful"
|
"code.gitea.io/gitea/modules/graceful"
|
||||||
"code.gitea.io/gitea/modules/hostmatcher"
|
"code.gitea.io/gitea/modules/hostmatcher"
|
||||||
"code.gitea.io/gitea/modules/log"
|
"code.gitea.io/gitea/modules/log"
|
||||||
|
"code.gitea.io/gitea/modules/process"
|
||||||
"code.gitea.io/gitea/modules/proxy"
|
"code.gitea.io/gitea/modules/proxy"
|
||||||
"code.gitea.io/gitea/modules/setting"
|
"code.gitea.io/gitea/modules/setting"
|
||||||
|
|
||||||
|
@ -31,7 +32,7 @@ import (
|
||||||
)
|
)
|
||||||
|
|
||||||
// Deliver deliver hook task
|
// Deliver deliver hook task
|
||||||
func Deliver(t *webhook_model.HookTask) error {
|
func Deliver(ctx context.Context, t *webhook_model.HookTask) error {
|
||||||
w, err := webhook_model.GetWebhookByID(t.HookID)
|
w, err := webhook_model.GetWebhookByID(t.HookID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -178,7 +179,7 @@ func Deliver(t *webhook_model.HookTask) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := webhookHTTPClient.Do(req.WithContext(graceful.GetManager().ShutdownContext()))
|
resp, err := webhookHTTPClient.Do(req.WithContext(ctx))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.ResponseInfo.Body = fmt.Sprintf("Delivery: %v", err)
|
t.ResponseInfo.Body = fmt.Sprintf("Delivery: %v", err)
|
||||||
return err
|
return err
|
||||||
|
@ -210,6 +211,8 @@ func DeliverHooks(ctx context.Context) {
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
ctx, _, finished := process.GetManager().AddTypedContext(ctx, "Service: DeliverHooks", process.SystemProcessType, true)
|
||||||
|
defer finished()
|
||||||
tasks, err := webhook_model.FindUndeliveredHookTasks()
|
tasks, err := webhook_model.FindUndeliveredHookTasks()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error("DeliverHooks: %v", err)
|
log.Error("DeliverHooks: %v", err)
|
||||||
|
@ -223,7 +226,7 @@ func DeliverHooks(ctx context.Context) {
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
if err = Deliver(t); err != nil {
|
if err = Deliver(ctx, t); err != nil {
|
||||||
log.Error("deliver: %v", err)
|
log.Error("deliver: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -255,7 +258,7 @@ func DeliverHooks(ctx context.Context) {
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
if err = Deliver(t); err != nil {
|
if err = Deliver(ctx, t); err != nil {
|
||||||
log.Error("deliver: %v", err)
|
log.Error("deliver: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,11 +1,14 @@
|
||||||
<div class="item">
|
<div class="item">
|
||||||
<div class="df ac">
|
<div class="df ac">
|
||||||
|
<div class="icon ml-3 mr-3">{{if eq .Process.Type "request"}}{{svg "octicon-globe" 16 }}{{else if eq .Process.Type "system"}}{{svg "octicon-cpu" 16 }}{{else}}{{svg "octicon-terminal" 16 }}{{end}}</div>
|
||||||
<div class="content f1">
|
<div class="content f1">
|
||||||
<div class="header">{{.Process.Description}}</div>
|
<div class="header">{{.Process.Description}}</div>
|
||||||
<div class="description"><span title="{{DateFmtLong .Process.Start}}">{{TimeSince .Process.Start .root.i18n.Lang}}</span></div>
|
<div class="description"><span title="{{DateFmtLong .Process.Start}}">{{TimeSince .Process.Start .root.i18n.Lang}}</span></div>
|
||||||
</div>
|
</div>
|
||||||
<div>
|
<div>
|
||||||
|
{{if ne .Process.Type "system"}}
|
||||||
<a class="delete-button icon" href="" data-url="{{.root.Link}}/cancel/{{.Process.PID}}" data-id="{{.Process.PID}}" data-name="{{.Process.Description}}">{{svg "octicon-trash" 16 "text-red"}}</a>
|
<a class="delete-button icon" href="" data-url="{{.root.Link}}/cancel/{{.Process.PID}}" data-id="{{.Process.PID}}" data-name="{{.Process.Description}}">{{svg "octicon-trash" 16 "text-red"}}</a>
|
||||||
|
{{end}}
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
|
|
@ -1,5 +1,8 @@
|
||||||
<h4 class="ui top attached header">
|
<h4 class="ui top attached header">
|
||||||
{{.i18n.Tr "admin.monitor.process"}}
|
{{.i18n.Tr "admin.monitor.process"}}
|
||||||
|
<div class="ui right">
|
||||||
|
<a class="ui blue tiny button" href="{{AppSubUrl}}/admin/monitor/stacktrace">{{.i18n.Tr "admin.monitor.stacktrace"}}</a>
|
||||||
|
</div>
|
||||||
</h4>
|
</h4>
|
||||||
<div class="ui attached segment">
|
<div class="ui attached segment">
|
||||||
<div class="ui relaxed divided list">
|
<div class="ui relaxed divided list">
|
||||||
|
|
66
templates/admin/stacktrace-row.tmpl
Normal file
66
templates/admin/stacktrace-row.tmpl
Normal file
|
@ -0,0 +1,66 @@
|
||||||
|
<div class="item">
|
||||||
|
<div class="df ac">
|
||||||
|
<div class="icon ml-3 mr-3">
|
||||||
|
{{if eq .Process.Type "request"}}
|
||||||
|
{{svg "octicon-globe" 16 }}
|
||||||
|
{{else if eq .Process.Type "system"}}
|
||||||
|
{{svg "octicon-cpu" 16 }}
|
||||||
|
{{else if eq .Process.Type "normal"}}
|
||||||
|
{{svg "octicon-terminal" 16 }}
|
||||||
|
{{else}}
|
||||||
|
{{svg "octicon-code" 16 }}
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
<div class="content f1">
|
||||||
|
<div class="header">{{.Process.Description}}</div>
|
||||||
|
<div class="description">{{if ne .Process.Type "none"}}<span title="{{DateFmtLong .Process.Start}}">{{TimeSince .Process.Start .root.i18n.Lang}}</span>{{end}}</div>
|
||||||
|
</div>
|
||||||
|
<div>
|
||||||
|
{{if or (eq .Process.Type "request") (eq .Process.Type "normal") }}
|
||||||
|
<a class="delete-button icon" href="" data-url="{{.root.Link}}/cancel/{{.Process.PID}}" data-id="{{.Process.PID}}" data-name="{{.Process.Description}}">{{svg "octicon-trash" 16 "text-red"}}</a>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{if .Process.Stacks}}
|
||||||
|
<div class="divided list ml-3">
|
||||||
|
{{range .Process.Stacks}}
|
||||||
|
<div class="item">
|
||||||
|
<details>
|
||||||
|
<summary>
|
||||||
|
<div class="dif content">
|
||||||
|
<div class="header ml-3">
|
||||||
|
<span class="icon mr-3">{{svg "octicon-code" 16 }}</span>{{.Description}}{{if gt .Count 1}} * {{.Count}}{{end}}
|
||||||
|
</div>
|
||||||
|
<div class="description">
|
||||||
|
{{range .Labels}}
|
||||||
|
<div class="ui label">{{.Name}}<div class="detail">{{.Value}}</div></div>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</summary>
|
||||||
|
<div class="list">
|
||||||
|
{{range .Entry}}
|
||||||
|
<div class="item df ac">
|
||||||
|
<span class="icon mr-4">{{svg "octicon-dot-fill" 16 }}</span>
|
||||||
|
<div class="content f1">
|
||||||
|
<div class="header"><code>{{.Function}}</code></div>
|
||||||
|
<div class="description"><code>{{.File}}:{{.Line}}</code></div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
</details>
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
{{if .Process.Children}}
|
||||||
|
<div class="divided list">
|
||||||
|
{{range .Process.Children}}
|
||||||
|
{{template "admin/stacktrace-row" dict "Process" . "root" $.root}}
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
{{end}}
|
||||||
|
|
||||||
|
</div>
|
33
templates/admin/stacktrace.tmpl
Normal file
33
templates/admin/stacktrace.tmpl
Normal file
|
@ -0,0 +1,33 @@
|
||||||
|
{{template "base/head" .}}
|
||||||
|
<div class="page-content admin monitor">
|
||||||
|
{{template "admin/navbar" .}}
|
||||||
|
<div class="ui container">
|
||||||
|
{{template "base/alert" .}}
|
||||||
|
<h4 class="ui top attached header">
|
||||||
|
{{.i18n.Tr "admin.monitor.stacktrace"}}: {{.i18n.Tr "admin.monitor.goroutines" .GoroutineCount}}
|
||||||
|
<div class="ui right">
|
||||||
|
<a class="ui blue tiny button" href="{{AppSubUrl}}/admin/monitor">{{.i18n.Tr "admin.monitor"}}</a>
|
||||||
|
</div>
|
||||||
|
</h4>
|
||||||
|
<div class="ui attached segment">
|
||||||
|
<div class="ui relaxed divided list">
|
||||||
|
{{range .ProcessStacks}}
|
||||||
|
{{template "admin/stacktrace-row" dict "Process" . "root" $}}
|
||||||
|
{{end}}
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
<div class="ui small basic delete modal">
|
||||||
|
<div class="ui icon header">
|
||||||
|
{{svg "octicon-x" 16 "close inside"}}
|
||||||
|
{{.i18n.Tr "admin.monitor.process.cancel"}}
|
||||||
|
</div>
|
||||||
|
<div class="content">
|
||||||
|
<p>{{$.i18n.Tr "admin.monitor.process.cancel_notices" `<span class="name"></span>` | Safe}}</p>
|
||||||
|
<p>{{$.i18n.Tr "admin.monitor.process.cancel_desc"}}</p>
|
||||||
|
</div>
|
||||||
|
{{template "base/delete_modal_actions" .}}
|
||||||
|
</div>
|
||||||
|
|
||||||
|
{{template "base/footer" .}}
|
Loading…
Reference in a new issue