Rewrite logger system (#24726)

## ⚠️ Breaking

The `log.<mode>.<logger>` style config has been dropped. If you used it,
please check the new config manual & app.example.ini to make your
instance output logs as expected.

Although many legacy options still work, it's encouraged to upgrade to
the new options.

The SMTP logger is deleted because SMTP is not suitable to collect logs.

If you have manually configured Gitea log options, please confirm the
logger system works as expected after upgrading.

## Description

Close #12082 and maybe more log-related issues, and resolve some related
FIXMEs in old code (which seemed unfixable before)

Just like rewriting queue #24505 : make code maintainable, clear legacy
bugs, and add the ability to support more writers (eg: JSON, structured
log)

There is a new document (with examples): `logging-config.en-us.md`

This PR is safer than the queue rewriting, because it's just for
logging, it won't break other logic.

## The old problems

The logging system is quite old and difficult to maintain:
* Unclear concepts: Logger, NamedLogger, MultiChannelledLogger,
SubLogger, EventLogger, WriterLogger etc
* For some code it is difficult to know whether it is right:
`log.DelNamedLogger("console")` vs `log.DelNamedLogger(log.DEFAULT)` vs
`log.DelLogger("console")`
* The old system heavily depends on the ini config system; it's difficult to
create new loggers for different purposes, and it's very fragile.
* The "color" trick is difficult to use and read, many colors are
unnecessary, and in the future structured log could help
* It's difficult to add other log formats, eg: JSON format
* The log outputter doesn't have full control of its goroutine; it's
difficult to make the outputter support advanced behaviors
* The logs could be lost in some cases, e.g.: no Fatal error output when
using the CLI.
* Config options are passed by JSON, which is quite fragile.
* INI package makes the KEY in `[log]` section visible in `[log.sub1]`
and `[log.sub1.subA]`, this behavior is quite fragile and would cause
more unclear problems, and there is no strong requirement to support
`log.<mode>.<logger>` syntax.


## The new design

See `logger.go` for documents.


## Screenshot

<details>


![image](https://github.com/go-gitea/gitea/assets/2114189/4462d713-ba39-41f5-bb08-de912e67e1ff)


![image](https://github.com/go-gitea/gitea/assets/2114189/b188035e-f691-428b-8b2d-ff7b2199b2f9)


![image](https://github.com/go-gitea/gitea/assets/2114189/132e9745-1c3b-4e00-9e0d-15eaea495dee)

</details>

## TODO

* [x] add some new tests
* [x] fix some tests
* [x] test some sub-commands (manually ....)

---------

Co-authored-by: Jason Song <i@wolfogre.com>
Co-authored-by: delvh <dev.lh@web.de>
Co-authored-by: Giteabot <teabot@gitea.io>
This commit is contained in:
wxiaoguang 2023-05-22 06:35:11 +08:00 committed by GitHub
parent 65dff8e364
commit 4647660776
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
109 changed files with 3806 additions and 5337 deletions

View file

@ -9,6 +9,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io"
"os" "os"
"os/signal" "os/signal"
"strings" "strings"
@ -59,7 +60,7 @@ func confirm() (bool, error) {
func initDB(ctx context.Context) error { func initDB(ctx context.Context) error {
setting.Init(&setting.Options{}) setting.Init(&setting.Options{})
setting.LoadDBSetting() setting.LoadDBSetting()
setting.InitSQLLog(false) setting.InitSQLLoggersForCli(log.INFO)
if setting.Database.Type == "" { if setting.Database.Type == "" {
log.Fatal(`Database settings are missing from the configuration file: %q. log.Fatal(`Database settings are missing from the configuration file: %q.
@ -93,3 +94,17 @@ func installSignals() (context.Context, context.CancelFunc) {
return ctx, cancel return ctx, cancel
} }
func setupConsoleLogger(level log.Level, colorize bool, out io.Writer) {
if out != os.Stdout && out != os.Stderr {
panic("setupConsoleLogger can only be used with os.Stdout or os.Stderr")
}
writeMode := log.WriterMode{
Level: level,
Colorize: colorize,
WriterOption: log.WriterConsoleOption{Stderr: out == os.Stderr},
}
writer := log.NewEventWriterConsole("console-default", writeMode)
log.GetManager().GetLogger(log.DEFAULT).RemoveAllWriters().AddWriters(writer)
}

View file

@ -4,10 +4,10 @@
package cmd package cmd
import ( import (
"errors"
"fmt" "fmt"
golog "log" golog "log"
"os" "os"
"path/filepath"
"strings" "strings"
"text/tabwriter" "text/tabwriter"
@ -82,23 +82,25 @@ You should back-up your database before doing this and ensure that your database
} }
func runRecreateTable(ctx *cli.Context) error { func runRecreateTable(ctx *cli.Context) error {
// Redirect the default golog to here
golog.SetFlags(0)
golog.SetPrefix("")
golog.SetOutput(log.NewLoggerAsWriter("INFO", log.GetLogger(log.DEFAULT)))
setting.Init(&setting.Options{})
setting.LoadDBSetting()
setting.Log.EnableXORMLog = ctx.Bool("debug")
setting.Database.LogSQL = ctx.Bool("debug")
// FIXME: don't use CfgProvider directly
setting.CfgProvider.Section("log").Key("XORM").SetValue(",")
setting.InitSQLLog(!ctx.Bool("debug"))
stdCtx, cancel := installSignals() stdCtx, cancel := installSignals()
defer cancel() defer cancel()
// Redirect the default golog to here
golog.SetFlags(0)
golog.SetPrefix("")
golog.SetOutput(log.LoggerToWriter(log.GetLogger(log.DEFAULT).Info))
debug := ctx.Bool("debug")
setting.Init(&setting.Options{})
setting.LoadDBSetting()
if debug {
setting.InitSQLLoggersForCli(log.DEBUG)
} else {
setting.InitSQLLoggersForCli(log.INFO)
}
setting.Database.LogSQL = debug
if err := db.InitEngine(stdCtx); err != nil { if err := db.InitEngine(stdCtx); err != nil {
fmt.Println(err) fmt.Println(err)
fmt.Println("Check if you are using the right config file. You can use a --config directive to specify one.") fmt.Println("Check if you are using the right config file. You can use a --config directive to specify one.")
@ -125,44 +127,31 @@ func runRecreateTable(ctx *cli.Context) error {
}) })
} }
func setDoctorLogger(ctx *cli.Context) { func setupDoctorDefaultLogger(ctx *cli.Context, colorize bool) {
// Silence the default loggers
setupConsoleLogger(log.FATAL, log.CanColorStderr, os.Stderr)
logFile := ctx.String("log-file") logFile := ctx.String("log-file")
if !ctx.IsSet("log-file") { if !ctx.IsSet("log-file") {
logFile = "doctor.log" logFile = "doctor.log"
} }
colorize := log.CanColorStdout
if ctx.IsSet("color") {
colorize = ctx.Bool("color")
}
if len(logFile) == 0 { if len(logFile) == 0 {
log.NewLogger(1000, "doctor", "console", fmt.Sprintf(`{"level":"NONE","stacktracelevel":"NONE","colorize":%t}`, colorize)) // if no doctor log-file is set, do not show any log from default logger
return return
} }
defer func() {
recovered := recover()
if recovered == nil {
return
}
err, ok := recovered.(error)
if !ok {
panic(recovered)
}
if errors.Is(err, os.ErrPermission) {
fmt.Fprintf(os.Stderr, "ERROR: Unable to write logs to provided file due to permissions error: %s\n %v\n", logFile, err)
} else {
fmt.Fprintf(os.Stderr, "ERROR: Unable to write logs to provided file: %s\n %v\n", logFile, err)
}
fmt.Fprintf(os.Stderr, "WARN: Logging will be disabled\n Use `--log-file` to configure log file location\n")
log.NewLogger(1000, "doctor", "console", fmt.Sprintf(`{"level":"NONE","stacktracelevel":"NONE","colorize":%t}`, colorize))
}()
if logFile == "-" { if logFile == "-" {
log.NewLogger(1000, "doctor", "console", fmt.Sprintf(`{"level":"trace","stacktracelevel":"NONE","colorize":%t}`, colorize)) setupConsoleLogger(log.TRACE, colorize, os.Stdout)
} else { } else {
log.NewLogger(1000, "doctor", "file", fmt.Sprintf(`{"filename":%q,"level":"trace","stacktracelevel":"NONE"}`, logFile)) logFile, _ = filepath.Abs(logFile)
writeMode := log.WriterMode{Level: log.TRACE, WriterOption: log.WriterFileOption{FileName: logFile}}
writer, err := log.NewEventWriter("console-to-file", "file", writeMode)
if err != nil {
log.FallbackErrorf("unable to create file log writer: %v", err)
return
}
log.GetManager().GetLogger(log.DEFAULT).RemoveAllWriters().AddWriters(writer)
} }
} }
@ -170,22 +159,17 @@ func runDoctor(ctx *cli.Context) error {
stdCtx, cancel := installSignals() stdCtx, cancel := installSignals()
defer cancel() defer cancel()
// Silence the default loggers
log.DelNamedLogger("console")
log.DelNamedLogger(log.DEFAULT)
// Now setup our own
setDoctorLogger(ctx)
colorize := log.CanColorStdout colorize := log.CanColorStdout
if ctx.IsSet("color") { if ctx.IsSet("color") {
colorize = ctx.Bool("color") colorize = ctx.Bool("color")
} }
// Finally redirect the default golog to here setupDoctorDefaultLogger(ctx, colorize)
// Finally redirect the default golang's log to here
golog.SetFlags(0) golog.SetFlags(0)
golog.SetPrefix("") golog.SetPrefix("")
golog.SetOutput(log.NewLoggerAsWriter("INFO", log.GetLogger(log.DEFAULT))) golog.SetOutput(log.LoggerToWriter(log.GetLogger(log.DEFAULT).Info))
if ctx.IsSet("list") { if ctx.IsSet("list") {
w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0) w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, '\t', 0)
@ -233,17 +217,5 @@ func runDoctor(ctx *cli.Context) error {
} }
} }
// Now we can set up our own logger to return information about what the doctor is doing return doctor.RunChecks(stdCtx, colorize, ctx.Bool("fix"), checks)
if err := log.NewNamedLogger("doctorouter",
0,
"console",
"console",
fmt.Sprintf(`{"level":"INFO","stacktracelevel":"NONE","colorize":%t,"flags":-1}`, colorize)); err != nil {
fmt.Println(err)
return err
}
logger := log.GetLogger("doctorouter")
defer logger.Close()
return doctor.RunChecks(stdCtx, logger, ctx.Bool("fix"), checks)
} }

View file

@ -172,10 +172,7 @@ func runDump(ctx *cli.Context) error {
outType := ctx.String("type") outType := ctx.String("type")
if fileName == "-" { if fileName == "-" {
file = os.Stdout file = os.Stdout
err := log.DelLogger("console") setupConsoleLogger(log.FATAL, log.CanColorStderr, os.Stderr)
if err != nil {
fatal("Deleting default logger failed. Can not write to stdout: %v", err)
}
} else { } else {
for _, suffix := range outputTypeEnum.Enum { for _, suffix := range outputTypeEnum.Enum {
if strings.HasSuffix(fileName, "."+suffix) { if strings.HasSuffix(fileName, "."+suffix) {

View file

@ -97,13 +97,7 @@ type assetFile struct {
} }
func initEmbeddedExtractor(c *cli.Context) error { func initEmbeddedExtractor(c *cli.Context) error {
// FIXME: there is a bug, if the user runs `gitea embedded` with a different user or root, setupConsoleLogger(log.ERROR, log.CanColorStderr, os.Stderr)
// The setting.Init (loadRunModeFrom) will fail and do log.Fatal
// But the console logger has been deleted, so nothing is printed, the user sees nothing and Gitea just exits.
// Silence the console logger
log.DelNamedLogger("console")
log.DelNamedLogger(log.DEFAULT)
// Read configuration file // Read configuration file
setting.Init(&setting.Options{ setting.Init(&setting.Options{

View file

@ -16,13 +16,13 @@ import (
var ( var (
defaultLoggingFlags = []cli.Flag{ defaultLoggingFlags = []cli.Flag{
cli.StringFlag{ cli.StringFlag{
Name: "group, g", Name: "logger",
Usage: "Group to add logger to - will default to \"default\"", Usage: `Logger name - will default to "default"`,
}, cli.StringFlag{ }, cli.StringFlag{
Name: "name, n", Name: "writer",
Usage: "Name of the new logger - will default to mode", Usage: "Name of the log writer - will default to mode",
}, cli.StringFlag{ }, cli.StringFlag{
Name: "level, l", Name: "level",
Usage: "Logging level for the new logger", Usage: "Logging level for the new logger",
}, cli.StringFlag{ }, cli.StringFlag{
Name: "stacktrace-level, L", Name: "stacktrace-level, L",
@ -83,8 +83,8 @@ var (
cli.BoolFlag{ cli.BoolFlag{
Name: "debug", Name: "debug",
}, cli.StringFlag{ }, cli.StringFlag{
Name: "group, g", Name: "logger",
Usage: "Group to add logger to - will default to \"default\"", Usage: `Logger name - will default to "default"`,
}, },
}, },
Action: runRemoveLogger, Action: runRemoveLogger,
@ -93,15 +93,6 @@ var (
Usage: "Add a logger", Usage: "Add a logger",
Subcommands: []cli.Command{ Subcommands: []cli.Command{
{ {
Name: "console",
Usage: "Add a console logger",
Flags: append(defaultLoggingFlags,
cli.BoolFlag{
Name: "stderr",
Usage: "Output console logs to stderr - only relevant for console",
}),
Action: runAddConsoleLogger,
}, {
Name: "file", Name: "file",
Usage: "Add a file logger", Usage: "Add a file logger",
Flags: append(defaultLoggingFlags, []cli.Flag{ Flags: append(defaultLoggingFlags, []cli.Flag{
@ -148,28 +139,6 @@ var (
}, },
}...), }...),
Action: runAddConnLogger, Action: runAddConnLogger,
}, {
Name: "smtp",
Usage: "Add an SMTP logger",
Flags: append(defaultLoggingFlags, []cli.Flag{
cli.StringFlag{
Name: "username, u",
Usage: "Mail server username",
}, cli.StringFlag{
Name: "password, P",
Usage: "Mail server password",
}, cli.StringFlag{
Name: "host, H",
Usage: "Mail server host (defaults to: 127.0.0.1:25)",
}, cli.StringSliceFlag{
Name: "send-to, s",
Usage: "Email address(es) to send to",
}, cli.StringFlag{
Name: "subject, S",
Usage: "Subject header of sent emails",
},
}...),
Action: runAddSMTPLogger,
}, },
}, },
}, { }, {
@ -194,50 +163,16 @@ func runRemoveLogger(c *cli.Context) error {
defer cancel() defer cancel()
setup(ctx, c.Bool("debug")) setup(ctx, c.Bool("debug"))
group := c.String("group") logger := c.String("logger")
if len(group) == 0 { if len(logger) == 0 {
group = log.DEFAULT logger = log.DEFAULT
} }
name := c.Args().First() writer := c.Args().First()
extra := private.RemoveLogger(ctx, group, name) extra := private.RemoveLogger(ctx, logger, writer)
return handleCliResponseExtra(extra) return handleCliResponseExtra(extra)
} }
func runAddSMTPLogger(c *cli.Context) error {
ctx, cancel := installSignals()
defer cancel()
setup(ctx, c.Bool("debug"))
vals := map[string]interface{}{}
mode := "smtp"
if c.IsSet("host") {
vals["host"] = c.String("host")
} else {
vals["host"] = "127.0.0.1:25"
}
if c.IsSet("username") {
vals["username"] = c.String("username")
}
if c.IsSet("password") {
vals["password"] = c.String("password")
}
if !c.IsSet("send-to") {
return fmt.Errorf("Some recipients must be provided")
}
vals["sendTos"] = c.StringSlice("send-to")
if c.IsSet("subject") {
vals["subject"] = c.String("subject")
} else {
vals["subject"] = "Diagnostic message from Gitea"
}
return commonAddLogger(c, mode, vals)
}
func runAddConnLogger(c *cli.Context) error { func runAddConnLogger(c *cli.Context) error {
ctx, cancel := installSignals() ctx, cancel := installSignals()
defer cancel() defer cancel()
@ -301,25 +236,12 @@ func runAddFileLogger(c *cli.Context) error {
return commonAddLogger(c, mode, vals) return commonAddLogger(c, mode, vals)
} }
func runAddConsoleLogger(c *cli.Context) error {
ctx, cancel := installSignals()
defer cancel()
setup(ctx, c.Bool("debug"))
vals := map[string]interface{}{}
mode := "console"
if c.IsSet("stderr") && c.Bool("stderr") {
vals["stderr"] = c.Bool("stderr")
}
return commonAddLogger(c, mode, vals)
}
func commonAddLogger(c *cli.Context, mode string, vals map[string]interface{}) error { func commonAddLogger(c *cli.Context, mode string, vals map[string]interface{}) error {
if len(c.String("level")) > 0 { if len(c.String("level")) > 0 {
vals["level"] = log.FromString(c.String("level")).String() vals["level"] = log.LevelFromString(c.String("level")).String()
} }
if len(c.String("stacktrace-level")) > 0 { if len(c.String("stacktrace-level")) > 0 {
vals["stacktraceLevel"] = log.FromString(c.String("stacktrace-level")).String() vals["stacktraceLevel"] = log.LevelFromString(c.String("stacktrace-level")).String()
} }
if len(c.String("expression")) > 0 { if len(c.String("expression")) > 0 {
vals["expression"] = c.String("expression") vals["expression"] = c.String("expression")
@ -333,18 +255,18 @@ func commonAddLogger(c *cli.Context, mode string, vals map[string]interface{}) e
if c.IsSet("color") { if c.IsSet("color") {
vals["colorize"] = c.Bool("color") vals["colorize"] = c.Bool("color")
} }
group := "default" logger := log.DEFAULT
if c.IsSet("group") { if c.IsSet("logger") {
group = c.String("group") logger = c.String("logger")
} }
name := mode writer := mode
if c.IsSet("name") { if c.IsSet("writer") {
name = c.String("name") writer = c.String("writer")
} }
ctx, cancel := installSignals() ctx, cancel := installSignals()
defer cancel() defer cancel()
extra := private.AddLogger(ctx, group, name, mode, vals) extra := private.AddLogger(ctx, logger, writer, mode, vals)
return handleCliResponseExtra(extra) return handleCliResponseExtra(extra)
} }

View file

@ -56,11 +56,10 @@ var CmdServ = cli.Command{
} }
func setup(ctx context.Context, debug bool) { func setup(ctx context.Context, debug bool) {
_ = log.DelLogger("console")
if debug { if debug {
_ = log.NewLogger(1000, "console", "console", `{"level":"trace","stacktracelevel":"NONE","stderr":true}`) setupConsoleLogger(log.TRACE, false, os.Stderr)
} else { } else {
_ = log.NewLogger(1000, "console", "console", `{"level":"fatal","stacktracelevel":"NONE","stderr":true}`) setupConsoleLogger(log.FATAL, false, os.Stderr)
} }
setting.Init(&setting.Options{}) setting.Init(&setting.Options{})
if debug { if debug {

View file

@ -103,11 +103,9 @@ func createPIDFile(pidPath string) {
func runWeb(ctx *cli.Context) error { func runWeb(ctx *cli.Context) error {
if ctx.Bool("verbose") { if ctx.Bool("verbose") {
_ = log.DelLogger("console") setupConsoleLogger(log.TRACE, log.CanColorStdout, os.Stdout)
log.NewLogger(0, "console", "console", fmt.Sprintf(`{"level": "trace", "colorize": %t, "stacktraceLevel": "none"}`, log.CanColorStdout))
} else if ctx.Bool("quiet") { } else if ctx.Bool("quiet") {
_ = log.DelLogger("console") setupConsoleLogger(log.FATAL, log.CanColorStdout, os.Stdout)
log.NewLogger(0, "console", "console", fmt.Sprintf(`{"level": "fatal", "colorize": %t, "stacktraceLevel": "none"}`, log.CanColorStdout))
} }
defer func() { defer func() {
if panicked := recover(); panicked != nil { if panicked := recover(); panicked != nil {
@ -156,7 +154,7 @@ func runWeb(ctx *cli.Context) error {
case <-graceful.GetManager().IsShutdown(): case <-graceful.GetManager().IsShutdown():
<-graceful.GetManager().Done() <-graceful.GetManager().Done()
log.Info("PID: %d Gitea Web Finished", os.Getpid()) log.Info("PID: %d Gitea Web Finished", os.Getpid())
log.Close() log.GetManager().Close()
return err return err
default: default:
} }
@ -199,7 +197,7 @@ func runWeb(ctx *cli.Context) error {
err := listen(c, true) err := listen(c, true)
<-graceful.GetManager().Done() <-graceful.GetManager().Done()
log.Info("PID: %d Gitea Web Finished", os.Getpid()) log.Info("PID: %d Gitea Web Finished", os.Getpid())
log.Close() log.GetManager().Close()
return err return err
} }

View file

@ -230,7 +230,6 @@ RUN_MODE = ; prod
;; ;;
;; Disable CDN even in "prod" mode ;; Disable CDN even in "prod" mode
;OFFLINE_MODE = false ;OFFLINE_MODE = false
;DISABLE_ROUTER_LOG = false
;; ;;
;; TLS Settings: Either ACME or manual ;; TLS Settings: Either ACME or manual
;; (Other common TLS configuration are found before) ;; (Other common TLS configuration are found before)
@ -387,7 +386,7 @@ USER = root
;ITERATE_BUFFER_SIZE = 50 ;ITERATE_BUFFER_SIZE = 50
;; ;;
;; Show the database generated SQL ;; Show the database generated SQL
LOG_SQL = false ; if unset defaults to true ;LOG_SQL = false
;; ;;
;; Maximum number of DB Connect retries ;; Maximum number of DB Connect retries
;DB_RETRIES = 10 ;DB_RETRIES = 10
@ -550,34 +549,32 @@ ENABLE = true
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Main Logger ;; Main Logger
;; ;;
;; Either "console", "file", "conn", "smtp" or "database", default is "console" ;; Either "console", "file" or "conn", default is "console"
;; Use comma to separate multiple modes, e.g. "console, file" ;; Use comma to separate multiple modes, e.g. "console, file"
MODE = console MODE = console
;; ;;
;; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical" or "None", default is "Info" ;; Either "Trace", "Debug", "Info", "Warn", "Error" or "None", default is "Info"
LEVEL = Info LEVEL = Info
;; ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;; Print Stacktrace with logs (rarely helpful, do not set) Either "Trace", "Debug", "Info", "Warn", "Error", default is "None"
;; Router Logger ;STACKTRACE_LEVEL = None
;; ;;
;; Switch off the router log ;; Buffer length of the channel, keep it as it is if you don't know what it is.
;DISABLE_ROUTER_LOG=false ;BUFFER_LEN = 10000
;; ;;
;; Set the log "modes" for the router log (if file is set the log file will default to router.log) ;; Sub logger modes, a single comma means use default MODE above, empty means disable it
ROUTER = console ;logger.access.MODE=
;logger.router.MODE=,
;logger.xorm.MODE=,
;; ;;
;; The router will log different things at different levels. ;; Collect SSH logs (Creates log from ssh git request)
;; ;;
;; * started messages will be logged at TRACE level ;ENABLE_SSH_LOG = false
;; * polling/completed routers will be logged at INFO
;; * slow routers will be logged at WARN
;; * failed routers will be logged at WARN
;;
;; The routing level will default to that of the system but individual router level can be set in
;; [log.<mode>.router] LEVEL
;; ;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; ;;
;; Access Logger (Creates log in NCSA common log format)
;;
;; Print request id which parsed from request headers in access log, when access log is enabled. ;; Print request id which parsed from request headers in access log, when access log is enabled.
;; * E.g: ;; * E.g:
;; * In request Header: X-Request-ID: test-id-123 ;; * In request Header: X-Request-ID: test-id-123
@ -587,57 +584,32 @@ ROUTER = console
;; If you configure more than one in the .ini file, it will match in the order of configuration, ;; If you configure more than one in the .ini file, it will match in the order of configuration,
;; and the first match will be finally printed in the log. ;; and the first match will be finally printed in the log.
;; * E.g: ;; * E.g:
;; * In reuqest Header: X-Trace-ID: trace-id-1q2w3e4r ;; * In request Header: X-Trace-ID: trace-id-1q2w3e4r
;; * Configuration in app.ini: REQUEST_ID_HEADERS = X-Request-ID, X-Trace-ID, X-Req-ID ;; * Configuration in app.ini: REQUEST_ID_HEADERS = X-Request-ID, X-Trace-ID, X-Req-ID
;; * Print in log: 127.0.0.1:58384 - - [14/Feb/2023:16:33:51 +0800] "trace-id-1q2w3e4r" ;; * Print in log: 127.0.0.1:58384 - - [14/Feb/2023:16:33:51 +0800] "trace-id-1q2w3e4r"
;; ;;
;; REQUEST_ID_HEADERS = ;REQUEST_ID_HEADERS =
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Access Logger (Creates log in NCSA common log format)
;;
;ENABLE_ACCESS_LOG = false
;;
;; Set the log "modes" for the access log (if file is set the log file will default to access.log)
;ACCESS = file
;; ;;
;; Sets the template used to create the access log. ;; Sets the template used to create the access log.
;ACCESS_LOG_TEMPLATE = {{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}" ;ACCESS_LOG_TEMPLATE = {{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; SSH log (Creates log from ssh git request)
;;
;ENABLE_SSH_LOG = false
;;
;; Other Settings
;;
;; Print Stacktraces with logs. (Rarely helpful.) Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "None"
;STACKTRACE_LEVEL = None
;;
;; Buffer length of the channel, keep it as it is if you don't know what it is.
;BUFFER_LEN = 10000
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Creating specific log configuration
;; ;;
;; You can set specific configuration for individual modes and subloggers ;; Log modes (aka log writers)
;; ;;
;; Configuration available to all log modes/subloggers ;[log.%(WriterMode)]
;MODE=console/file/conn/...
;LEVEL= ;LEVEL=
;FLAGS = stdflags ;FLAGS = stdflags
;EXPRESSION = ;EXPRESSION =
;PREFIX = ;PREFIX =
;COLORIZE = false ;COLORIZE = false
;; ;;
;; For "console" mode only ;[log.console]
;STDERR = false ;STDERR = false
;; ;;
;; For "file" mode only ;[log.file]
;LEVEL = ;; Set the file_name for the logger. If this is a relative path this will be relative to ROOT_PATH
;; Set the file_name for the logger. If this is a relative path this
;; will be relative to ROOT_PATH
;FILE_NAME = ;FILE_NAME =
;; This enables automated log rotate(switch of following options), default is true ;; This enables automated log rotate(switch of following options), default is true
;LOG_ROTATE = true ;LOG_ROTATE = true
@ -651,9 +623,8 @@ ROUTER = console
;COMPRESS = true ;COMPRESS = true
;; compression level see godoc for compress/gzip ;; compression level see godoc for compress/gzip
;COMPRESSION_LEVEL = -1 ;COMPRESSION_LEVEL = -1
; ;;
;; For "conn" mode only ;[log.conn]
;LEVEL =
;; Reconnect host for every single message, default is false ;; Reconnect host for every single message, default is false
;RECONNECT_ON_MSG = false ;RECONNECT_ON_MSG = false
;; Try to reconnect when connection is lost, default is false ;; Try to reconnect when connection is lost, default is false
@ -662,19 +633,6 @@ ROUTER = console
;PROTOCOL = tcp ;PROTOCOL = tcp
;; Host address ;; Host address
;ADDR = ;ADDR =
;
;; For "smtp" mode only
;LEVEL =
;; Name displayed in mail title, default is "Diagnostic message from server"
;SUBJECT = Diagnostic message from server
;; Mail server
;HOST =
;; Mailer user name and password
;USER =
;; Use PASSWD = `your password` for quoting if you use special characters in the password.
;PASSWD =
;; Receivers, can be one or more, e.g. 1@example.com,2@example.com
;RECEIVERS =
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

View file

@ -835,22 +835,16 @@ Default templates for project boards:
## Log (`log`) ## Log (`log`)
- `ROOT_PATH`: **\<empty\>**: Root path for log files. - `ROOT_PATH`: **\<empty\>**: Root path for log files.
- `MODE`: **console**: Logging mode. For multiple modes, use a comma to separate values. You can configure each mode in per mode log subsections `\[log.modename\]`. By default the file mode will log to `$ROOT_PATH/gitea.log`. - `MODE`: **console**: Logging mode. For multiple modes, use a comma to separate values. You can configure each mode in per mode log subsections `\[log.writer-mode-name\]`.
- `LEVEL`: **Info**: General log level. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\] - `LEVEL`: **Info**: General log level. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\]
- `STACKTRACE_LEVEL`: **None**: Default log level at which to log create stack traces. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\] - `STACKTRACE_LEVEL`: **None**: Default log level at which to log create stack traces (rarely useful, do not set it). \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\]
- `ENABLE_SSH_LOG`: **false**: save ssh log to log file - `ENABLE_SSH_LOG`: **false**: save ssh log to log file
- `ENABLE_XORM_LOG`: **true**: Set whether to perform XORM logging. Please note SQL statement logging can be disabled by setting `LOG_SQL` to false in the `[database]` section. - `logger.access.MODE`: **\<empty\>**: The "access" logger
- `logger.router.MODE`: **,**: The "router" logger, a single comma means it will use the default MODE above
### Router Log (`log`) - `logger.xorm.MODE`: **,**: The "xorm" logger
- `DISABLE_ROUTER_LOG`: **false**: Mute printing of the router log.
- `ROUTER`: **console**: The mode or name of the log the router should log to. (If you set this to `,` it will log to default Gitea logger.)
NB: You must have `DISABLE_ROUTER_LOG` set to `false` for this option to take effect. Configure each mode in per mode log subsections `\[log.modename.router\]`.
### Access Log (`log`) ### Access Log (`log`)
- `ENABLE_ACCESS_LOG`: **false**: Creates an access.log in NCSA common log format, or as per the following template
- `ACCESS`: **file**: Logging mode for the access logger, use a comma to separate values. Configure each mode in per mode log subsections `\[log.modename.access\]`. By default the file mode will log to `$ROOT_PATH/access.log`. (If you set this to `,` it will log to the default Gitea logger.)
- `ACCESS_LOG_TEMPLATE`: **`{{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"`**: Sets the template used to create the access log. - `ACCESS_LOG_TEMPLATE`: **`{{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"`**: Sets the template used to create the access log.
- The following variables are available: - The following variables are available:
- `Ctx`: the `context.Context` of the request. - `Ctx`: the `context.Context` of the request.
@ -858,31 +852,31 @@ Default templates for project boards:
- `Start`: the start time of the request. - `Start`: the start time of the request.
- `ResponseWriter`: the responseWriter from the request. - `ResponseWriter`: the responseWriter from the request.
- `RequestID`: the value matching REQUEST_ID_HEADERSdefault: `-`, if not matched. - `RequestID`: the value matching REQUEST_ID_HEADERSdefault: `-`, if not matched.
- You must be very careful to ensure that this template does not throw errors or panics as this template runs outside of the panic/recovery script. - You must be very careful to ensure that this template does not throw errors or panics as this template runs outside the panic/recovery script.
- `REQUEST_ID_HEADERS`: **\<empty\>**: You can configure multiple values that are splited by comma here. It will match in the order of configuration, and the first match will be finally printed in the access log. - `REQUEST_ID_HEADERS`: **\<empty\>**: You can configure multiple values that are splited by comma here. It will match in the order of configuration, and the first match will be finally printed in the access log.
- e.g. - e.g.
- In the Request Header: X-Request-ID: **test-id-123** - In the Request Header: X-Request-ID: **test-id-123**
- Configuration in app.ini: REQUEST_ID_HEADERS = X-Request-ID - Configuration in app.ini: REQUEST_ID_HEADERS = X-Request-ID
- Print in log: 127.0.0.1:58384 - - [14/Feb/2023:16:33:51 +0800] "**test-id-123**" ... - Print in log: 127.0.0.1:58384 - - [14/Feb/2023:16:33:51 +0800] "**test-id-123**" ...
### Log subsections (`log.name`, `log.name.*`) ### Log subsections (`log.<writer-mode-name>`)
- `LEVEL`: **log.LEVEL**: Sets the log-level of this sublogger. Defaults to the `LEVEL` set in the global `[log]` section. - `MODE`: **name**: Sets the mode of this log writer - Defaults to the provided subsection name. This allows you to have two different file loggers at different levels.
- `LEVEL`: **log.LEVEL**: Sets the log-level of this writer. Defaults to the `LEVEL` set in the global `[log]` section.
- `STACKTRACE_LEVEL`: **log.STACKTRACE_LEVEL**: Sets the log level at which to log stack traces. - `STACKTRACE_LEVEL`: **log.STACKTRACE_LEVEL**: Sets the log level at which to log stack traces.
- `MODE`: **name**: Sets the mode of this sublogger - Defaults to the provided subsection name. This allows you to have two different file loggers at different levels.
- `EXPRESSION`: **""**: A regular expression to match either the function name, file or message. Defaults to empty. Only log messages that match the expression will be saved in the logger. - `EXPRESSION`: **""**: A regular expression to match either the function name, file or message. Defaults to empty. Only log messages that match the expression will be saved in the logger.
- `FLAGS`: **stdflags**: A comma separated string representing the log flags. Defaults to `stdflags` which represents the prefix: `2009/01/23 01:23:23 ...a/b/c/d.go:23:runtime.Caller() [I]: message`. `none` means don't prefix log lines. See `modules/log/flags.go` for more information. - `FLAGS`: **stdflags**: A comma separated string representing the log flags. Defaults to `stdflags` which represents the prefix: `2009/01/23 01:23:23 ...a/b/c/d.go:23:runtime.Caller() [I]: message`. `none` means don't prefix log lines. See `modules/log/flags.go` for more information.
- `PREFIX`: **""**: An additional prefix for every log line in this logger. Defaults to empty. - `PREFIX`: **""**: An additional prefix for every log line in this logger. Defaults to empty.
- `COLORIZE`: **false**: Whether to colorize the log lines - `COLORIZE`: **false**: Whether to colorize the log lines
### Console log mode (`log.console`, `log.console.*`, or `MODE=console`) ### Console log mode (`log.console`, or `MODE=console`)
- For the console logger `COLORIZE` will default to `true` if not on windows or the terminal is determined to be able to color. - For the console logger `COLORIZE` will default to `true` if not on windows or the terminal is determined to be able to color.
- `STDERR`: **false**: Use Stderr instead of Stdout. - `STDERR`: **false**: Use Stderr instead of Stdout.
### File log mode (`log.file`, `log.file.*` or `MODE=file`) ### File log mode (`log.file`, or `MODE=file`)
- `FILE_NAME`: Set the file name for this logger. Defaults as described above. If relative will be relative to the `ROOT_PATH` - `FILE_NAME`: Set the file name for this logger. Defaults to `gitea.log` (exception: access log defaults to `access.log`). If relative will be relative to the `ROOT_PATH`
- `LOG_ROTATE`: **true**: Rotate the log files. - `LOG_ROTATE`: **true**: Rotate the log files.
- `MAX_SIZE_SHIFT`: **28**: Maximum size shift of a single file, 28 represents 256Mb. - `MAX_SIZE_SHIFT`: **28**: Maximum size shift of a single file, 28 represents 256Mb.
- `DAILY_ROTATE`: **true**: Rotate logs daily. - `DAILY_ROTATE`: **true**: Rotate logs daily.
@ -890,21 +884,13 @@ Default templates for project boards:
- `COMPRESS`: **true**: Compress old log files by default with gzip - `COMPRESS`: **true**: Compress old log files by default with gzip
- `COMPRESSION_LEVEL`: **-1**: Compression level - `COMPRESSION_LEVEL`: **-1**: Compression level
### Conn log mode (`log.conn`, `log.conn.*` or `MODE=conn`) ### Conn log mode (`log.conn`, or `MODE=conn`)
- `RECONNECT_ON_MSG`: **false**: Reconnect host for every single message. - `RECONNECT_ON_MSG`: **false**: Reconnect host for every single message.
- `RECONNECT`: **false**: Try to reconnect when connection is lost. - `RECONNECT`: **false**: Try to reconnect when connection is lost.
- `PROTOCOL`: **tcp**: Set the protocol, either "tcp", "unix" or "udp". - `PROTOCOL`: **tcp**: Set the protocol, either "tcp", "unix" or "udp".
- `ADDR`: **:7020**: Sets the address to connect to. - `ADDR`: **:7020**: Sets the address to connect to.
### SMTP log mode (`log.smtp`, `log.smtp.*` or `MODE=smtp`)
- `USER`: User email address to send from.
- `PASSWD`: Password for the smtp server.
- `HOST`: **127.0.0.1:25**: The SMTP host to connect to.
- `RECEIVERS`: Email addresses to send to.
- `SUBJECT`: **Diagnostic message from Gitea**
## Cron (`cron`) ## Cron (`cron`)
- `ENABLED`: **false**: Enable to run all cron tasks periodically with default settings. - `ENABLED`: **false**: Enable to run all cron tasks periodically with default settings.

View file

@ -0,0 +1,295 @@
---
date: "2019-04-02T17:06:00+01:00"
title: "Logging Configuration"
slug: "logging-config"
weight: 40
toc: false
draft: false
aliases:
- /en-us/logging-configuration
menu:
sidebar:
parent: "administration"
name: "Logging Configuration"
weight: 40
identifier: "logging-config"
---
# Logging Configuration
The logging configuration of Gitea mainly consists of 3 types of components:
- The `[log]` section for general configuration
- `[log.<mode-name>]` sections for the configuration of different log writers to output logs, aka: "writer mode", the mode name is also used as "writer name".
- The `[log]` section can also contain sub-logger configurations following the key schema `logger.<logger-name>.<CONFIG-KEY>`
There is a fully functional log output by default, so it is not necessary to define one.
**Table of Contents**
{{< toc >}}
## Collecting Logs for Help
To collect logs for help and issue report, see [Support Options]({{< relref "doc/help/support.en-us.md" >}}).
## The `[log]` section
Configuration of logging facilities in Gitea happen in the `[log]` section and its subsections.
In the top level `[log]` section the following configurations can be placed:
- `ROOT_PATH`: (Default: **%(GITEA_WORK_DIR)/log**): Base path for log files
- `MODE`: (Default: **console**) List of log outputs to use for the Default logger.
- `LEVEL`: (Default: **Info**) Least severe log events to persist, case-insensitive. Possible values are: `Trace`, `Debug`, `Info`, `Warn`, `Error`, `Fatal`.
- `STACKTRACE_LEVEL`: (Default: **None**) For this and more severe events the stacktrace will be printed upon getting logged.
And it can contain the following sub-loggers:
- `logger.router.MODE`: (Default: **,**): List of log outputs to use for the Router logger.
- `logger.access.MODE`: (Default: **\<empty\>**) List of log outputs to use for the Access logger. By default, the access logger is disabled.
- `logger.xorm.MODE`: (Default: **,**) List of log outputs to use for the XORM logger.
Setting a sub-logger's mode to a comma (`,`) means it uses the default global `MODE`.
## Quick samples
### Default (empty) Configuration
The empty configuration is equivalent to default:
```ini
[log]
ROOT_PATH = %(GITEA_WORK_DIR)/log
MODE = console
LEVEL = Info
STACKTRACE_LEVEL = None
logger.router.MODE = ,
logger.xorm.MODE = ,
logger.access.MODE =
; this is the config options of "console" mode (used by MODE=console above)
[log.console]
MODE = console
FLAGS = stdflags
PREFIX =
COLORIZE = true
```
This is equivalent to sending all logs to the console, with default Golang log being sent to the console log too.
This is only a sample, and it is the default; you do not need to write it into your configuration file.
### Disable Router logs and record some access logs to file
The Router logger is disabled, the access logs (>=Warn) goes into `access.log`:
```ini
[log]
logger.router.MODE =
logger.access.MODE = access-file
[log.access-file]
MODE = file
LEVEL = Warn
FILE_NAME = access.log
```
### Set different log levels for different modes
Default logs (>=Warn) goes into `gitea.log`, while Error logs goes into `file-error.log`:
```ini
[log]
LEVEL = Warn
MODE = file, file-error
; by default, the "file" mode will record logs to %(log.ROOT_PATH)/gitea.log, so we don't need to set it
; [log.file]
[log.file-error]
LEVEL = Error
FILE_NAME = file-error.log
```
## Log outputs (mode and writer)
Gitea provides the following log output writers:
- `console` - Log to `stdout` (or `stderr` if it is set in the config)
- `file` - Log to a file
- `conn` - Log to a socket (network or unix)
### Common configuration
Certain configuration is common to all modes of log output:
- `MODE` is the mode of the log output writer. It will default to the mode name in the ini section. Thus `[log.console]` will default to `MODE = console`.
- `LEVEL` is the lowest level that this output will log.
- `STACKTRACE_LEVEL` is the lowest level that this output will print a stacktrace.
- `COLORIZE` will default to `true` for `console` as described, otherwise it will default to `false`.
#### `EXPRESSION`
`EXPRESSION` represents a regular expression that log events must match to be logged by the output writer.
Either the log message (with colors removed) must match, or the `longfilename:linenumber:functionname` must match.
NB: the whole message or string doesn't need to completely match.
Please note this expression will be run in the writer's goroutine but not the logging event goroutine.
#### `FLAGS`
`FLAGS` represents the preceding logging context information that is
printed before each message. It is a comma-separated string set. The order of values does not matter.
It defaults to `stdflags` (= `date,time,medfile,shortfuncname,levelinitial`)
Possible values are:
- `none` or `,` - No flags.
- `date` - the date in the local time zone: `2009/01/23`.
- `time` - the time in the local time zone: `01:23:23`.
- `microseconds` - microsecond resolution: `01:23:23.123123`. Assumes time.
- `longfile` - full file name and line number: `/a/b/c/d.go:23`.
- `shortfile` - final file name element and line number: `d.go:23`.
- `funcname` - function name of the caller: `runtime.Caller()`.
- `shortfuncname` - last part of the function name. Overrides `funcname`.
- `utc` - if date or time is set, use UTC rather than the local time zone.
- `levelinitial` - initial character of the provided level in brackets eg. `[I]` for info.
- `level` - level in brackets `[INFO]`.
- `gopid` - the Goroutine-PID of the context.
- `medfile` - last 20 characters of the filename - equivalent to `shortfile,longfile`.
- `stdflags` - equivalent to `date,time,medfile,shortfuncname,levelinitial`.
### Console mode
In this mode the logger will forward log messages to the stdout and
stderr streams attached to the Gitea process.
For loggers in console mode, `COLORIZE` will default to `true` if not
on Windows, or if the Windows terminal can be set into ANSI mode or is a
Cygwin or MSYS pipe.
Settings:
- `STDERR`: **false**: Whether the logger should print to `stderr` instead of `stdout`.
### File mode
In this mode the logger will save log messages to a file.
Settings:
- `FILE_NAME`: The file to write the log events to, relative to `ROOT_PATH`, Default to `%(ROOT_PATH)/gitea.log`. Exception: access log will default to `%(ROOT_PATH)/access.log`.
- `MAX_SIZE_SHIFT`: **28**: Maximum size shift of a single file. 28 represents 256Mb. For details see below.
- `LOG_ROTATE`: **true**: Whether to rotate the log files. TODO: if false, will it delete instead on daily rotate, or do nothing?
- `DAILY_ROTATE`: **true**: Whether to rotate logs daily.
- `MAX_DAYS`: **7**: Delete rotated log files after this number of days.
- `COMPRESS`: **true**: Whether to compress old log files by default with gzip.
- `COMPRESSION_LEVEL`: **-1**: Compression level. For details see below.
`MAX_SIZE_SHIFT` defines the maximum size of a file by left shifting 1 the given number of times (`1 << x`).
The exact behavior at the time of v1.17.3 can be seen [here](https://github.com/go-gitea/gitea/blob/v1.17.3/modules/setting/log.go#L185).
The useful values of `COMPRESSION_LEVEL` are from 1 to (and including) 9, where higher numbers mean better compression.
Beware that better compression might come with higher resource usage.
Must be preceded with a `-` sign.
### Conn mode
In this mode the logger will send log messages over a network socket.
Settings:
- `ADDR`: **:7020**: Sets the address to connect to.
- `PROTOCOL`: **tcp**: Set the protocol, either "tcp", "unix" or "udp".
- `RECONNECT`: **false**: Try to reconnect when connection is lost.
- `RECONNECT_ON_MSG`: **false**: Reconnect host for every single message.
### The "Router" logger
The Router logger logs the following message types when Gitea's route handlers work:
- `started` messages will be logged at TRACE level
- `polling`/`completed` routers will be logged at INFO. Exception: "/assets" static resource requests are also logged at TRACE.
- `slow` routers will be logged at WARN
- `failed` routers will be logged at WARN
### The "XORM" logger
To make XORM output SQL logs, the `LOG_SQL` in the `[database]` section should also be set to `true`.
### The "Access" logger
The Access logger is a new logger since Gitea 1.9. It provides an NCSA
Common Log compliant log format. It's highly configurable but caution
should be taken when changing its template. The main benefit of this
logger is that Gitea can now log accesses in a standard log format so
standard tools may be used.
You can enable this logger using `logger.access.MODE = ...`.
If desired the format of the Access logger can be changed by changing
the value of the `ACCESS_LOG_TEMPLATE`.
Please note, the access logger will log at `INFO` level, setting the
`LEVEL` of this logger to `WARN` or above will result in no access logs.
#### The ACCESS_LOG_TEMPLATE
This value represents a go template. Its default value is
```tmpl
{{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"
```
The template is passed following options:
- `Ctx` is the `context.Context`
- `Identity` is the `SignedUserName` or `"-"` if the user is not logged in
- `Start` is the start time of the request
- `ResponseWriter` is the `http.ResponseWriter`
Caution must be taken when changing this template as it runs outside of
the standard panic recovery trap. The template should also be kept as simple
as possible, as it runs for every request.
## Releasing-and-Reopening, Pausing and Resuming logging
If you are running on Unix you may wish to release-and-reopen logs in order to use `logrotate` or other tools.
It is possible to force Gitea to release and reopen its logging files and connections by sending `SIGUSR1` to the
running process, or running `gitea manager logging release-and-reopen`.
Alternatively, you may wish to pause and resume logging - this can be accomplished through the use of the
`gitea manager logging pause` and `gitea manager logging resume` commands. Please note that whilst logging
is paused, log events below INFO level will not be stored, and only a limited number of events will be stored.
Logging may block, albeit temporarily, slowing Gitea considerably whilst paused - therefore it is
recommended that pausing is only done for a very short period of time.
## Adding and removing logging whilst Gitea is running
It is possible to add and remove logging whilst Gitea is running using the `gitea manager logging add` and `remove` subcommands.
This functionality can only adjust running log systems and cannot be used to start the access or router loggers if they
were not already initialized. If you wish to start these systems you are advised to adjust the app.ini and (gracefully) restart
the Gitea service.
The main intention of these commands is to easily add a temporary logger to investigate problems on running systems where a restart
may cause the issue to disappear.
## Using `logrotate` instead of built-in log rotation
Gitea includes built-in log rotation, which should be enough for most deployments. However, if you instead want to use the `logrotate` utility:
- Disable built-in log rotation by setting `LOG_ROTATE` to `false` in your `app.ini`.
- Install `logrotate`.
- Configure `logrotate` to match your deployment requirements, see `man 8 logrotate` for configuration syntax details.
In the `postrotate/endscript` block send Gitea a `USR1` signal via `kill -USR1` or `kill -10` to the `gitea` process itself,
or run `gitea manager logging release-and-reopen` (with the appropriate environment).
Ensure that your configurations apply to all files emitted by Gitea loggers as described in the above sections.
- Always do `logrotate /etc/logrotate.conf --debug` to test your configurations.
- If you are using docker and are running from outside the container you can use
`docker exec -u $OS_USER $CONTAINER_NAME sh -c 'gitea manager logging release-and-reopen'`
or `docker exec $CONTAINER_NAME sh -c '/bin/s6-svc -1 /etc/s6/gitea/'` or send `USR1` directly to the Gitea process itself.
The next `logrotate` jobs will include your configurations, so no restart is needed.
You can also immediately reload `logrotate` with `logrotate /etc/logrotate.conf --force`.

View file

@ -1,524 +0,0 @@
---
date: "2019-04-02T17:06:00+01:00"
title: "Logging Configuration"
slug: "logging-configuration"
weight: 40
toc: false
draft: false
aliases:
- /en-us/logging-configuration
menu:
sidebar:
parent: "administration"
name: "Logging Configuration"
weight: 40
identifier: "logging-configuration"
---
# Logging Configuration
The logging configuration of Gitea mainly consists of 3 types of components:
- The `[log]` section for general configuration
- `[log.<sublogger>]` sections for the configuration of different log outputs
- `[log.<sublogger>.<group>]` sections for output specific configuration of a log group
As mentioned below, there is a fully functional log output by default, so it is not necessary to define one.
**Table of Contents**
{{< toc >}}
## Collecting Logs for Help
To collect logs for help and issue report, see [Support Options]({{< relref "doc/help/support.en-us.md" >}}).
## The `[log]` section
Configuration of logging facilities in Gitea happen in the `[log]` section and it's subsections.
In the top level `[log]` section the following configurations can be placed:
- `ROOT_PATH`: (Default: **%(GITEA_WORK_DIR)/log**): Base path for log files
- `MODE`: (Default: **console**) List of log outputs to use for the Default logger.
- `ROUTER`: (Default: **console**): List of log outputs to use for the Router logger.
- `ACCESS`: List of log outputs to use for the Access logger.
- `XORM`: (Default: **,**) List of log outputs to use for the XORM logger.
- `ENABLE_ACCESS_LOG`: (Default: **false**): whether the Access logger is allowed to emit logs
- `ENABLE_XORM_LOG`: (Default: **true**): whether the XORM logger is allowed to emit logs
For details on the loggers check the "Log Groups" section.
Important: log outputs won't be used if you don't enable them for the desired loggers in the corresponding list value.
Lists are specified as comma separated values. This format also works in subsection.
This section may be used for defining default values for subsections.
Examples:
- `LEVEL`: (Default: **Info**) Least severe log events to persist. Case insensitive. The full list of levels as of v1.17.3 can be read [here](https://github.com/go-gitea/gitea/blob/v1.17.3/custom/conf/app.example.ini#L507).
- `STACKTRACE_LEVEL`: (Default: **None**) For this and more severe events the stacktrace will be printed upon getting logged.
Some values are not inherited by subsections. For details see the "Non-inherited default values" section.
## Log outputs
Log outputs are the targets to which log messages will be sent.
The content and the format of the log messages to be saved can be configured in these.
Log outputs are also called subloggers.
Gitea provides 4 possible log outputs:
- `console` - Log to `os.Stdout` or `os.Stderr`
- `file` - Log to a file
- `conn` - Log to a socket (network or unix)
- `smtp` - Log via email
By default, Gitea has a `console` output configured, which is used by the loggers as seen in the section "The log section" above.
### Common configuration
Certain configuration is common to all modes of log output:
- `MODE` is the mode of the log output. It will default to the sublogger
name, thus `[log.console.router]` will default to `MODE = console`.
For mode specific confgurations read further.
- `LEVEL` is the lowest level that this output will log. This value
is inherited from `[log]` and in the case of the non-default loggers
from `[log.sublogger]`.
- `STACKTRACE_LEVEL` is the lowest level that this output will print
a stacktrace. This value is inherited.
- `COLORIZE` will default to `true` for `console` as
described, otherwise it will default to `false`.
### Non-inherited default values
There are several values which are not inherited as described above but
rather default to those specific to type of logger, these are:
`EXPRESSION`, `FLAGS`, `PREFIX` and `FILE_NAME`.
#### `EXPRESSION`
`EXPRESSION` represents a regular expression that log events must match to be logged by the sublogger. Either the log message, (with colors removed), must match or the `longfilename:linenumber:functionname` must match. NB: the whole message or string doesn't need to completely match.
Please note this expression will be run in the sublogger's goroutine
not the logging event subroutine. Therefore it can be complicated.
#### `FLAGS`
`FLAGS` represents the preceding logging context information that is
printed before each message. It is a comma-separated string set. The order of values does not matter.
Possible values are:
- `none` or `,` - No flags.
- `date` - the date in the local time zone: `2009/01/23`.
- `time` - the time in the local time zone: `01:23:23`.
- `microseconds` - microsecond resolution: `01:23:23.123123`. Assumes
time.
- `longfile` - full file name and line number: `/a/b/c/d.go:23`.
- `shortfile` - final file name element and line number: `d.go:23`.
- `funcname` - function name of the caller: `runtime.Caller()`.
- `shortfuncname` - last part of the function name. Overrides
`funcname`.
- `utc` - if date or time is set, use UTC rather than the local time
zone.
- `levelinitial` - Initial character of the provided level in brackets eg. `[I]` for info.
- `level` - Provided level in brackets `[INFO]`
- `medfile` - Last 20 characters of the filename - equivalent to
`shortfile,longfile`.
- `stdflags` - Equivalent to `date,time,medfile,shortfuncname,levelinitial`
### Console mode
In this mode the logger will forward log messages to the stdout and
stderr streams attached to the Gitea process.
For loggers in console mode, `COLORIZE` will default to `true` if not
on windows, or the windows terminal can be set into ANSI mode or is a
cygwin or Msys pipe.
Settings:
- `STDERR`: **false**: Whether the logger should print to `stderr` instead of `stdout`.
### File mode
In this mode the logger will save log messages to a file.
Settings:
- `FILE_NAME`: The file to write the log events to. For details see below.
- `MAX_SIZE_SHIFT`: **28**: Maximum size shift of a single file. 28 represents 256Mb. For details see below.
- `LOG_ROTATE` **true**: Whether to rotate the log files. TODO: if false, will it delete instead on daily rotate, or do nothing?.
- `DAILY_ROTATE`: **true**: Whether to rotate logs daily.
- `MAX_DAYS`: **7**: Delete rotated log files after this number of days.
- `COMPRESS`: **true**: Whether to compress old log files by default with gzip.
- `COMPRESSION_LEVEL`: **-1**: Compression level. For details see below.
The default value of `FILE_NAME` depends on the respective logger facility.
If unset, their own default will be used.
If set it will be relative to the provided `ROOT_PATH` in the master `[log]` section.
`MAX_SIZE_SHIFT` defines the maximum size of a file by left shifting 1 the given number of times (`1 << x`).
The exact behavior at the time of v1.17.3 can be seen [here](https://github.com/go-gitea/gitea/blob/v1.17.3/modules/setting/log.go#L185).
The useful values of `COMPRESSION_LEVEL` are from 1 to (and including) 9, where higher numbers mean better compression.
Beware that better compression might come with higher resource usage.
Must be preceded with a `-` sign.
### Conn mode
In this mode the logger will send log messages over a network socket.
Settings:
- `ADDR`: **:7020**: Sets the address to connect to.
- `PROTOCOL`: **tcp**: Set the protocol, either "tcp", "unix" or "udp".
- `RECONNECT`: **false**: Try to reconnect when connection is lost.
- `RECONNECT_ON_MSG`: **false**: Reconnect host for every single message.
### SMTP mode
In this mode the logger will send log messages in email.
It is not recommended to use this logger to send general logging
messages. However, you could perhaps set this logger to work on `FATAL` messages only.
Settings:
- `HOST`: **127.0.0.1:25**: The SMTP host to connect to.
- `USER`: User email address to send from.
- `PASSWD`: Password for the smtp server.
- `RECEIVERS`: Email addresses to send to.
- `SUBJECT`: **Diagnostic message from Gitea**. The content of the email's subject field.
## Log Groups
The fundamental thing to be aware of in Gitea is that there are several
log groups:
- The "Default" logger
- The Router logger
- The Access logger
- The XORM logger
There is also the go log logger.
### The go log logger
Go provides its own extremely basic logger in the `log` package,
however, this is not sufficient for our purposes as it does not provide
a way of logging at multiple levels, nor does it provide a good way of
controlling where these logs are logged except through setting of a
writer.
We have therefore redirected this logger to our Default logger, and we
will log anything that is logged using the go logger at the INFO level.
### The "Default" logger
Calls to `log.Info`, `log.Debug`, `log.Error` etc. from the `code.gitea.io/gitea/modules/log` package will log to this logger.
You can configure the outputs of this logger by setting the `MODE`
value in the `[log]` section of the configuration.
Each output sublogger is configured in a separate `[log.sublogger.default]`
which inherits from the sublogger `[log.sublogger]` section and from the
generic `[log]` section, but there are certain default values. These will
not be inherited from the `[log]` section:
- `FLAGS` is `stdflags` (Equal to
`date,time,medfile,shortfuncname,levelinitial`)
- `FILE_NAME` will default to `%(ROOT_PATH)/gitea.log`
- `EXPRESSION` will default to `""`
- `PREFIX` will default to `""`
The provider type of the sublogger can be set using the `MODE` value in
its subsection, but will default to the name. This allows you to have
multiple subloggers that will log to files.
### The "Router" logger
The Router logger has been substantially changed in v1.17. If you are using the router logger for fail2ban or other monitoring
you will need to update this configuration.
You can disable Router log by setting `DISABLE_ROUTER_LOG` or by setting all of its sublogger configurations to `none`.
You can configure the outputs of this
router log by setting the `ROUTER` value in the `[log]` section of the
configuration. `ROUTER` will default to `console` if unset and will default to same level as main logger.
The Router logger logs the following:
- `started` messages will be logged at TRACE level
- `polling`/`completed` routers will be logged at INFO
- `slow` routers will be logged at WARN
- `failed` routers will be logged at WARN
The logging level for the router will default to that of the main configuration. Set `[log.<mode>.router]` `LEVEL` to change this.
Each output sublogger for this logger is configured in
`[log.sublogger.router]` sections. There are certain default values
which will not be inherited from the `[log]` or relevant
`[log.sublogger]` sections:
- `FILE_NAME` will default to `%(ROOT_PATH)/router.log`
- `FLAGS` defaults to `date,time`
- `EXPRESSION` will default to `""`
- `PREFIX` will default to `""`
NB: You can redirect the router logger to send its events to the Gitea
log using the value: `ROUTER = ,`
### The "Access" logger
The Access logger is a new logger for version 1.9. It provides a NCSA
Common Log compliant log format. It's highly configurable but caution
should be taken when changing its template. The main benefit of this
logger is that Gitea can now log accesses in a standard log format so
standard tools may be used.
You can enable this logger using `ENABLE_ACCESS_LOG`. Its outputs are
configured by setting the `ACCESS` value in the `[log]` section of the
configuration. `ACCESS` defaults to `file` if unset.
Each output sublogger for this logger is configured in
`[log.sublogger.access]` sections. There are certain default values
which will not be inherited from the `[log]` or relevant
`[log.sublogger]` sections:
- `FILE_NAME` will default to `%(ROOT_PATH)/access.log`
- `FLAGS` defaults to `` or None
- `EXPRESSION` will default to `""`
- `PREFIX` will default to `""`
If desired the format of the Access logger can be changed by changing
the value of the `ACCESS_LOG_TEMPLATE`.
Please note, the access logger will log at `INFO` level, setting the
`LEVEL` of this logger to `WARN` or above will result in no access logs.
NB: You can redirect the access logger to send its events to the Gitea
log using the value: `ACCESS = ,`
#### The ACCESS_LOG_TEMPLATE
This value represent a go template. It's default value is:
`{{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"`
The template is passed following options:
- `Ctx` is the `context.Context`
- `Identity` is the `SignedUserName` or `"-"` if the user is not logged
in
- `Start` is the start time of the request
- `ResponseWriter` is the `http.ResponseWriter`
Caution must be taken when changing this template as it runs outside of
the standard panic recovery trap. The template should also be as simple
as it runs for every request.
### The "XORM" logger
The XORM logger is a long-standing logger that exists to collect XORM
log events. It is enabled by default but can be switched off by setting
`ENABLE_XORM_LOG` to `false` in the `[log]` section. Its outputs are
configured by setting the `XORM` value in the `[log]` section of the
configuration. `XORM` defaults to `,` if unset, meaning it is redirected
to the main Gitea log.
XORM will log SQL events by default. This can be changed by setting
the `LOG_SQL` value to `false` in the `[database]` section.
Each output sublogger for this logger is configured in
`[log.sublogger.xorm]` sections. There are certain default values
which will not be inherited from the `[log]` or relevant
`[log.sublogger]` sections:
- `FILE_NAME` will default to `%(ROOT_PATH)/xorm.log`
- `FLAGS` defaults to `date,time`
- `EXPRESSION` will default to `""`
- `PREFIX` will default to `""`
## Debugging problems
When submitting logs in Gitea issues it is often helpful to submit
merged logs obtained by either by redirecting the console log to a file or
copying and pasting it. To that end it is recommended to set your logging to:
```ini
[database]
LOG_SQL = false ; SQL logs are rarely helpful unless we specifically ask for them
...
[log]
MODE = console
LEVEL = debug ; please set the level to debug when we are debugging a problem
ROUTER = console
COLORIZE = false ; this can be true if you can strip out the ansi coloring
ENABLE_SSH_LOG = true ; shows logs related to git over SSH.
```
Sometimes it will be helpful get some specific `TRACE` level logging restricted
to messages that match a specific `EXPRESSION`. Adjusting the `MODE` in the
`[log]` section to `MODE = console,traceconsole` to add a new logger output
`traceconsole` and then adding its corresponding section would be helpful:
```ini
[log.traceconsole] ; traceconsole here is just a name
MODE = console ; this is the output that the traceconsole writes to
LEVEL = trace
EXPRESSION = ; putting a string here will restrict this logger to logging only those messages that match this expression
```
(It's worth noting that log messages that match the expression at or above debug
level will get logged twice so don't worry about that.)
`STACKTRACE_LEVEL` should generally be left unconfigured (and hence kept at
`none`). There are only very specific occasions when it useful.
## Empty Configuration
The empty configuration is equivalent to:
```ini
[log]
ROOT_PATH = %(GITEA_WORK_DIR)/log
MODE = console
LEVEL = Info
STACKTRACE_LEVEL = None
ENABLE_ACCESS_LOG = false
ENABLE_XORM_LOG = true
XORM = ,
[log.console]
MODE = console
LEVEL = %(LEVEL)
STACKTRACE_LEVEL = %(STACKTRACE_LEVEL)
FLAGS = stdflags
PREFIX =
COLORIZE = true # Or false if your windows terminal cannot color
```
This is equivalent to sending all logs to the console, with default go log being sent to the console log too.
## Releasing-and-Reopening, Pausing and Resuming logging
If you are running on Unix you may wish to release-and-reopen logs in order to use `logrotate` or other tools.
It is possible force Gitea to release and reopen it's logging files and connections by sending `SIGUSR1` to the
running process, or running `gitea manager logging release-and-reopen`.
Alternatively, you may wish to pause and resume logging - this can be accomplished through the use of the
`gitea manager logging pause` and `gitea manager logging resume` commands. Please note that whilst logging
is paused log events below INFO level will not be stored and only a limited number of events will be stored.
Logging may block, albeit temporarily, slowing Gitea considerably whilst paused - therefore it is
recommended that pausing only done for a very short period of time.
## Adding and removing logging whilst Gitea is running
It is possible to add and remove logging whilst Gitea is running using the `gitea manager logging add` and `remove` subcommands.
This functionality can only adjust running log systems and cannot be used to start the access or router loggers if they
were not already initialized. If you wish to start these systems you are advised to adjust the app.ini and (gracefully) restart
the Gitea service.
The main intention of these commands is to easily add a temporary logger to investigate problems on running systems where a restart
may cause the issue to disappear.
## Log colorization
Logs to the console will be colorized by default when not running on
Windows. Terminal sniffing will occur on Windows and if it is
determined that we are running on a terminal capable of color we will
colorize.
Further, on \*nix it is becoming common to have file logs that are
colored by default. Therefore file logs will be colorised by default
when not running on Windows.
You can switch on or off colorization by using the `COLORIZE` value.
From a development point of view. If you write
`log.Info("A %s string", "formatted")` the `formatted` part of the log
message will be Bolded on colorized logs.
You can change this by either rendering the formatted string yourself.
Or you can wrap the value in a `log.ColoredValue` struct.
The `log.ColoredValue` struct contains a pointer to value, a pointer to
string of bytes which should represent a color and second set of reset
bytes. Pointers were chosen to prevent copying of large numbers of
values. There are several helper methods:
- `log.NewColoredValue` takes a value and 0 or more color attributes
that represent the color. If 0 are provided it will default to a cached
bold. Note, it is recommended that color bytes constructed from
attributes should be cached if this is a commonly used log message.
- `log.NewColoredValuePointer` takes a pointer to a value, and
0 or more color attributes that represent the color.
- `log.NewColoredValueBytes` takes a value and a pointer to an array
of bytes representing the color.
These functions will not double wrap a `log.ColoredValue`. They will
also set the `resetBytes` to the cached `resetBytes`.
The `colorBytes` and `resetBytes` are not exported to prevent
accidental overwriting of internal values.
## ColorFormat & ColorFormatted
Structs may implement the `log.ColorFormatted` interface by implementing the `ColorFormat(fmt.State)` function.
If a `log.ColorFormatted` struct is logged with `%-v` format, its `ColorFormat` will be used instead of the usual `%v`. The full `fmt.State` will be passed to allow implementers to look at additional flags.
In order to help implementers provide `ColorFormat` methods. There is a
`log.ColorFprintf(...)` function in the log module that will wrap values in `log.ColoredValue` and recognise `%-v`.
In general it is recommended not to make the results of this function too verbose to help increase its versatility. Usually this should simply be an `ID`:`Name`. If you wish to make a more verbose result, it is recommended to use `%-+v` as your marker.
## Log Spoofing protection
In order to protect the logs from being spoofed with cleverly
constructed messages. Newlines are now prefixed with a tab and control
characters except those used in an ANSI CSI are escaped with a
preceding `\` and their octal value.
## Creating a new named logger group
Should a developer wish to create a new named logger, `NEWONE`. It is
recommended to add an `ENABLE_NEWONE_LOG` value to the `[log]`
section, and to add a new `NEWONE` value for the modes.
A function like `func newNewOneLogService()` is recommended to manage
construction of the named logger. e.g.
```go
func newNewoneLogService() {
EnableNewoneLog = Cfg.Section("log").Key("ENABLE_NEWONE_LOG").MustBool(false)
Cfg.Section("log").Key("NEWONE").MustString("file") // or console? or "," if you want to send this to default logger by default
if EnableNewoneLog {
options := newDefaultLogOptions()
options.filename = filepath.Join(LogRootPath, "newone.log")
options.flags = "stdflags"
options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
generateNamedLogger("newone", options)
}
}
```
You should then add `newNewoneLogService` (the function defined above) to `NewServices()` in
`modules/setting/setting.go`
## Using `logrotate` instead of built-in log rotation
Gitea includes built-in log rotation, which should be enough for most deployments. However, if you instead want to use the `logrotate` utility:
- Disable built-in log rotation by setting `LOG_ROTATE` to `false` in your `app.ini`.
- Install `logrotate`.
- Configure `logrotate` to match your deployment requirements, see `man 8 logrotate` for configuration syntax details. In the `postrotate/endscript` block send Gitea a `USR1` signal via `kill -USR1` or `kill -10` to the `gitea` process itself, or run `gitea manager logging release-and-reopen` (with the appropriate environment). Ensure that your configurations apply to all files emitted by Gitea loggers as described in the above sections.
- Always do `logrotate /etc/logrotate.conf --debug` to test your configurations.
- If you are using docker and are running from outside of the container you can use `docker exec -u $OS_USER $CONTAINER_NAME sh -c 'gitea manager logging release-and-reopen'` or `docker exec $CONTAINER_NAME sh -c '/bin/s6-svc -1 /etc/s6/gitea/'` or send `USR1` directly to the Gitea process itself.
The next `logrotate` jobs will include your configurations, so no restart is needed. You can also immediately reload `logrotate` with `logrotate /etc/logrotate.conf --force`.

View file

@ -120,6 +120,8 @@ arguments - which can alternatively be run by running the subcommand web.`
if err != nil { if err != nil {
log.Fatal("Failed to run app with %s: %v", os.Args, err) log.Fatal("Failed to run app with %s: %v", os.Args, err)
} }
log.GetManager().Close()
} }
func setFlagsAndBeforeOnSubcommands(command *cli.Command, defaultFlags []cli.Flag, before cli.BeforeFunc) { func setFlagsAndBeforeOnSubcommands(command *cli.Command, defaultFlags []cli.Flag, before cli.BeforeFunc) {

View file

@ -14,67 +14,62 @@ import (
// XORMLogBridge a logger bridge from Logger to xorm // XORMLogBridge a logger bridge from Logger to xorm
type XORMLogBridge struct { type XORMLogBridge struct {
showSQLint *int32 showSQL atomic.Bool
logger log.Logger logger log.Logger
} }
// NewXORMLogger inits a log bridge for xorm // NewXORMLogger inits a log bridge for xorm
func NewXORMLogger(showSQL bool) xormlog.Logger { func NewXORMLogger(showSQL bool) xormlog.Logger {
showSQLint := int32(0) l := &XORMLogBridge{logger: log.GetLogger("xorm")}
if showSQL { l.showSQL.Store(showSQL)
showSQLint = 1 return l
}
return &XORMLogBridge{
showSQLint: &showSQLint,
logger: log.GetLogger("xorm"),
}
} }
const stackLevel = 8 const stackLevel = 8
// Log a message with defined skip and at logging level // Log a message with defined skip and at logging level
func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...interface{}) error { func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...interface{}) {
return l.logger.Log(skip+1, level, format, v...) l.logger.Log(skip+1, level, format, v...)
} }
// Debug show debug log // Debug show debug log
func (l *XORMLogBridge) Debug(v ...interface{}) { func (l *XORMLogBridge) Debug(v ...interface{}) {
_ = l.Log(stackLevel, log.DEBUG, fmt.Sprint(v...)) l.Log(stackLevel, log.DEBUG, "%s", fmt.Sprint(v...))
} }
// Debugf show debug log // Debugf show debug log
func (l *XORMLogBridge) Debugf(format string, v ...interface{}) { func (l *XORMLogBridge) Debugf(format string, v ...interface{}) {
_ = l.Log(stackLevel, log.DEBUG, format, v...) l.Log(stackLevel, log.DEBUG, format, v...)
} }
// Error show error log // Error show error log
func (l *XORMLogBridge) Error(v ...interface{}) { func (l *XORMLogBridge) Error(v ...interface{}) {
_ = l.Log(stackLevel, log.ERROR, fmt.Sprint(v...)) l.Log(stackLevel, log.ERROR, "%s", fmt.Sprint(v...))
} }
// Errorf show error log // Errorf show error log
func (l *XORMLogBridge) Errorf(format string, v ...interface{}) { func (l *XORMLogBridge) Errorf(format string, v ...interface{}) {
_ = l.Log(stackLevel, log.ERROR, format, v...) l.Log(stackLevel, log.ERROR, format, v...)
} }
// Info show information level log // Info show information level log
func (l *XORMLogBridge) Info(v ...interface{}) { func (l *XORMLogBridge) Info(v ...interface{}) {
_ = l.Log(stackLevel, log.INFO, fmt.Sprint(v...)) l.Log(stackLevel, log.INFO, "%s", fmt.Sprint(v...))
} }
// Infof show information level log // Infof show information level log
func (l *XORMLogBridge) Infof(format string, v ...interface{}) { func (l *XORMLogBridge) Infof(format string, v ...interface{}) {
_ = l.Log(stackLevel, log.INFO, format, v...) l.Log(stackLevel, log.INFO, format, v...)
} }
// Warn show warning log // Warn show warning log
func (l *XORMLogBridge) Warn(v ...interface{}) { func (l *XORMLogBridge) Warn(v ...interface{}) {
_ = l.Log(stackLevel, log.WARN, fmt.Sprint(v...)) l.Log(stackLevel, log.WARN, "%s", fmt.Sprint(v...))
} }
// Warnf show warnning log // Warnf show warnning log
func (l *XORMLogBridge) Warnf(format string, v ...interface{}) { func (l *XORMLogBridge) Warnf(format string, v ...interface{}) {
_ = l.Log(stackLevel, log.WARN, format, v...) l.Log(stackLevel, log.WARN, format, v...)
} }
// Level get logger level // Level get logger level
@ -86,10 +81,12 @@ func (l *XORMLogBridge) Level() xormlog.LogLevel {
return xormlog.LOG_INFO return xormlog.LOG_INFO
case log.WARN: case log.WARN:
return xormlog.LOG_WARNING return xormlog.LOG_WARNING
case log.ERROR, log.CRITICAL: case log.ERROR:
return xormlog.LOG_ERR return xormlog.LOG_ERR
} case log.NONE:
return xormlog.LOG_OFF return xormlog.LOG_OFF
}
return xormlog.LOG_UNKNOWN
} }
// SetLevel set the logger level // SetLevel set the logger level
@ -98,16 +95,13 @@ func (l *XORMLogBridge) SetLevel(lvl xormlog.LogLevel) {
// ShowSQL set if record SQL // ShowSQL set if record SQL
func (l *XORMLogBridge) ShowSQL(show ...bool) { func (l *XORMLogBridge) ShowSQL(show ...bool) {
showSQL := int32(1) if len(show) == 0 {
if len(show) > 0 && !show[0] { show = []bool{true}
showSQL = 0
} }
atomic.StoreInt32(l.showSQLint, showSQL) l.showSQL.Store(show[0])
} }
// IsShowSQL if record SQL // IsShowSQL if record SQL
func (l *XORMLogBridge) IsShowSQL() bool { func (l *XORMLogBridge) IsShowSQL() bool {
showSQL := atomic.LoadInt32(l.showSQLint) return l.showSQL.Load()
return showSQL == 1
} }

View file

@ -224,40 +224,27 @@ func DeletePullsByBaseRepoID(ctx context.Context, repoID int64) error {
return err return err
} }
// ColorFormat writes a colored string to identify this struct func (pr *PullRequest) String() string {
func (pr *PullRequest) ColorFormat(s fmt.State) {
if pr == nil { if pr == nil {
log.ColorFprintf(s, "PR[%d]%s#%d[%s...%s:%s]", return "<PullRequest nil>"
log.NewColoredIDValue(0),
log.NewColoredValue("<nil>/<nil>"),
log.NewColoredIDValue(0),
log.NewColoredValue("<nil>"),
log.NewColoredValue("<nil>/<nil>"),
log.NewColoredValue("<nil>"),
)
return
} }
log.ColorFprintf(s, "PR[%d]", log.NewColoredIDValue(pr.ID)) s := new(strings.Builder)
fmt.Fprintf(s, "<PullRequest [%d]", pr.ID)
if pr.BaseRepo != nil { if pr.BaseRepo != nil {
log.ColorFprintf(s, "%s#%d[%s...", log.NewColoredValue(pr.BaseRepo.FullName()), fmt.Fprintf(s, "%s#%d[%s...", pr.BaseRepo.FullName(), pr.Index, pr.BaseBranch)
log.NewColoredIDValue(pr.Index), log.NewColoredValue(pr.BaseBranch))
} else { } else {
log.ColorFprintf(s, "Repo[%d]#%d[%s...", log.NewColoredIDValue(pr.BaseRepoID), fmt.Fprintf(s, "Repo[%d]#%d[%s...", pr.BaseRepoID, pr.Index, pr.BaseBranch)
log.NewColoredIDValue(pr.Index), log.NewColoredValue(pr.BaseBranch))
} }
if pr.HeadRepoID == pr.BaseRepoID { if pr.HeadRepoID == pr.BaseRepoID {
log.ColorFprintf(s, "%s]", log.NewColoredValue(pr.HeadBranch)) fmt.Fprintf(s, "%s]", pr.HeadBranch)
} else if pr.HeadRepo != nil { } else if pr.HeadRepo != nil {
log.ColorFprintf(s, "%s:%s]", log.NewColoredValue(pr.HeadRepo.FullName()), log.NewColoredValue(pr.HeadBranch)) fmt.Fprintf(s, "%s:%s]", pr.HeadRepo.FullName(), pr.HeadBranch)
} else { } else {
log.ColorFprintf(s, "Repo[%d]:%s]", log.NewColoredIDValue(pr.HeadRepoID), log.NewColoredValue(pr.HeadBranch)) fmt.Fprintf(s, "Repo[%d]:%s]", pr.HeadRepoID, pr.HeadBranch)
} }
} s.WriteByte('>')
return s.String()
// String represents the pr as a simple string
func (pr *PullRequest) String() string {
return log.ColorFormatAsString(pr)
} }
// MustHeadUserName returns the HeadRepo's username if failed return blank // MustHeadUserName returns the HeadRepo's username if failed return blank

View file

@ -24,6 +24,8 @@ import (
"xorm.io/xorm" "xorm.io/xorm"
) )
// FIXME: this file shouldn't be in a normal package, it should only be compiled for tests
// PrepareTestEnv prepares the test environment and reset the database. The skip parameter should usually be 0. // PrepareTestEnv prepares the test environment and reset the database. The skip parameter should usually be 0.
// Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from. // Provide models to be sync'd with the database - in particular any models you expect fixtures to be loaded from.
// //
@ -110,7 +112,7 @@ func PrepareTestEnv(t *testing.T, skip int, syncModels ...interface{}) (*xorm.En
} }
func MainTest(m *testing.M) { func MainTest(m *testing.M) {
log.Register("test", testlogger.NewTestLogger) log.RegisterEventWriter("test", testlogger.NewTestLoggerWriter)
giteaRoot := base.SetupGiteaRoot() giteaRoot := base.SetupGiteaRoot()
if giteaRoot == "" { if giteaRoot == "" {
@ -154,7 +156,7 @@ func MainTest(m *testing.M) {
os.Exit(1) os.Exit(1)
} }
setting.LoadDBSetting() setting.LoadDBSetting()
setting.InitLogs(true) setting.InitLoggersForTest()
exitStatus := m.Run() exitStatus := m.Run()

View file

@ -94,21 +94,11 @@ func init() {
db.RegisterModel(new(TeamInvite)) db.RegisterModel(new(TeamInvite))
} }
// ColorFormat provides a basic color format for a Team func (t *Team) LogString() string {
func (t *Team) ColorFormat(s fmt.State) {
if t == nil { if t == nil {
log.ColorFprintf(s, "%d:%s (OrgID: %d) %-v", return "<Team nil>"
log.NewColoredIDValue(0),
"<nil>",
log.NewColoredIDValue(0),
0)
return
} }
log.ColorFprintf(s, "%d:%s (OrgID: %d) %-v", return fmt.Sprintf("<Team %d:%s OrgID=%d AccessMode=%s>", t.ID, t.Name, t.OrgID, t.AccessMode.LogString())
log.NewColoredIDValue(t.ID),
t.Name,
log.NewColoredIDValue(t.OrgID),
t.AccessMode)
} }
// LoadUnits load a list of available units for a team // LoadUnits load a list of available units for a team

View file

@ -102,17 +102,10 @@ func (p *Permission) CanWriteIssuesOrPulls(isPull bool) bool {
return p.CanWrite(unit.TypeIssues) return p.CanWrite(unit.TypeIssues)
} }
// ColorFormat writes a colored string for these Permissions func (p *Permission) LogString() string {
func (p *Permission) ColorFormat(s fmt.State) { format := "<Permission AccessMode=%s, %d Units, %d UnitsMode(s): [ "
noColor := log.ColorBytes(log.Reset) args := []any{p.AccessMode.String(), len(p.Units), len(p.UnitsMode)}
format := "perm_model.AccessMode: %-v, %d Units, %d UnitsMode(s): [ "
args := []interface{}{
p.AccessMode,
log.NewColoredValueBytes(len(p.Units), &noColor),
log.NewColoredValueBytes(len(p.UnitsMode), &noColor),
}
if s.Flag('+') {
for i, unit := range p.Units { for i, unit := range p.Units {
config := "" config := ""
if unit.Config != nil { if unit.Config != nil {
@ -122,25 +115,15 @@ func (p *Permission) ColorFormat(s fmt.State) {
config = err.Error() config = err.Error()
} }
} }
format += "\nUnits[%d]: ID: %d RepoID: %d Type: %-v Config: %s" format += "\nUnits[%d]: ID: %d RepoID: %d Type: %s Config: %s"
args = append(args, args = append(args, i, unit.ID, unit.RepoID, unit.Type.LogString(), config)
log.NewColoredValueBytes(i, &noColor),
log.NewColoredIDValue(unit.ID),
log.NewColoredIDValue(unit.RepoID),
unit.Type,
config)
} }
for key, value := range p.UnitsMode { for key, value := range p.UnitsMode {
format += "\nUnitMode[%-v]: %-v" format += "\nUnitMode[%-v]: %-v"
args = append(args, args = append(args, key.LogString(), value.LogString())
key,
value)
} }
} else { format += " ]>"
format += "..." return fmt.Sprintf(format, args...)
}
format += " ]"
log.ColorFprintf(s, format, args...)
} }
// GetUserRepoPermission returns the user permissions to the repository // GetUserRepoPermission returns the user permissions to the repository

View file

@ -5,8 +5,6 @@ package perm
import ( import (
"fmt" "fmt"
"code.gitea.io/gitea/modules/log"
) )
// AccessMode specifies the users access mode // AccessMode specifies the users access mode
@ -40,11 +38,8 @@ func (mode AccessMode) String() string {
} }
} }
// ColorFormat provides a ColorFormatted version of this AccessMode func (mode AccessMode) LogString() string {
func (mode AccessMode) ColorFormat(s fmt.State) { return fmt.Sprintf("<AccessMode:%d:%s>", mode, mode.String())
log.ColorFprintf(s, "%d:%s",
log.NewColoredIDValue(mode),
mode)
} }
// ParseAccessMode returns corresponding access mode to given permission string. // ParseAccessMode returns corresponding access mode to given permission string.

View file

@ -196,19 +196,11 @@ func (repo *Repository) SanitizedOriginalURL() string {
return u.String() return u.String()
} }
// ColorFormat returns a colored string to represent this repo func (repo *Repository) LogString() string {
func (repo *Repository) ColorFormat(s fmt.State) {
if repo == nil { if repo == nil {
log.ColorFprintf(s, "%d:%s/%s", return "<Repository nil>"
log.NewColoredIDValue(0),
"<nil>",
"<nil>")
return
} }
log.ColorFprintf(s, "%d:%s/%s", return fmt.Sprintf("<Repository %d:%s/%s>", repo.ID, repo.OwnerName, repo.Name)
log.NewColoredIDValue(repo.ID),
repo.OwnerName,
repo.Name)
} }
// IsBeingMigrated indicates that repository is being migrated // IsBeingMigrated indicates that repository is being migrated

View file

@ -62,11 +62,8 @@ func (u Type) String() string {
return fmt.Sprintf("Unknown Type %d", u) return fmt.Sprintf("Unknown Type %d", u)
} }
// ColorFormat provides a ColorFormatted version of this Type func (u Type) LogString() string {
func (u Type) ColorFormat(s fmt.State) { return fmt.Sprintf("<UnitType:%d:%s>", u, u.String())
log.ColorFprintf(s, "%d:%s",
log.NewColoredIDValue(u),
u)
} }
var ( var (

View file

@ -151,17 +151,11 @@ type SearchOrganizationsOptions struct {
All bool All bool
} }
// ColorFormat writes a colored string to identify this struct func (u *User) LogString() string {
func (u *User) ColorFormat(s fmt.State) {
if u == nil { if u == nil {
log.ColorFprintf(s, "%d:%s", return "<User nil>"
log.NewColoredIDValue(0),
log.NewColoredValue("<nil>"))
return
} }
log.ColorFprintf(s, "%d:%s", return fmt.Sprintf("<User %d:%s>", u.ID, u.Name)
log.NewColoredIDValue(u.ID),
log.NewColoredValue(u.Name))
} }
// BeforeUpdate is invoked from XORM before updating this object. // BeforeUpdate is invoked from XORM before updating this object.

View file

@ -95,10 +95,7 @@ func AccessLogger() func(http.Handler) http.Handler {
log.Error("Could not set up chi access logger: %v", err.Error()) log.Error("Could not set up chi access logger: %v", err.Error())
} }
err = logger.SendLog(log.INFO, "", "", 0, buf.String(), "") logger.Info("%s", buf.String())
if err != nil {
log.Error("Could not set up chi access logger: %v", err.Error())
}
}) })
} }
} }

View file

@ -6,6 +6,7 @@ package doctor
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"sort" "sort"
"strings" "strings"
@ -26,27 +27,9 @@ type Check struct {
Priority int Priority int
} }
type wrappedLevelLogger struct { func initDBSkipLogger(ctx context.Context) error {
log.LevelLogger
}
func (w *wrappedLevelLogger) Log(skip int, level log.Level, format string, v ...interface{}) error {
return w.LevelLogger.Log(
skip+1,
level,
" - %s "+format,
append(
[]interface{}{
log.NewColoredValueBytes(
fmt.Sprintf("[%s]", strings.ToUpper(level.String()[0:1])),
level.Color()),
}, v...)...)
}
func initDBDisableConsole(ctx context.Context, disableConsole bool) error {
setting.Init(&setting.Options{}) setting.Init(&setting.Options{})
setting.LoadDBSetting() setting.LoadDBSetting()
setting.InitSQLLog(disableConsole)
if err := db.InitEngine(ctx); err != nil { if err := db.InitEngine(ctx); err != nil {
return fmt.Errorf("db.InitEngine: %w", err) return fmt.Errorf("db.InitEngine: %w", err)
} }
@ -57,30 +40,61 @@ func initDBDisableConsole(ctx context.Context, disableConsole bool) error {
return nil return nil
} }
type doctorCheckLogger struct {
colorize bool
}
var _ log.BaseLogger = (*doctorCheckLogger)(nil)
func (d *doctorCheckLogger) Log(skip int, level log.Level, format string, v ...any) {
_, _ = fmt.Fprintf(os.Stdout, format+"\n", v...)
}
func (d *doctorCheckLogger) GetLevel() log.Level {
return log.TRACE
}
type doctorCheckStepLogger struct {
colorize bool
}
var _ log.BaseLogger = (*doctorCheckStepLogger)(nil)
func (d *doctorCheckStepLogger) Log(skip int, level log.Level, format string, v ...any) {
levelChar := fmt.Sprintf("[%s]", strings.ToUpper(level.String()[0:1]))
var levelArg any = levelChar
if d.colorize {
levelArg = log.NewColoredValue(levelChar, level.ColorAttributes()...)
}
args := append([]any{levelArg}, v...)
_, _ = fmt.Fprintf(os.Stdout, " - %s "+format+"\n", args...)
}
func (d *doctorCheckStepLogger) GetLevel() log.Level {
return log.TRACE
}
// Checks is the list of available commands // Checks is the list of available commands
var Checks []*Check var Checks []*Check
// RunChecks runs the doctor checks for the provided list // RunChecks runs the doctor checks for the provided list
func RunChecks(ctx context.Context, logger log.Logger, autofix bool, checks []*Check) error { func RunChecks(ctx context.Context, colorize, autofix bool, checks []*Check) error {
wrappedLogger := log.LevelLoggerLogger{ // the checks output logs by a special logger, they do not use the default logger
LevelLogger: &wrappedLevelLogger{logger}, logger := log.BaseLoggerToGeneralLogger(&doctorCheckLogger{colorize: colorize})
} loggerStep := log.BaseLoggerToGeneralLogger(&doctorCheckStepLogger{colorize: colorize})
dbIsInit := false dbIsInit := false
for i, check := range checks { for i, check := range checks {
if !dbIsInit && !check.SkipDatabaseInitialization { if !dbIsInit && !check.SkipDatabaseInitialization {
// Only open database after the most basic configuration check // Only open database after the most basic configuration check
setting.Log.EnableXORMLog = false if err := initDBSkipLogger(ctx); err != nil {
if err := initDBDisableConsole(ctx, true); err != nil {
logger.Error("Error whilst initializing the database: %v", err) logger.Error("Error whilst initializing the database: %v", err)
logger.Error("Check if you are using the right config file. You can use a --config directive to specify one.") logger.Error("Check if you are using the right config file. You can use a --config directive to specify one.")
return nil return nil
} }
dbIsInit = true dbIsInit = true
} }
logger.Info("[%d] %s", log.NewColoredIDValue(i+1), check.Title) logger.Info("\n[%d] %s", i+1, check.Title)
logger.Flush() if err := check.Run(ctx, loggerStep, autofix); err != nil {
if err := check.Run(ctx, &wrappedLogger, autofix); err != nil {
if check.AbortIfFailed { if check.AbortIfFailed {
logger.Critical("FAIL") logger.Critical("FAIL")
return err return err
@ -88,9 +102,9 @@ func RunChecks(ctx context.Context, logger log.Logger, autofix bool, checks []*C
logger.Error("ERROR") logger.Error("ERROR")
} else { } else {
logger.Info("OK") logger.Info("OK")
logger.Flush()
} }
} }
logger.Info("\nAll done.")
return nil return nil
} }

View file

@ -10,7 +10,6 @@ import (
"strings" "strings"
"testing" "testing"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util" "code.gitea.io/gitea/modules/util"
@ -18,8 +17,6 @@ import (
) )
func testRun(m *testing.M) error { func testRun(m *testing.M) error {
_ = log.NewLogger(1000, "console", "console", `{"level":"trace","stacktracelevel":"NONE","stderr":true}`)
gitHomePath, err := os.MkdirTemp(os.TempDir(), "git-home") gitHomePath, err := os.MkdirTemp(os.TempDir(), "git-home")
if err != nil { if err != nil {
return fmt.Errorf("unable to create temp dir: %w", err) return fmt.Errorf("unable to create temp dir: %w", err)

View file

@ -30,7 +30,7 @@ const (
// * HTTP redirection fallback // * HTTP redirection fallback
// * Builtin SSH listener // * Builtin SSH listener
// //
// If you add an additional place you must increment this number // If you add a new place you must increment this number
// and add a function to call manager.InformCleanup if it's not going to be used // and add a function to call manager.InformCleanup if it's not going to be used
const numberOfServersToCreate = 4 const numberOfServersToCreate = 4

View file

@ -16,6 +16,7 @@ import (
"syscall" "syscall"
"time" "time"
"code.gitea.io/gitea/modules/graceful/releasereopen"
"code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/process" "code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/setting"
@ -185,7 +186,7 @@ func (g *Manager) handleSignals(ctx context.Context) {
case syscall.SIGUSR1: case syscall.SIGUSR1:
log.Warn("PID %d. Received SIGUSR1. Releasing and reopening logs", pid) log.Warn("PID %d. Received SIGUSR1. Releasing and reopening logs", pid)
g.notify(statusMsg("Releasing and reopening logs")) g.notify(statusMsg("Releasing and reopening logs"))
if err := log.ReleaseReopen(); err != nil { if err := releasereopen.GetManager().ReleaseReopen(); err != nil {
log.Error("Error whilst releasing and reopening logs: %v", err) log.Error("Error whilst releasing and reopening logs: %v", err)
} }
case syscall.SIGUSR2: case syscall.SIGUSR2:

View file

@ -0,0 +1,61 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package releasereopen
import (
"errors"
"sync"
)
// ReleaseReopener is implemented by resources (e.g. log file writers) that can
// release and reopen their underlying handles, typically triggered by a
// SIGUSR1 signal or the "release-and-reopen" management command.
type ReleaseReopener interface {
	ReleaseReopen() error
}
// Manager tracks registered ReleaseReopeners and can ask all of them to
// release and reopen their resources in one call.
type Manager struct {
	mu      sync.Mutex
	counter int64 // monotonically increasing source of registration IDs

	releaseReopeners map[int64]ReleaseReopener // registered entries, keyed by registration ID
}
// Register adds rr to the manager and returns a cancel function that
// removes the registration again. The cancel function is safe to call
// concurrently with ReleaseReopen.
func (r *Manager) Register(rr ReleaseReopener) (cancel func()) {
	r.mu.Lock()
	defer r.mu.Unlock()

	// assign a fresh ID and remember it in the closure so that only this
	// registration is removed on cancel
	r.counter++
	id := r.counter
	r.releaseReopeners[id] = rr

	return func() {
		r.mu.Lock()
		defer r.mu.Unlock()
		delete(r.releaseReopeners, id)
	}
}
// ReleaseReopen calls ReleaseReopen on every registered entry and returns
// all failures combined into a single error (nil if none failed).
func (r *Manager) ReleaseReopen() error {
	r.mu.Lock()
	defer r.mu.Unlock()

	var failures []error
	for _, reopener := range r.releaseReopeners {
		if err := reopener.ReleaseReopen(); err != nil {
			failures = append(failures, err)
		}
	}
	return errors.Join(failures...)
}
// GetManager returns the process-wide singleton Manager.
func GetManager() *Manager {
	return manager
}

// NewManager creates a new, empty Manager (mainly useful for tests).
func NewManager() *Manager {
	return &Manager{
		releaseReopeners: make(map[int64]ReleaseReopener),
	}
}

// manager is the singleton returned by GetManager.
var manager = NewManager()

View file

@ -0,0 +1,43 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package releasereopen
import (
"testing"
"github.com/stretchr/testify/assert"
)
// testReleaseReopener counts how many times ReleaseReopen has been called.
type testReleaseReopener struct {
	count int
}

func (t *testReleaseReopener) ReleaseReopen() error {
	t.count++
	return nil
}
func TestManager(t *testing.T) {
	m := NewManager()

	t1 := &testReleaseReopener{}
	t2 := &testReleaseReopener{}
	t3 := &testReleaseReopener{}

	_ = m.Register(t1)
	c2 := m.Register(t2)
	_ = m.Register(t3)

	// all three are registered, so each must be invoked exactly once
	assert.NoError(t, m.ReleaseReopen())
	assert.EqualValues(t, 1, t1.count)
	assert.EqualValues(t, 1, t2.count)
	assert.EqualValues(t, 1, t3.count)

	// cancel t2's registration: only t1 and t3 are invoked the second time
	c2()
	assert.NoError(t, m.ReleaseReopen())
	assert.EqualValues(t, 2, t1.count)
	assert.EqualValues(t, 1, t2.count)
	assert.EqualValues(t, 2, t3.count)
}

View file

@ -49,14 +49,6 @@ type ElasticSearchIndexer struct {
lock sync.RWMutex lock sync.RWMutex
} }
type elasticLogger struct {
log.Logger
}
func (l elasticLogger) Printf(format string, args ...interface{}) {
_ = l.Logger.Log(2, l.Logger.GetLevel(), format, args...)
}
// NewElasticSearchIndexer creates a new elasticsearch indexer // NewElasticSearchIndexer creates a new elasticsearch indexer
func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, bool, error) { func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, bool, error) {
opts := []elastic.ClientOptionFunc{ opts := []elastic.ClientOptionFunc{
@ -66,15 +58,11 @@ func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, bo
elastic.SetGzip(false), elastic.SetGzip(false),
} }
logger := elasticLogger{log.GetLogger(log.DEFAULT)} logger := log.GetLogger(log.DEFAULT)
if logger.GetLevel() == log.TRACE || logger.GetLevel() == log.DEBUG { opts = append(opts, elastic.SetTraceLog(&log.PrintfLogger{Logf: logger.Trace}))
opts = append(opts, elastic.SetTraceLog(logger)) opts = append(opts, elastic.SetInfoLog(&log.PrintfLogger{Logf: logger.Info}))
} else if logger.GetLevel() == log.ERROR || logger.GetLevel() == log.CRITICAL || logger.GetLevel() == log.FATAL { opts = append(opts, elastic.SetErrorLog(&log.PrintfLogger{Logf: logger.Error}))
opts = append(opts, elastic.SetErrorLog(logger))
} else if logger.GetLevel() == log.INFO || logger.GetLevel() == log.WARN {
opts = append(opts, elastic.SetInfoLog(logger))
}
client, err := elastic.NewClient(opts...) client, err := elastic.NewClient(opts...)
if err != nil { if err != nil {

View file

@ -29,14 +29,6 @@ type ElasticSearchIndexer struct {
lock sync.RWMutex lock sync.RWMutex
} }
type elasticLogger struct {
log.LevelLogger
}
func (l elasticLogger) Printf(format string, args ...interface{}) {
_ = l.Log(2, l.GetLevel(), format, args...)
}
// NewElasticSearchIndexer creates a new elasticsearch indexer // NewElasticSearchIndexer creates a new elasticsearch indexer
func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, error) { func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, error) {
opts := []elastic.ClientOptionFunc{ opts := []elastic.ClientOptionFunc{
@ -46,15 +38,10 @@ func NewElasticSearchIndexer(url, indexerName string) (*ElasticSearchIndexer, er
elastic.SetGzip(false), elastic.SetGzip(false),
} }
logger := elasticLogger{log.GetLogger(log.DEFAULT)} logger := log.GetLogger(log.DEFAULT)
opts = append(opts, elastic.SetTraceLog(&log.PrintfLogger{Logf: logger.Trace}))
if logger.GetLevel() == log.TRACE || logger.GetLevel() == log.DEBUG { opts = append(opts, elastic.SetInfoLog(&log.PrintfLogger{Logf: logger.Info}))
opts = append(opts, elastic.SetTraceLog(logger)) opts = append(opts, elastic.SetErrorLog(&log.PrintfLogger{Logf: logger.Error}))
} else if logger.GetLevel() == log.ERROR || logger.GetLevel() == log.CRITICAL || logger.GetLevel() == log.FATAL {
opts = append(opts, elastic.SetErrorLog(logger))
} else if logger.GetLevel() == log.INFO || logger.GetLevel() == log.WARN {
opts = append(opts, elastic.SetInfoLog(logger))
}
client, err := elastic.NewClient(opts...) client, err := elastic.NewClient(opts...)
if err != nil { if err != nil {

View file

@ -13,8 +13,6 @@ import (
"strconv" "strconv"
"strings" "strings"
"code.gitea.io/gitea/modules/log"
"github.com/minio/sha256-simd" "github.com/minio/sha256-simd"
) )
@ -113,15 +111,11 @@ func (p Pointer) RelativePath() string {
return path.Join(p.Oid[0:2], p.Oid[2:4], p.Oid[4:]) return path.Join(p.Oid[0:2], p.Oid[2:4], p.Oid[4:])
} }
// ColorFormat provides a basic color format for a Team func (p Pointer) LogString() string {
func (p Pointer) ColorFormat(s fmt.State) {
if p.Oid == "" && p.Size == 0 { if p.Oid == "" && p.Size == 0 {
log.ColorFprintf(s, "<empty>") return "<LFSPointer empty>"
return
} }
log.ColorFprintf(s, "%s:%d", return fmt.Sprintf("<LFSPointer %s:%d>", p.Oid, p.Size)
log.NewColoredIDValue(p.Oid),
p.Size)
} }
// GeneratePointer generates a pointer for arbitrary content // GeneratePointer generates a pointer for arbitrary content

115
modules/log/color.go Normal file
View file

@ -0,0 +1,115 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"strconv"
)
// escape is the ANSI escape character (ESC, 0x1B) that starts every SGR sequence.
const escape = "\033"

// ColorAttribute defines a single SGR Code
type ColorAttribute int

// Base ColorAttributes
const (
	Reset ColorAttribute = iota
	Bold
	Faint
	Italic
	Underline
	BlinkSlow
	BlinkRapid
	ReverseVideo
	Concealed
	CrossedOut
)

// Foreground text colors
const (
	FgBlack ColorAttribute = iota + 30
	FgRed
	FgGreen
	FgYellow
	FgBlue
	FgMagenta
	FgCyan
	FgWhite
)

// Foreground Hi-Intensity text colors
const (
	FgHiBlack ColorAttribute = iota + 90
	FgHiRed
	FgHiGreen
	FgHiYellow
	FgHiBlue
	FgHiMagenta
	FgHiCyan
	FgHiWhite
)

// Background text colors
const (
	BgBlack ColorAttribute = iota + 40
	BgRed
	BgGreen
	BgYellow
	BgBlue
	BgMagenta
	BgCyan
	BgWhite
)

// Background Hi-Intensity text colors
const (
	BgHiBlack ColorAttribute = iota + 100
	BgHiRed
	BgHiGreen
	BgHiYellow
	BgHiBlue
	BgHiMagenta
	BgHiCyan
	BgHiWhite
)

// Pre-computed escape sequences for commonly used colors, cached so they
// do not need to be rebuilt for every log event.
var (
	resetBytes   = ColorBytes(Reset)
	fgCyanBytes  = ColorBytes(FgCyan)
	fgGreenBytes = ColorBytes(FgGreen)
)
// ColoredValue wraps a value so that, when formatted via fmt, it is rendered
// with the given color attributes and the color is reset afterwards.
type ColoredValue struct {
	v      any
	colors []ColorAttribute
}

// Format implements fmt.Formatter: it writes the SGR color sequence, then the
// wrapped value using the caller's original verb and flags, then a reset.
func (c *ColoredValue) Format(f fmt.State, verb rune) {
	_, _ = f.Write(ColorBytes(c.colors...))
	_, _ = fmt.Fprintf(f, fmt.FormatString(f, verb), c.v)
	_, _ = f.Write(resetBytes)
}

// NewColoredValue wraps v with the provided color attributes.
func NewColoredValue(v any, color ...ColorAttribute) *ColoredValue {
	return &ColoredValue{v: v, colors: color}
}
// ColorBytes builds the ANSI SGR escape sequence ("ESC[<attrs>m") for the
// given attributes. With no attributes it falls back to Bold.
func ColorBytes(attrs ...ColorAttribute) []byte {
	buf := make([]byte, 0, 20)
	buf = append(buf, escape[0], '[')
	if len(attrs) == 0 {
		buf = strconv.AppendInt(buf, int64(Bold), 10)
	} else {
		for i, attr := range attrs {
			if i > 0 {
				buf = append(buf, ';')
			}
			buf = strconv.AppendInt(buf, int64(attr), 10)
		}
	}
	buf = append(buf, 'm')
	return buf
}

View file

@ -0,0 +1,14 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
// CanColorStdout reports if we can color the Stdout
// Although we could do terminal sniffing and the like - in reality
// most tools on *nix are happy to display ansi colors.
// We will terminal sniff on Windows in console_windows.go
var CanColorStdout = true

// CanColorStderr reports if we can color the Stderr
// (same rationale as CanColorStdout; overridden on Windows).
var CanColorStderr = true

View file

@ -20,7 +20,7 @@ func enableVTMode(console windows.Handle) bool {
// EnableVirtualTerminalProcessing is the console mode to allow ANSI code // EnableVirtualTerminalProcessing is the console mode to allow ANSI code
// interpretation on the console. See: // interpretation on the console. See:
// https://docs.microsoft.com/en-us/windows/console/setconsolemode // https://docs.microsoft.com/en-us/windows/console/setconsolemode
// It only works on windows 10. Earlier terminals will fail with an err which we will // It only works on Windows 10. Earlier terminals will fail with an err which we will
// handle to say don't color // handle to say don't color
mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
err = windows.SetConsoleMode(console, mode) err = windows.SetConsoleMode(console, mode)

View file

@ -8,15 +8,15 @@ import (
"time" "time"
) )
var statusToColor = map[int][]byte{ var statusToColor = map[int][]ColorAttribute{
100: ColorBytes(Bold), 100: {Bold},
200: ColorBytes(FgGreen), 200: {FgGreen},
300: ColorBytes(FgYellow), 300: {FgYellow},
304: ColorBytes(FgCyan), 304: {FgCyan},
400: ColorBytes(Bold, FgRed), 400: {Bold, FgRed},
401: ColorBytes(Bold, FgMagenta), 401: {Bold, FgMagenta},
403: ColorBytes(Bold, FgMagenta), 403: {Bold, FgMagenta},
500: ColorBytes(Bold, BgRed), 500: {Bold, BgRed},
} }
// ColoredStatus adds colors for HTTP status // ColoredStatus adds colors for HTTP status
@ -26,30 +26,30 @@ func ColoredStatus(status int, s ...string) *ColoredValue {
color, ok = statusToColor[(status/100)*100] color, ok = statusToColor[(status/100)*100]
} }
if !ok { if !ok {
color = fgBoldBytes color = []ColorAttribute{Bold}
} }
if len(s) > 0 { if len(s) > 0 {
return NewColoredValueBytes(s[0], &color) return NewColoredValue(s[0], color...)
} }
return NewColoredValueBytes(status, &color) return NewColoredValue(status, color...)
} }
var methodToColor = map[string][]byte{ var methodToColor = map[string][]ColorAttribute{
"GET": ColorBytes(FgBlue), "GET": {FgBlue},
"POST": ColorBytes(FgGreen), "POST": {FgGreen},
"DELETE": ColorBytes(FgRed), "DELETE": {FgRed},
"PATCH": ColorBytes(FgCyan), "PATCH": {FgCyan},
"PUT": ColorBytes(FgYellow, Faint), "PUT": {FgYellow, Faint},
"HEAD": ColorBytes(FgBlue, Faint), "HEAD": {FgBlue, Faint},
} }
// ColoredMethod adds colors for HTTP methods on log // ColoredMethod adds colors for HTTP methods on log
func ColoredMethod(method string) *ColoredValue { func ColoredMethod(method string) *ColoredValue {
color, ok := methodToColor[method] color, ok := methodToColor[method]
if !ok { if !ok {
return NewColoredValueBytes(method, &fgBoldBytes) return NewColoredValue(method, Bold)
} }
return NewColoredValueBytes(method, &color) return NewColoredValue(method, color...)
} }
var ( var (
@ -61,15 +61,15 @@ var (
10 * time.Second, 10 * time.Second,
} }
durationColors = [][]byte{ durationColors = [][]ColorAttribute{
ColorBytes(FgGreen), {FgGreen},
ColorBytes(Bold), {Bold},
ColorBytes(FgYellow), {FgYellow},
ColorBytes(FgRed, Bold), {FgRed, Bold},
ColorBytes(BgRed), {BgRed},
} }
wayTooLong = ColorBytes(BgMagenta) wayTooLong = BgMagenta
) )
// ColoredTime converts the provided time to a ColoredValue for logging. The duration is always formatted in milliseconds. // ColoredTime converts the provided time to a ColoredValue for logging. The duration is always formatted in milliseconds.
@ -80,8 +80,8 @@ func ColoredTime(duration time.Duration) *ColoredValue {
str := fmt.Sprintf("%.1fms", float64(duration.Microseconds())/1000) str := fmt.Sprintf("%.1fms", float64(duration.Microseconds())/1000)
for i, k := range durations { for i, k := range durations {
if duration < k { if duration < k {
return NewColoredValueBytes(str, &durationColors[i]) return NewColoredValue(str, durationColors[i]...)
} }
} }
return NewColoredValueBytes(str, &wayTooLong) return NewColoredValue(str, wayTooLong)
} }

View file

@ -1,435 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"io"
"reflect"
"strconv"
"strings"
)
const escape = "\033"
// ColorAttribute defines a single SGR Code
type ColorAttribute int
// Base ColorAttributes
const (
Reset ColorAttribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack ColorAttribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack ColorAttribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack ColorAttribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack ColorAttribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
var colorAttributeToString = map[ColorAttribute]string{
Reset: "Reset",
Bold: "Bold",
Faint: "Faint",
Italic: "Italic",
Underline: "Underline",
BlinkSlow: "BlinkSlow",
BlinkRapid: "BlinkRapid",
ReverseVideo: "ReverseVideo",
Concealed: "Concealed",
CrossedOut: "CrossedOut",
FgBlack: "FgBlack",
FgRed: "FgRed",
FgGreen: "FgGreen",
FgYellow: "FgYellow",
FgBlue: "FgBlue",
FgMagenta: "FgMagenta",
FgCyan: "FgCyan",
FgWhite: "FgWhite",
FgHiBlack: "FgHiBlack",
FgHiRed: "FgHiRed",
FgHiGreen: "FgHiGreen",
FgHiYellow: "FgHiYellow",
FgHiBlue: "FgHiBlue",
FgHiMagenta: "FgHiMagenta",
FgHiCyan: "FgHiCyan",
FgHiWhite: "FgHiWhite",
BgBlack: "BgBlack",
BgRed: "BgRed",
BgGreen: "BgGreen",
BgYellow: "BgYellow",
BgBlue: "BgBlue",
BgMagenta: "BgMagenta",
BgCyan: "BgCyan",
BgWhite: "BgWhite",
BgHiBlack: "BgHiBlack",
BgHiRed: "BgHiRed",
BgHiGreen: "BgHiGreen",
BgHiYellow: "BgHiYellow",
BgHiBlue: "BgHiBlue",
BgHiMagenta: "BgHiMagenta",
BgHiCyan: "BgHiCyan",
BgHiWhite: "BgHiWhite",
}
func (c *ColorAttribute) String() string {
return colorAttributeToString[*c]
}
var colorAttributeFromString = map[string]ColorAttribute{}
// ColorAttributeFromString will return a ColorAttribute given a string
func ColorAttributeFromString(from string) ColorAttribute {
lowerFrom := strings.TrimSpace(strings.ToLower(from))
return colorAttributeFromString[lowerFrom]
}
// ColorString converts a list of ColorAttributes to a color string
func ColorString(attrs ...ColorAttribute) string {
return string(ColorBytes(attrs...))
}
// ColorBytes converts a list of ColorAttributes to a byte array
func ColorBytes(attrs ...ColorAttribute) []byte {
bytes := make([]byte, 0, 20)
bytes = append(bytes, escape[0], '[')
if len(attrs) > 0 {
bytes = append(bytes, strconv.Itoa(int(attrs[0]))...)
for _, a := range attrs[1:] {
bytes = append(bytes, ';')
bytes = append(bytes, strconv.Itoa(int(a))...)
}
} else {
bytes = append(bytes, strconv.Itoa(int(Bold))...)
}
bytes = append(bytes, 'm')
return bytes
}
var levelToColor = map[Level][]byte{
TRACE: ColorBytes(Bold, FgCyan),
DEBUG: ColorBytes(Bold, FgBlue),
INFO: ColorBytes(Bold, FgGreen),
WARN: ColorBytes(Bold, FgYellow),
ERROR: ColorBytes(Bold, FgRed),
CRITICAL: ColorBytes(Bold, BgMagenta),
FATAL: ColorBytes(Bold, BgRed),
NONE: ColorBytes(Reset),
}
var (
resetBytes = ColorBytes(Reset)
fgCyanBytes = ColorBytes(FgCyan)
fgGreenBytes = ColorBytes(FgGreen)
fgBoldBytes = ColorBytes(Bold)
)
type protectedANSIWriterMode int
const (
escapeAll protectedANSIWriterMode = iota
allowColor
removeColor
)
type protectedANSIWriter struct {
w io.Writer
mode protectedANSIWriterMode
}
// Write will protect against unusual characters
func (c *protectedANSIWriter) Write(bytes []byte) (int, error) {
end := len(bytes)
totalWritten := 0
normalLoop:
for i := 0; i < end; {
lasti := i
if c.mode == escapeAll {
for i < end && (bytes[i] >= ' ' || bytes[i] == '\n' || bytes[i] == '\t') {
i++
}
} else {
// Allow tabs if we're not escaping everything
for i < end && (bytes[i] >= ' ' || bytes[i] == '\t') {
i++
}
}
if i > lasti {
written, err := c.w.Write(bytes[lasti:i])
totalWritten += written
if err != nil {
return totalWritten, err
}
}
if i >= end {
break
}
// If we're not just escaping all we should prefix all newlines with a \t
if c.mode != escapeAll {
if bytes[i] == '\n' {
written, err := c.w.Write([]byte{'\n', '\t'})
if written > 0 {
totalWritten++
}
if err != nil {
return totalWritten, err
}
i++
continue normalLoop
}
if bytes[i] == escape[0] && i+1 < end && bytes[i+1] == '[' {
for j := i + 2; j < end; j++ {
if bytes[j] >= '0' && bytes[j] <= '9' {
continue
}
if bytes[j] == ';' {
continue
}
if bytes[j] == 'm' {
if c.mode == allowColor {
written, err := c.w.Write(bytes[i : j+1])
totalWritten += written
if err != nil {
return totalWritten, err
}
} else {
totalWritten = j
}
i = j + 1
continue normalLoop
}
break
}
}
}
// Process naughty character
if _, err := fmt.Fprintf(c.w, `\%#03o`, bytes[i]); err != nil {
return totalWritten, err
}
i++
totalWritten++
}
return totalWritten, nil
}
// ColorSprintf returns a colored string from a format and arguments
// arguments will be wrapped in ColoredValues to protect against color spoofing
func ColorSprintf(format string, args ...interface{}) string {
if len(args) > 0 {
v := make([]interface{}, len(args))
for i := 0; i < len(v); i++ {
v[i] = NewColoredValuePointer(&args[i])
}
return fmt.Sprintf(format, v...)
}
return format
}
// ColorFprintf will write to the provided writer similar to ColorSprintf
func ColorFprintf(w io.Writer, format string, args ...interface{}) (int, error) {
if len(args) > 0 {
v := make([]interface{}, len(args))
for i := 0; i < len(v); i++ {
v[i] = NewColoredValuePointer(&args[i])
}
return fmt.Fprintf(w, format, v...)
}
return fmt.Fprint(w, format)
}
// ColorFormatted structs provide their own colored string when formatted with ColorSprintf
type ColorFormatted interface {
// ColorFormat provides the colored representation of the value
ColorFormat(s fmt.State)
}
var colorFormattedType = reflect.TypeOf((*ColorFormatted)(nil)).Elem()
// ColoredValue will Color the provided value
type ColoredValue struct {
colorBytes *[]byte
resetBytes *[]byte
Value *interface{}
}
// NewColoredValue is a helper function to create a ColoredValue from a Value
// If no color is provided it defaults to Bold with standard Reset
// If a ColoredValue is provided it is not changed
func NewColoredValue(value interface{}, color ...ColorAttribute) *ColoredValue {
return NewColoredValuePointer(&value, color...)
}
// NewColoredValuePointer is a helper function to create a ColoredValue from a Value Pointer
// If no color is provided it defaults to Bold with standard Reset
// If a ColoredValue is provided it is not changed
func NewColoredValuePointer(value *interface{}, color ...ColorAttribute) *ColoredValue {
if val, ok := (*value).(*ColoredValue); ok {
return val
}
if len(color) > 0 {
bytes := ColorBytes(color...)
return &ColoredValue{
colorBytes: &bytes,
resetBytes: &resetBytes,
Value: value,
}
}
return &ColoredValue{
colorBytes: &fgBoldBytes,
resetBytes: &resetBytes,
Value: value,
}
}
// NewColoredValueBytes creates a value from the provided value with color bytes
// If a ColoredValue is provided it is not changed
func NewColoredValueBytes(value interface{}, colorBytes *[]byte) *ColoredValue {
if val, ok := value.(*ColoredValue); ok {
return val
}
return &ColoredValue{
colorBytes: colorBytes,
resetBytes: &resetBytes,
Value: &value,
}
}
// NewColoredIDValue is a helper function to create a ColoredValue from a Value
// The Value will be colored with FgCyan
// If a ColoredValue is provided it is not changed
func NewColoredIDValue(value interface{}) *ColoredValue {
return NewColoredValueBytes(value, &fgCyanBytes)
}
// Format will format the provided value and protect against ANSI color spoofing within the value
// If the wrapped value is ColorFormatted and the format is "%-v" then its ColorString will
// be used. It is presumed that this ColorString is safe.
func (cv *ColoredValue) Format(s fmt.State, c rune) {
if c == 'v' && s.Flag('-') {
if val, ok := (*cv.Value).(ColorFormatted); ok {
val.ColorFormat(s)
return
}
v := reflect.ValueOf(*cv.Value)
t := v.Type()
if reflect.PtrTo(t).Implements(colorFormattedType) {
vp := reflect.New(t)
vp.Elem().Set(v)
val := vp.Interface().(ColorFormatted)
val.ColorFormat(s)
return
}
}
s.Write(*cv.colorBytes)
fmt.Fprintf(&protectedANSIWriter{w: s}, fmtString(s, c), *(cv.Value))
s.Write(*cv.resetBytes)
}
// ColorFormatAsString returns the result of the ColorFormat without the color
func ColorFormatAsString(colorVal ColorFormatted) string {
s := new(strings.Builder)
_, _ = ColorFprintf(&protectedANSIWriter{w: s, mode: removeColor}, "%-v", colorVal)
return s.String()
}
// SetColorBytes will allow a user to set the colorBytes of a colored value
func (cv *ColoredValue) SetColorBytes(colorBytes []byte) {
cv.colorBytes = &colorBytes
}
// SetColorBytesPointer will allow a user to set the colorBytes pointer of a colored value
func (cv *ColoredValue) SetColorBytesPointer(colorBytes *[]byte) {
cv.colorBytes = colorBytes
}
// SetResetBytes will allow a user to set the resetBytes pointer of a colored value
func (cv *ColoredValue) SetResetBytes(resetBytes []byte) {
cv.resetBytes = &resetBytes
}
// SetResetBytesPointer will allow a user to set the resetBytes pointer of a colored value
func (cv *ColoredValue) SetResetBytesPointer(resetBytes *[]byte) {
cv.resetBytes = resetBytes
}
func fmtString(s fmt.State, c rune) string {
var width, precision string
base := make([]byte, 0, 8)
base = append(base, '%')
for _, c := range []byte(" +-#0") {
if s.Flag(int(c)) {
base = append(base, c)
}
}
if w, ok := s.Width(); ok {
width = strconv.Itoa(w)
}
if p, ok := s.Precision(); ok {
precision = "." + strconv.Itoa(p)
}
return fmt.Sprintf("%s%s%s%c", base, width, precision, c)
}
func init() {
for attr, from := range colorAttributeToString {
colorAttributeFromString[strings.ToLower(from)] = attr
}
}

View file

@ -1,137 +0,0 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"io"
"net"
"code.gitea.io/gitea/modules/json"
)
type connWriter struct {
innerWriter io.WriteCloser
ReconnectOnMsg bool `json:"reconnectOnMsg"`
Reconnect bool `json:"reconnect"`
Net string `json:"net"`
Addr string `json:"addr"`
}
// Close the inner writer
func (i *connWriter) Close() error {
if i.innerWriter != nil {
return i.innerWriter.Close()
}
return nil
}
// Write the data to the connection
func (i *connWriter) Write(p []byte) (int, error) {
if i.neededConnectOnMsg() {
if err := i.connect(); err != nil {
return 0, err
}
}
if i.ReconnectOnMsg {
defer i.innerWriter.Close()
}
return i.innerWriter.Write(p)
}
func (i *connWriter) neededConnectOnMsg() bool {
if i.Reconnect {
i.Reconnect = false
return true
}
if i.innerWriter == nil {
return true
}
return i.ReconnectOnMsg
}
func (i *connWriter) connect() error {
if i.innerWriter != nil {
i.innerWriter.Close()
i.innerWriter = nil
}
conn, err := net.Dial(i.Net, i.Addr)
if err != nil {
return err
}
if tcpConn, ok := conn.(*net.TCPConn); ok {
err = tcpConn.SetKeepAlive(true)
if err != nil {
return err
}
}
i.innerWriter = conn
return nil
}
func (i *connWriter) releaseReopen() error {
if i.innerWriter != nil {
return i.connect()
}
return nil
}
// ConnLogger implements LoggerProvider.
// it writes messages in keep-live tcp connection.
type ConnLogger struct {
WriterLogger
ReconnectOnMsg bool `json:"reconnectOnMsg"`
Reconnect bool `json:"reconnect"`
Net string `json:"net"`
Addr string `json:"addr"`
}
// NewConn creates new ConnLogger returning as LoggerProvider.
func NewConn() LoggerProvider {
conn := new(ConnLogger)
conn.Level = TRACE
return conn
}
// Init inits connection writer with json config.
// json config only need key "level".
func (log *ConnLogger) Init(jsonconfig string) error {
err := json.Unmarshal([]byte(jsonconfig), log)
if err != nil {
return fmt.Errorf("Unable to parse JSON: %w", err)
}
log.NewWriterLogger(&connWriter{
ReconnectOnMsg: log.ReconnectOnMsg,
Reconnect: log.Reconnect,
Net: log.Net,
Addr: log.Addr,
}, log.Level)
return nil
}
// Flush does nothing for this implementation
func (log *ConnLogger) Flush() {
}
// GetName returns the default name for this implementation
func (log *ConnLogger) GetName() string {
return "conn"
}
// ReleaseReopen causes the ConnLogger to reconnect to the server
func (log *ConnLogger) ReleaseReopen() error {
return log.out.(*connWriter).releaseReopen()
}
func init() {
Register("conn", NewConn)
}

View file

@ -1,230 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"io"
"net"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func listenReadAndClose(t *testing.T, l net.Listener, expected string) {
conn, err := l.Accept()
assert.NoError(t, err)
defer conn.Close()
written, err := io.ReadAll(conn)
assert.NoError(t, err)
assert.Equal(t, expected, string(written))
}
func TestConnLogger(t *testing.T) {
protocol := "tcp"
address := ":3099"
l, err := net.Listen(protocol, address)
if err != nil {
t.Fatal(err)
}
defer l.Close()
prefix := "TestPrefix "
level := INFO
flags := LstdFlags | LUTC | Lfuncname
logger := NewConn()
connLogger := logger.(*ConnLogger)
logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, true, true, protocol, address))
assert.Equal(t, flags, connLogger.Flags)
assert.Equal(t, level, connLogger.Level)
assert.Equal(t, level, logger.GetLevel())
location, _ := time.LoadLocation("EST")
date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
dateString := date.UTC().Format("2006/01/02 15:04:05")
event := Event{
level: INFO,
msg: "TEST MSG",
caller: "CALLER",
filename: "FULL/FILENAME",
line: 1,
time: date,
}
expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
listenReadAndClose(t, l, expected)
}()
go func() {
defer wg.Done()
err := logger.LogEvent(&event)
assert.NoError(t, err)
}()
wg.Wait()
event.level = WARN
expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
wg.Add(2)
go func() {
defer wg.Done()
listenReadAndClose(t, l, expected)
}()
go func() {
defer wg.Done()
err := logger.LogEvent(&event)
assert.NoError(t, err)
}()
wg.Wait()
logger.Close()
}
func TestConnLoggerBadConfig(t *testing.T) {
logger := NewConn()
err := logger.Init("{")
assert.Error(t, err)
assert.Contains(t, err.Error(), "Unable to parse JSON")
logger.Close()
}
func TestConnLoggerCloseBeforeSend(t *testing.T) {
protocol := "tcp"
address := ":3099"
prefix := "TestPrefix "
level := INFO
flags := LstdFlags | LUTC | Lfuncname
logger := NewConn()
logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
logger.Close()
}
func TestConnLoggerFailConnect(t *testing.T) {
protocol := "tcp"
address := ":3099"
prefix := "TestPrefix "
level := INFO
flags := LstdFlags | LUTC | Lfuncname
logger := NewConn()
logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
assert.Equal(t, level, logger.GetLevel())
location, _ := time.LoadLocation("EST")
date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
// dateString := date.UTC().Format("2006/01/02 15:04:05")
event := Event{
level: INFO,
msg: "TEST MSG",
caller: "CALLER",
filename: "FULL/FILENAME",
line: 1,
time: date,
}
err := logger.LogEvent(&event)
assert.Error(t, err)
logger.Close()
}
func TestConnLoggerClose(t *testing.T) {
protocol := "tcp"
address := ":3099"
l, err := net.Listen(protocol, address)
if err != nil {
t.Fatal(err)
}
defer l.Close()
prefix := "TestPrefix "
level := INFO
flags := LstdFlags | LUTC | Lfuncname
logger := NewConn()
connLogger := logger.(*ConnLogger)
logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
assert.Equal(t, flags, connLogger.Flags)
assert.Equal(t, level, connLogger.Level)
assert.Equal(t, level, logger.GetLevel())
location, _ := time.LoadLocation("EST")
date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
dateString := date.UTC().Format("2006/01/02 15:04:05")
event := Event{
level: INFO,
msg: "TEST MSG",
caller: "CALLER",
filename: "FULL/FILENAME",
line: 1,
time: date,
}
expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
var wg sync.WaitGroup
wg.Add(2)
go func() {
defer wg.Done()
err := logger.LogEvent(&event)
assert.NoError(t, err)
logger.Close()
}()
go func() {
defer wg.Done()
listenReadAndClose(t, l, expected)
}()
wg.Wait()
logger = NewConn()
connLogger = logger.(*ConnLogger)
logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, true, protocol, address))
assert.Equal(t, flags, connLogger.Flags)
assert.Equal(t, level, connLogger.Level)
assert.Equal(t, level, logger.GetLevel())
event.level = WARN
expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
wg.Add(2)
go func() {
defer wg.Done()
listenReadAndClose(t, l, expected)
}()
go func() {
defer wg.Done()
err := logger.LogEvent(&event)
assert.NoError(t, err)
logger.Close()
}()
wg.Wait()
logger.Flush()
logger.Close()
}

View file

@ -1,93 +0,0 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"io"
"os"
"code.gitea.io/gitea/modules/json"
)
// CanColorStdout reports if we can color the Stdout
// Although we could do terminal sniffing and the like - in reality
// most tools on *nix are happy to display ansi colors.
// We will terminal sniff on Windows in console_windows.go
var CanColorStdout = true
// CanColorStderr reports if we can color the Stderr
var CanColorStderr = true
type nopWriteCloser struct {
w io.WriteCloser
}
func (n *nopWriteCloser) Write(p []byte) (int, error) {
return n.w.Write(p)
}
func (n *nopWriteCloser) Close() error {
return nil
}
// ConsoleLogger implements LoggerProvider and writes messages to terminal.
type ConsoleLogger struct {
WriterLogger
Stderr bool `json:"stderr"`
}
// NewConsoleLogger create ConsoleLogger returning as LoggerProvider.
func NewConsoleLogger() LoggerProvider {
log := &ConsoleLogger{}
log.NewWriterLogger(&nopWriteCloser{
w: os.Stdout,
})
return log
}
// Init inits connection writer with json config.
// json config only need key "level".
func (log *ConsoleLogger) Init(config string) error {
err := json.Unmarshal([]byte(config), log)
if err != nil {
return fmt.Errorf("Unable to parse JSON: %w", err)
}
if log.Stderr {
log.NewWriterLogger(&nopWriteCloser{
w: os.Stderr,
})
} else {
log.NewWriterLogger(log.out)
}
return nil
}
// Flush when log should be flushed
func (log *ConsoleLogger) Flush() {
}
// ReleaseReopen causes the console logger to reconnect to os.Stdout
func (log *ConsoleLogger) ReleaseReopen() error {
if log.Stderr {
log.NewWriterLogger(&nopWriteCloser{
w: os.Stderr,
})
} else {
log.NewWriterLogger(&nopWriteCloser{
w: os.Stdout,
})
}
return nil
}
// GetName returns the default name for this implementation
func (log *ConsoleLogger) GetName() string {
return "console"
}
func init() {
Register("console", NewConsoleLogger)
}

View file

@ -1,137 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestConsoleLoggerBadConfig(t *testing.T) {
logger := NewConsoleLogger()
err := logger.Init("{")
assert.Error(t, err)
assert.Contains(t, err.Error(), "Unable to parse JSON")
logger.Close()
}
func TestConsoleLoggerMinimalConfig(t *testing.T) {
for _, level := range Levels() {
var written []byte
var closed bool
c := CallbackWriteCloser{
callback: func(p []byte, close bool) {
written = p
closed = close
},
}
prefix := ""
flags := LstdFlags
cw := NewConsoleLogger()
realCW := cw.(*ConsoleLogger)
cw.Init(fmt.Sprintf("{\"level\":\"%s\"}", level))
nwc := realCW.out.(*nopWriteCloser)
nwc.w = c
assert.Equal(t, flags, realCW.Flags)
assert.Equal(t, FromString(level), realCW.Level)
assert.Equal(t, FromString(level), cw.GetLevel())
assert.Equal(t, prefix, realCW.Prefix)
assert.Equal(t, "", string(written))
cw.Close()
assert.False(t, closed)
}
}
func TestConsoleLogger(t *testing.T) {
var written []byte
var closed bool
c := CallbackWriteCloser{
callback: func(p []byte, close bool) {
written = p
closed = close
},
}
prefix := "TestPrefix "
level := INFO
flags := LstdFlags | LUTC | Lfuncname
cw := NewConsoleLogger()
realCW := cw.(*ConsoleLogger)
realCW.Colorize = false
nwc := realCW.out.(*nopWriteCloser)
nwc.w = c
cw.Init(fmt.Sprintf("{\"expression\":\"FILENAME\",\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d}", prefix, level.String(), flags))
assert.Equal(t, flags, realCW.Flags)
assert.Equal(t, level, realCW.Level)
assert.Equal(t, level, cw.GetLevel())
location, _ := time.LoadLocation("EST")
date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
dateString := date.UTC().Format("2006/01/02 15:04:05")
event := Event{
level: INFO,
msg: "TEST MSG",
caller: "CALLER",
filename: "FULL/FILENAME",
line: 1,
time: date,
}
expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
cw.LogEvent(&event)
assert.Equal(t, expected, string(written))
assert.False(t, closed)
written = written[:0]
event.level = DEBUG
expected = ""
cw.LogEvent(&event)
assert.Equal(t, expected, string(written))
assert.False(t, closed)
event.level = TRACE
expected = ""
cw.LogEvent(&event)
assert.Equal(t, expected, string(written))
assert.False(t, closed)
nonMatchEvent := Event{
level: INFO,
msg: "TEST MSG",
caller: "CALLER",
filename: "FULL/FI_LENAME",
line: 1,
time: date,
}
event.level = INFO
expected = ""
cw.LogEvent(&nonMatchEvent)
assert.Equal(t, expected, string(written))
assert.False(t, closed)
event.level = WARN
expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
cw.LogEvent(&event)
assert.Equal(t, expected, string(written))
assert.False(t, closed)
written = written[:0]
cw.Close()
assert.False(t, closed)
}

View file

@ -1,61 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import "fmt"
// ErrTimeout represents a "Timeout" kind of error.
type ErrTimeout struct {
Name string
Provider string
}
// IsErrTimeout checks if an error is a ErrTimeout.
func IsErrTimeout(err error) bool {
if err == nil {
return false
}
_, ok := err.(ErrTimeout)
return ok
}
func (err ErrTimeout) Error() string {
return fmt.Sprintf("Log Timeout for %s (%s)", err.Name, err.Provider)
}
// ErrUnknownProvider represents a "Unknown Provider" kind of error.
type ErrUnknownProvider struct {
Provider string
}
// IsErrUnknownProvider checks if an error is a ErrUnknownProvider.
func IsErrUnknownProvider(err error) bool {
if err == nil {
return false
}
_, ok := err.(ErrUnknownProvider)
return ok
}
func (err ErrUnknownProvider) Error() string {
return fmt.Sprintf("Unknown Log Provider \"%s\" (Was it registered?)", err.Provider)
}
// ErrDuplicateName represents a Duplicate Name error
type ErrDuplicateName struct {
Name string
}
// IsErrDuplicateName checks if an error is a ErrDuplicateName.
func IsErrDuplicateName(err error) bool {
if err == nil {
return false
}
_, ok := err.(ErrDuplicateName)
return ok
}
func (err ErrDuplicateName) Error() string {
return fmt.Sprintf("Duplicate named logger: %s", err.Name)
}

View file

@ -1,460 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"context"
"fmt"
"runtime/pprof"
"sync"
"time"
"code.gitea.io/gitea/modules/process"
)
// Event represents a logging event
type Event struct {
level Level
msg string
caller string
filename string
line int
time time.Time
stacktrace string
}
// EventLogger represents the behaviours of a logger
type EventLogger interface {
LogEvent(event *Event) error
Close()
Flush()
GetLevel() Level
GetStacktraceLevel() Level
GetName() string
ReleaseReopen() error
}
// ChannelledLog represents a cached channel to a LoggerProvider
type ChannelledLog struct {
ctx context.Context
finished context.CancelFunc
name string
provider string
queue chan *Event
loggerProvider LoggerProvider
flush chan bool
close chan bool
closed chan bool
}
// NewChannelledLog a new logger instance with given logger provider and config.
func NewChannelledLog(parent context.Context, name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
if log, ok := providers[provider]; ok {
l := &ChannelledLog{
queue: make(chan *Event, bufferLength),
flush: make(chan bool),
close: make(chan bool),
closed: make(chan bool),
}
l.loggerProvider = log()
if err := l.loggerProvider.Init(config); err != nil {
return nil, err
}
l.name = name
l.provider = provider
l.ctx, _, l.finished = process.GetManager().AddTypedContext(parent, fmt.Sprintf("Logger: %s(%s)", l.name, l.provider), process.SystemProcessType, false)
go l.Start()
return l, nil
}
return nil, ErrUnknownProvider{provider}
}
// Start processing the ChannelledLog
func (l *ChannelledLog) Start() {
pprof.SetGoroutineLabels(l.ctx)
defer l.finished()
for {
select {
case event, ok := <-l.queue:
if !ok {
l.closeLogger()
return
}
l.loggerProvider.LogEvent(event) //nolint:errcheck
case _, ok := <-l.flush:
if !ok {
l.closeLogger()
return
}
l.emptyQueue()
l.loggerProvider.Flush()
case <-l.close:
l.emptyQueue()
l.closeLogger()
return
}
}
}
// LogEvent logs an event to this ChannelledLog
func (l *ChannelledLog) LogEvent(event *Event) error {
select {
case l.queue <- event:
return nil
case <-time.After(60 * time.Second):
// We're blocked!
return ErrTimeout{
Name: l.name,
Provider: l.provider,
}
}
}
func (l *ChannelledLog) emptyQueue() bool {
for {
select {
case event, ok := <-l.queue:
if !ok {
return false
}
l.loggerProvider.LogEvent(event) //nolint:errcheck
default:
return true
}
}
}
func (l *ChannelledLog) closeLogger() {
l.loggerProvider.Flush()
l.loggerProvider.Close()
l.closed <- true
}
// Close this ChannelledLog
func (l *ChannelledLog) Close() {
l.close <- true
<-l.closed
}
// Flush this ChannelledLog
func (l *ChannelledLog) Flush() {
l.flush <- true
}
// ReleaseReopen this ChannelledLog
func (l *ChannelledLog) ReleaseReopen() error {
return l.loggerProvider.ReleaseReopen()
}
// GetLevel gets the level of this ChannelledLog
func (l *ChannelledLog) GetLevel() Level {
return l.loggerProvider.GetLevel()
}
// GetStacktraceLevel gets the level of this ChannelledLog
func (l *ChannelledLog) GetStacktraceLevel() Level {
return l.loggerProvider.GetStacktraceLevel()
}
// GetName returns the name of this ChannelledLog
func (l *ChannelledLog) GetName() string {
return l.name
}
// MultiChannelledLog represents a cached channel to a LoggerProvider
type MultiChannelledLog struct {
ctx context.Context
finished context.CancelFunc
name string
bufferLength int64
queue chan *Event
rwmutex sync.RWMutex
loggers map[string]EventLogger
flush chan bool
close chan bool
started bool
level Level
stacktraceLevel Level
closed chan bool
paused chan bool
}
// NewMultiChannelledLog a new logger instance with given logger provider and config.
func NewMultiChannelledLog(name string, bufferLength int64) *MultiChannelledLog {
ctx, _, finished := process.GetManager().AddTypedContext(context.Background(), fmt.Sprintf("Logger: %s", name), process.SystemProcessType, false)
m := &MultiChannelledLog{
ctx: ctx,
finished: finished,
name: name,
queue: make(chan *Event, bufferLength),
flush: make(chan bool),
bufferLength: bufferLength,
loggers: make(map[string]EventLogger),
level: NONE,
stacktraceLevel: NONE,
close: make(chan bool),
closed: make(chan bool),
paused: make(chan bool),
}
return m
}
// AddLogger adds a logger to this MultiChannelledLog
func (m *MultiChannelledLog) AddLogger(logger EventLogger) error {
m.rwmutex.Lock()
name := logger.GetName()
if _, has := m.loggers[name]; has {
m.rwmutex.Unlock()
return ErrDuplicateName{name}
}
m.loggers[name] = logger
if logger.GetLevel() < m.level {
m.level = logger.GetLevel()
}
if logger.GetStacktraceLevel() < m.stacktraceLevel {
m.stacktraceLevel = logger.GetStacktraceLevel()
}
m.rwmutex.Unlock()
go m.Start()
return nil
}
// DelLogger detaches the named sub logger, flushing and closing it, and
// recomputes the aggregate levels.
// NB: deleting the last sub logger leaves this logger silently dropping events.
func (m *MultiChannelledLog) DelLogger(name string) bool {
	m.rwmutex.Lock()
	logger, found := m.loggers[name]
	if !found {
		m.rwmutex.Unlock()
		return false
	}
	delete(m.loggers, name)
	m.internalResetLevel()
	m.rwmutex.Unlock()

	logger.Flush()
	logger.Close()
	return true
}
// GetEventLogger returns the sub logger registered under name (nil if absent).
func (m *MultiChannelledLog) GetEventLogger(name string) EventLogger {
	m.rwmutex.RLock()
	defer m.rwmutex.RUnlock()
	return m.loggers[name]
}

// GetEventLoggerNames lists the names of all registered sub loggers.
func (m *MultiChannelledLog) GetEventLoggerNames() []string {
	m.rwmutex.RLock()
	defer m.rwmutex.RUnlock()
	var names []string
	for name := range m.loggers {
		names = append(names, name)
	}
	return names
}
// closeLoggers flushes and closes every sub logger, then signals m.closed so
// Close() can return.
func (m *MultiChannelledLog) closeLoggers() {
	m.rwmutex.Lock()
	for _, logger := range m.loggers {
		logger.Flush()
		logger.Close()
	}
	m.rwmutex.Unlock()
	m.closed <- true
}

// Pause suspends event processing until Resume is called.
func (m *MultiChannelledLog) Pause() {
	m.paused <- true
}

// Resume restarts event processing after a Pause.
func (m *MultiChannelledLog) Resume() {
	m.paused <- false
}
// ReleaseReopen asks every sub logger to release and reopen its output (eg:
// after external log rotation), chaining any errors into a single error value.
func (m *MultiChannelledLog) ReleaseReopen() error {
	m.rwmutex.Lock()
	defer m.rwmutex.Unlock()
	var accumulatedErr error
	for _, logger := range m.loggers {
		err := logger.ReleaseReopen()
		if err == nil {
			continue
		}
		if accumulatedErr == nil {
			accumulatedErr = fmt.Errorf("Error whilst reopening: %s Error: %w", logger.GetName(), err)
		} else {
			accumulatedErr = fmt.Errorf("Error whilst reopening: %s Error: %v & %w", logger.GetName(), err, accumulatedErr)
		}
	}
	return accumulatedErr
}
// Start processing the MultiChannelledLog. It runs the event loop in the
// calling goroutine (AddLogger launches it via `go m.Start()`) and is a no-op
// if the loop is already running. The loop services pause/resume, flush,
// close and incoming events until closed.
func (m *MultiChannelledLog) Start() {
	m.rwmutex.Lock()
	if m.started {
		m.rwmutex.Unlock()
		return
	}
	pprof.SetGoroutineLabels(m.ctx)
	defer m.finished()
	m.started = true
	m.rwmutex.Unlock()
	paused := false
	for {
		if paused {
			// While paused, incoming events stay queued: only pause-state
			// changes, flush requests and close requests are serviced.
			select {
			case paused = <-m.paused:
				if !paused {
					m.ResetLevel()
				}
			case _, ok := <-m.flush:
				if !ok {
					m.closeLoggers()
					return
				}
				m.rwmutex.RLock()
				for _, logger := range m.loggers {
					logger.Flush()
				}
				m.rwmutex.RUnlock()
			case <-m.close:
				m.closeLoggers()
				return
			}
			continue
		}
		select {
		case paused = <-m.paused:
			// NOTE(review): raising the level to at least INFO while paused
			// appears intended to suppress verbose logging during the pause;
			// ResetLevel restores it on resume — confirm with callers.
			if paused && m.level < INFO {
				m.level = INFO
			}
		case event, ok := <-m.queue:
			if !ok {
				m.closeLoggers()
				return
			}
			m.rwmutex.RLock()
			for _, logger := range m.loggers {
				err := logger.LogEvent(event)
				if err != nil {
					fmt.Println(err) //nolint:forbidigo
				}
			}
			m.rwmutex.RUnlock()
		case _, ok := <-m.flush:
			if !ok {
				m.closeLoggers()
				return
			}
			// Drain pending events first so nothing queued is lost on flush.
			m.emptyQueue()
			m.rwmutex.RLock()
			for _, logger := range m.loggers {
				logger.Flush()
			}
			m.rwmutex.RUnlock()
		case <-m.close:
			m.emptyQueue()
			m.closeLoggers()
			return
		}
	}
}
// emptyQueue drains all currently queued events into the sub loggers without
// blocking. It returns false when the queue channel has been closed.
func (m *MultiChannelledLog) emptyQueue() bool {
	for {
		select {
		case event, ok := <-m.queue:
			if !ok {
				return false
			}
			m.rwmutex.RLock()
			for _, logger := range m.loggers {
				if err := logger.LogEvent(event); err != nil {
					fmt.Println(err) //nolint:forbidigo
				}
			}
			m.rwmutex.RUnlock()
		default:
			// queue empty
			return true
		}
	}
}
// LogEvent queues an event for processing. It gives up with ErrTimeout when
// the queue stays full for 100ms (ie: the consumer goroutine is blocked).
func (m *MultiChannelledLog) LogEvent(event *Event) error {
	select {
	case m.queue <- event:
		return nil
	case <-time.After(100 * time.Millisecond):
		// We're blocked!
		return ErrTimeout{
			Name:     m.name,
			Provider: "MultiChannelledLog",
		}
	}
}

// Close stops the processing goroutine and waits until every sub logger has
// been flushed and closed.
func (m *MultiChannelledLog) Close() {
	m.close <- true
	<-m.closed
}

// Flush asks the processing goroutine to flush all sub loggers.
func (m *MultiChannelledLog) Flush() {
	m.flush <- true
}
// GetLevel returns the aggregate (lowest) level across all sub loggers.
func (m *MultiChannelledLog) GetLevel() Level {
	m.rwmutex.RLock()
	defer m.rwmutex.RUnlock()
	return m.level
}

// GetStacktraceLevel returns the aggregate (lowest) stacktrace level.
func (m *MultiChannelledLog) GetStacktraceLevel() Level {
	m.rwmutex.RLock()
	defer m.rwmutex.RUnlock()
	return m.stacktraceLevel
}
// internalResetLevel recomputes the aggregate level and stacktrace level as
// the minimum across all remaining sub loggers. Callers must hold m.rwmutex.
func (m *MultiChannelledLog) internalResetLevel() Level {
	m.level = NONE
	// BUGFIX: stacktraceLevel must be reset too — previously it was only ever
	// lowered, so it kept the stacktrace level of already-removed sub loggers.
	m.stacktraceLevel = NONE
	for _, logger := range m.loggers {
		if level := logger.GetLevel(); level < m.level {
			m.level = level
		}
		if level := logger.GetStacktraceLevel(); level < m.stacktraceLevel {
			m.stacktraceLevel = level
		}
	}
	return m.level
}
// ResetLevel recomputes the aggregate levels from the current sub loggers.
func (m *MultiChannelledLog) ResetLevel() Level {
	m.rwmutex.Lock()
	defer m.rwmutex.Unlock()
	return m.internalResetLevel()
}

// GetName returns the name of this MultiChannelledLog.
func (m *MultiChannelledLog) GetName() string {
	return m.name
}

// GetMsg returns the raw message of the event.
func (e *Event) GetMsg() string {
	return e.msg
}

246
modules/log/event_format.go Normal file
View file

@ -0,0 +1,246 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"bytes"
"fmt"
"strings"
"time"
)
// Event represents a single log event, populated at the logging call site and
// handed to the enabled writers.
type Event struct {
	Time time.Time

	GoroutinePid string // goroutine id rendered as text (used by the Lgopid flag)
	Caller       string // function name of the logging call site
	Filename     string
	Line         int

	Level Level

	MsgSimpleText string // pre-formatted plain-text message (no colors)

	msgFormat string // the format and args is only valid in the caller's goroutine
	msgArgs   []any  // they are discarded before the event is passed to the writer's channel

	Stacktrace string
}

// EventFormatted is an Event after formatting for a specific writer.
type EventFormatted struct {
	Origin *Event
	Msg    any // the message formatted by the writer's formatter, the writer knows its type
}

// EventFormatter renders an Event (plus its format string and args) into the
// bytes a writer outputs.
type EventFormatter func(mode *WriterMode, event *Event, msgFormat string, msgArgs ...any) []byte
// logStringFormatter adapts a LogStringer to fmt.Formatter so %v uses its
// LogString() output while %#v still shows the Go representation.
type logStringFormatter struct {
	v LogStringer
}

var _ fmt.Formatter = logStringFormatter{}

// Format implements fmt.Formatter.
func (l logStringFormatter) Format(f fmt.State, verb rune) {
	if verb == 'v' && f.Flag('#') {
		_, _ = fmt.Fprintf(f, "%#v", l.v)
		return
	}
	_, _ = f.Write([]byte(l.v.LogString()))
}
// itoa is a cheap fixed-width decimal-to-ASCII conversion (copied from the
// standard log package), appending the digits of i (zero-padded to wid) to buf.
// Fixes for the documented legacy bugs: negative numbers are now rendered with
// a leading '-', and a too-large wid no longer indexes out of bounds (the
// padding is capped at the scratch buffer size).
func itoa(buf []byte, i, wid int) []byte {
	var s [20]byte
	neg := i < 0
	if neg {
		i = -i
	}
	bp := len(s) - 1
	// Emit digits right-to-left; `bp > 0` caps padding so s is never overrun.
	for bp > 0 && (i >= 10 || wid > 1) {
		wid--
		q := i / 10
		s[bp] = byte('0' + i - q*10)
		bp--
		i = q
	}
	// i < 10
	s[bp] = byte('0' + i)
	if neg {
		buf = append(buf, '-')
	}
	return append(buf, s[bp:]...)
}
// colorSprintf formats like fmt.Sprintf; when colorize is disabled it first
// unwraps any *ColoredValue arguments so no escape codes leak into the output.
func colorSprintf(colorize bool, format string, args ...any) string {
	colored := false
	for _, a := range args {
		if _, colored = a.(*ColoredValue); colored {
			break
		}
	}
	if colorize || !colored {
		return fmt.Sprintf(format, args...)
	}
	plain := make([]any, len(args))
	for i, a := range args {
		if cv, ok := a.(*ColoredValue); ok {
			plain[i] = cv.v
		} else {
			plain[i] = a
		}
	}
	return fmt.Sprintf(format, plain...)
}
// EventFormatTextMessage makes the log message for a writer with its mode. This function is a copy of the original package.
// Output layout (sections enabled by mode.Flags):
// prefix, date/time, file:line, funcname, [LEVEL], [gopid], message, stacktrace.
func EventFormatTextMessage(mode *WriterMode, event *Event, msgFormat string, msgArgs ...any) []byte {
	buf := make([]byte, 0, 1024)
	buf = append(buf, mode.Prefix...)
	t := event.Time
	flags := mode.Flags.Bits()
	// date / time section
	if flags&(Ldate|Ltime|Lmicroseconds) != 0 {
		if mode.Colorize {
			buf = append(buf, fgCyanBytes...)
		}
		if flags&LUTC != 0 {
			t = t.UTC()
		}
		if flags&Ldate != 0 {
			year, month, day := t.Date()
			buf = itoa(buf, year, 4)
			buf = append(buf, '/')
			buf = itoa(buf, int(month), 2)
			buf = append(buf, '/')
			buf = itoa(buf, day, 2)
			buf = append(buf, ' ')
		}
		if flags&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			buf = itoa(buf, hour, 2)
			buf = append(buf, ':')
			buf = itoa(buf, min, 2)
			buf = append(buf, ':')
			buf = itoa(buf, sec, 2)
			if flags&Lmicroseconds != 0 {
				buf = append(buf, '.')
				buf = itoa(buf, t.Nanosecond()/1e3, 6)
			}
			buf = append(buf, ' ')
		}
		if mode.Colorize {
			buf = append(buf, resetBytes...)
		}
	}
	// file:line section
	if flags&(Lshortfile|Llongfile) != 0 {
		if mode.Colorize {
			buf = append(buf, fgGreenBytes...)
		}
		file := event.Filename
		if flags&Lmedfile == Lmedfile {
			// keep only the last 20 characters of the path, "..."-prefixed
			startIndex := len(file) - 20
			if startIndex > 0 {
				file = "..." + file[startIndex:]
			}
		} else if flags&Lshortfile != 0 {
			// keep only the base name
			startIndex := strings.LastIndexByte(file, '/')
			if startIndex > 0 && startIndex < len(file) {
				file = file[startIndex+1:]
			}
		}
		buf = append(buf, file...)
		buf = append(buf, ':')
		buf = itoa(buf, event.Line, -1)
		if flags&(Lfuncname|Lshortfuncname) != 0 {
			buf = append(buf, ':')
		} else {
			if mode.Colorize {
				buf = append(buf, resetBytes...)
			}
			buf = append(buf, ' ')
		}
	}
	// function name section
	if flags&(Lfuncname|Lshortfuncname) != 0 {
		if mode.Colorize {
			buf = append(buf, fgGreenBytes...)
		}
		funcname := event.Caller
		if flags&Lshortfuncname != 0 {
			// keep only the part after the last '.'
			lastIndex := strings.LastIndexByte(funcname, '.')
			if lastIndex > 0 && len(funcname) > lastIndex+1 {
				funcname = funcname[lastIndex+1:]
			}
		}
		buf = append(buf, funcname...)
		if mode.Colorize {
			buf = append(buf, resetBytes...)
		}
		buf = append(buf, ' ')
	}
	// level section: "[INFO]" or just the initial "[I]"
	if flags&(Llevel|Llevelinitial) != 0 {
		level := strings.ToUpper(event.Level.String())
		if mode.Colorize {
			buf = append(buf, ColorBytes(levelToColor[event.Level]...)...)
		}
		buf = append(buf, '[')
		if flags&Llevelinitial != 0 {
			buf = append(buf, level[0])
		} else {
			buf = append(buf, level...)
		}
		buf = append(buf, ']')
		if mode.Colorize {
			buf = append(buf, resetBytes...)
		}
		buf = append(buf, ' ')
	}
	var msg []byte
	// if the log needs colorizing, do it
	if mode.Colorize && len(msgArgs) > 0 {
		hasColorValue := false
		for _, v := range msgArgs {
			if _, hasColorValue = v.(*ColoredValue); hasColorValue {
				break
			}
		}
		if hasColorValue {
			msg = []byte(fmt.Sprintf(msgFormat, msgArgs...))
		}
	}
	// try to re-use the pre-formatted simple text message
	if len(msg) == 0 {
		msg = []byte(event.MsgSimpleText)
	}
	// if still no message, do the normal Sprintf for the message
	if len(msg) == 0 {
		msg = []byte(colorSprintf(mode.Colorize, msgFormat, msgArgs...))
	}
	// remove at most one trailing new line
	if len(msg) > 0 && msg[len(msg)-1] == '\n' {
		msg = msg[:len(msg)-1]
	}
	// goroutine pid section: "[pid] "
	if flags&Lgopid == Lgopid {
		if event.GoroutinePid != "" {
			buf = append(buf, '[')
			if mode.Colorize {
				buf = append(buf, ColorBytes(FgHiYellow)...)
			}
			buf = append(buf, event.GoroutinePid...)
			if mode.Colorize {
				buf = append(buf, resetBytes...)
			}
			buf = append(buf, ']', ' ')
		}
	}
	buf = append(buf, msg...)
	// stacktrace, each line indented by a tab
	if event.Stacktrace != "" && mode.StacktraceLevel <= event.Level {
		lines := bytes.Split([]byte(event.Stacktrace), []byte("\n"))
		for _, line := range lines {
			buf = append(buf, "\n\t"...)
			buf = append(buf, line...)
		}
		buf = append(buf, '\n')
	}
	buf = append(buf, '\n')
	return buf
}

View file

@ -0,0 +1,57 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestItoa(t *testing.T) {
b := itoa(nil, 0, 0)
assert.Equal(t, "0", string(b))
b = itoa(nil, 0, 1)
assert.Equal(t, "0", string(b))
b = itoa(nil, 0, 2)
assert.Equal(t, "00", string(b))
}
// TestEventFormatTextMessage pins the exact text output of the formatter with
// all flags enabled (0xffffffff), both without and with colorization.
func TestEventFormatTextMessage(t *testing.T) {
	// plain (non-colorized) output
	res := EventFormatTextMessage(&WriterMode{Prefix: "[PREFIX] ", Colorize: false, Flags: Flags{defined: true, flags: 0xffffffff}},
		&Event{
			Time:         time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC),
			Caller:       "caller",
			Filename:     "filename",
			Line:         123,
			GoroutinePid: "pid",
			Level:        ERROR,
			Stacktrace:   "stacktrace",
		},
		"msg format: %v %v", "arg0", NewColoredValue("arg1", FgBlue),
	)

	assert.Equal(t, `[PREFIX] 2020/01/02 03:04:05.000000 filename:123:caller [E] [pid] msg format: arg0 arg1
	stacktrace
`, string(res))

	// colorized output: ANSI escapes surround date, file/func, level, pid and
	// the ColoredValue argument
	res = EventFormatTextMessage(&WriterMode{Prefix: "[PREFIX] ", Colorize: true, Flags: Flags{defined: true, flags: 0xffffffff}},
		&Event{
			Time:         time.Date(2020, 1, 2, 3, 4, 5, 6, time.UTC),
			Caller:       "caller",
			Filename:     "filename",
			Line:         123,
			GoroutinePid: "pid",
			Level:        ERROR,
			Stacktrace:   "stacktrace",
		},
		"msg format: %v %v", "arg0", NewColoredValue("arg1", FgBlue),
	)

	assert.Equal(t, "[PREFIX] \x1b[36m2020/01/02 03:04:05.000000 \x1b[0m\x1b[32mfilename:123:\x1b[32mcaller\x1b[0m \x1b[1;31m[E]\x1b[0m [\x1b[93mpid\x1b[0m] msg format: arg0 \x1b[34marg1\x1b[0m\n\tstacktrace\n\n", string(res))
}

View file

@ -0,0 +1,54 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
)
// EventWriter is the general interface for all event writers
// EventWriterBase is only used as its base interface
// A writer implementation could override the default EventWriterBase functions
// eg: a writer can override the Run to handle events in its own way with its own goroutine
type EventWriter interface {
	EventWriterBase
}

// WriterMode is the mode for creating a new EventWriter, it contains common options for all writers
// Its WriterOption field is the specified options for a writer, it should be passed by value but not by pointer
type WriterMode struct {
	BufferLen int // queue capacity for pending formatted events

	Level    Level  // minimum level this writer outputs
	Prefix   string // literal prefix prepended to every message
	Colorize bool   // whether to emit ANSI color escapes
	Flags    Flags  // layout flags (date, time, file, funcname, level, ...)

	Expression string // optional regexp; only matching events are written

	StacktraceLevel Level // minimum level at which stacktraces are appended

	WriterOption any // writer-type-specific options (eg: WriterFileOption)
}
// EventWriterProvider is the constructor function for creating a new EventWriter.
type EventWriterProvider func(writerName string, writerMode WriterMode) EventWriter

var eventWriterProviders = map[string]EventWriterProvider{}

// RegisterEventWriter registers the constructor for a writer type (eg: "file").
func RegisterEventWriter(writerType string, p EventWriterProvider) {
	eventWriterProviders[writerType] = p
}

// HasEventWriter reports whether the given writer type has been registered.
func HasEventWriter(writerType string) bool {
	_, ok := eventWriterProviders[writerType]
	return ok
}

// NewEventWriter constructs a writer instance of a registered type.
func NewEventWriter(name, writerType string, mode WriterMode) (EventWriter, error) {
	p, ok := eventWriterProviders[writerType]
	if !ok {
		return nil, fmt.Errorf("unknown event writer type %q for writer %q", writerType, name)
	}
	return p(name, mode), nil
}

View file

@ -0,0 +1,160 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"context"
"fmt"
"io"
"regexp"
"time"
)
// EventWriterBase is the base interface for most event writers
// It provides default implementations for most methods
type EventWriterBase interface {
	Base() *EventWriterBaseImpl
	GetWriterType() string
	GetWriterName() string
	GetLevel() Level
	Run(ctx context.Context)
}

// EventWriterBaseImpl is the default EventWriterBase implementation; concrete
// writers embed it and set OutputWriteCloser (and optionally override Run).
type EventWriterBaseImpl struct {
	writerType string

	Name  string
	Mode  *WriterMode
	Queue chan *EventFormatted // pending formatted events consumed by Run

	FormatMessage     EventFormatter // format the Event to a message and write it to output
	OutputWriteCloser io.WriteCloser // it will be closed when the event writer is stopped
	GetPauseChan      func() chan struct{}

	shared  bool          // whether this writer is shared between loggers
	stopped chan struct{} // closed when Run has returned; nil until started
}
var _ EventWriterBase = (*EventWriterBaseImpl)(nil)

// Base exposes the embedded base implementation.
func (b *EventWriterBaseImpl) Base() *EventWriterBaseImpl { return b }

// GetWriterType returns the registered type of this writer (eg: "console").
func (b *EventWriterBaseImpl) GetWriterType() string { return b.writerType }

// GetWriterName returns the instance name of this writer.
func (b *EventWriterBaseImpl) GetWriterName() string { return b.Name }

// GetLevel returns the minimum level this writer outputs.
func (b *EventWriterBaseImpl) GetLevel() Level { return b.Mode.Level }
// Run is the default implementation for EventWriter.Run. It consumes formatted
// events from the queue and writes them to OutputWriteCloser until the context
// is cancelled or the queue is closed, honoring the global pause channel and
// the optional filter expression.
func (b *EventWriterBaseImpl) Run(ctx context.Context) {
	defer b.OutputWriteCloser.Close()

	var exprRegexp *regexp.Regexp
	if b.Mode.Expression != "" {
		var err error
		if exprRegexp, err = regexp.Compile(b.Mode.Expression); err != nil {
			FallbackErrorf("unable to compile expression %q for writer %q: %v", b.Mode.Expression, b.Name, err)
		}
	}
	for {
		if b.GetPauseChan != nil {
			pause := b.GetPauseChan()
			if pause != nil {
				// block until the pause channel is closed (or we are cancelled)
				select {
				case <-pause:
				case <-ctx.Done():
					return
				}
			}
		}

		select {
		case <-ctx.Done():
			return
		case event, ok := <-b.Queue:
			if !ok {
				return
			}

			if exprRegexp != nil {
				fileLineCaller := fmt.Sprintf("%s:%d:%s", event.Origin.Filename, event.Origin.Line, event.Origin.Caller)
				// MatchString avoids the per-event []byte conversion (and its
				// allocation) that Match([]byte(...)) would incur
				matched := exprRegexp.MatchString(fileLineCaller) || exprRegexp.MatchString(event.Origin.MsgSimpleText)
				if !matched {
					continue
				}
			}

			var err error
			switch msg := event.Msg.(type) {
			case string:
				_, err = b.OutputWriteCloser.Write([]byte(msg))
			case []byte:
				_, err = b.OutputWriteCloser.Write(msg)
			case io.WriterTo:
				_, err = msg.WriteTo(b.OutputWriteCloser)
			default:
				_, err = b.OutputWriteCloser.Write([]byte(fmt.Sprint(msg)))
			}
			if err != nil {
				FallbackErrorf("unable to write log message of %q (%v): %v", b.Name, err, event.Msg)
			}
		}
	}
}
// NewEventWriterBase creates the common base for an event writer, applying
// defaults for unset options (queue length 1000, level INFO, no stacktraces).
func NewEventWriterBase(name, writerType string, mode WriterMode) *EventWriterBaseImpl {
	if mode.BufferLen == 0 {
		mode.BufferLen = 1000
	}
	if mode.Level == UNDEFINED {
		mode.Level = INFO
	}
	if mode.StacktraceLevel == UNDEFINED {
		mode.StacktraceLevel = NONE
	}
	return &EventWriterBaseImpl{
		writerType: writerType,

		Name:  name,
		Mode:  &mode, // mode was passed by value, so taking its address is safe
		Queue: make(chan *EventFormatted, mode.BufferLen),

		FormatMessage: EventFormatTextMessage,
		GetPauseChan:  GetManager().GetPauseChan, // by default, use the global pause channel
	}
}
// eventWriterStartGo starts an event writer's Run method in its own goroutine.
// It is a no-op if the writer was already started.
func eventWriterStartGo(ctx context.Context, w EventWriter, shared bool) {
	base := w.Base()
	if base.stopped != nil {
		return // already started
	}
	base.shared = shared
	base.stopped = make(chan struct{})
	go func() {
		defer close(base.stopped)
		w.Run(ctx)
	}()
}

// eventWriterStopWait closes the writer's queue and waits (at most 2s) for the
// writer goroutine to drain and exit.
func eventWriterStopWait(w EventWriter) {
	close(w.Base().Queue)
	select {
	case <-w.Base().stopped:
	case <-time.After(2 * time.Second):
		FallbackErrorf("unable to stop log writer %q in time, skip", w.GetWriterName())
	}
}

View file

@ -0,0 +1,111 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"io"
"net"
)
// WriterConnOption is the writer-specific option for the "conn" event writer.
type WriterConnOption struct {
	Addr           string // network address to dial
	Protocol       string // network passed to net.Dial (eg: "tcp")
	Reconnect      bool   // reconnect once before the next write
	ReconnectOnMsg bool   // dial before and close after every single write
}

// eventWriterConn writes log lines over a network connection.
type eventWriterConn struct {
	*EventWriterBaseImpl
	connWriter connWriter
}

var _ EventWriter = (*eventWriterConn)(nil)
// NewEventWriterConn creates a writer that sends log lines over a network
// connection described by WriterConnOption.
func NewEventWriterConn(writerName string, writerMode WriterMode) EventWriter {
	opt := writerMode.WriterOption.(WriterConnOption)
	w := &eventWriterConn{
		EventWriterBaseImpl: NewEventWriterBase(writerName, "conn", writerMode),
		connWriter: connWriter{
			ReconnectOnMsg: opt.ReconnectOnMsg,
			Reconnect:      opt.Reconnect,
			Net:            opt.Protocol,
			Addr:           opt.Addr,
		},
	}
	w.OutputWriteCloser = &w.connWriter
	return w
}

func init() {
	RegisterEventWriter("conn", NewEventWriterConn)
}
// below is copied from old code

// connWriter is an io.WriteCloser that writes to a (re)dialable network
// connection.
type connWriter struct {
	innerWriter    io.WriteCloser // current connection; nil until first connect
	ReconnectOnMsg bool           // dial before and close after every write
	Reconnect      bool           // one-shot flag: reconnect before the next write
	Net            string         `json:"net"` // json tags are leftovers from the old JSON-config loader
	Addr           string         `json:"addr"`
}

var _ io.WriteCloser = (*connWriter)(nil)

// Close the inner writer
func (i *connWriter) Close() error {
	if i.innerWriter != nil {
		return i.innerWriter.Close()
	}
	return nil
}
// Write sends the data over the connection, (re)connecting first if needed.
// When ReconnectOnMsg is set, the connection is closed again after each write.
func (i *connWriter) Write(p []byte) (int, error) {
	if i.neededConnectOnMsg() {
		if err := i.connect(); err != nil {
			return 0, err
		}
	}
	if i.ReconnectOnMsg {
		defer i.innerWriter.Close()
	}
	return i.innerWriter.Write(p)
}

// neededConnectOnMsg reports whether a (re)connect must happen before writing.
// A pending one-shot Reconnect request is consumed here.
func (i *connWriter) neededConnectOnMsg() bool {
	switch {
	case i.Reconnect:
		i.Reconnect = false
		return true
	case i.innerWriter == nil:
		return true
	default:
		return i.ReconnectOnMsg
	}
}
// connect (re)establishes the network connection, closing any previous one
// first. TCP connections get keep-alive enabled.
func (i *connWriter) connect() error {
	if i.innerWriter != nil {
		_ = i.innerWriter.Close()
		i.innerWriter = nil
	}

	conn, err := net.Dial(i.Net, i.Addr)
	if err != nil {
		return err
	}

	if tcpConn, ok := conn.(*net.TCPConn); ok {
		if err = tcpConn.SetKeepAlive(true); err != nil {
			// BUGFIX: close the freshly dialed connection instead of leaking
			// it when keep-alive cannot be enabled
			_ = conn.Close()
			return err
		}
	}

	i.innerWriter = conn
	return nil
}

View file

@ -0,0 +1,75 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"context"
"fmt"
"io"
"net"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// listenReadAndClose accepts one connection, reads it to EOF and asserts the
// received payload equals expected.
func listenReadAndClose(t *testing.T, l net.Listener, expected string) {
	conn, err := l.Accept()
	assert.NoError(t, err)
	defer conn.Close()

	data, err := io.ReadAll(conn)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(data))
}
// TestConnLogger sends one event through a conn writer to a local TCP listener
// and verifies the exact formatted line arrives.
func TestConnLogger(t *testing.T) {
	protocol := "tcp"
	address := ":3099"

	l, err := net.Listen(protocol, address)
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname

	logger := NewLoggerWithWriters(context.Background(), NewEventWriterConn("test-conn", WriterMode{
		Level:        level,
		Prefix:       prefix,
		Flags:        FlagsFromBits(flags),
		WriterOption: WriterConnOption{Addr: address, Protocol: protocol, Reconnect: true, ReconnectOnMsg: true},
	}))

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	// LUTC is set, so the expected timestamp is in UTC
	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		Level:         INFO,
		MsgSimpleText: "TEST MSG",
		Caller:        "CALLER",
		Filename:      "FULL/FILENAME",
		Line:          1,
		Time:          date,
	}
	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.Filename, event.Line, event.Caller, strings.ToUpper(event.Level.String())[0], event.MsgSimpleText)

	// the listener must be consuming before the event is sent
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		listenReadAndClose(t, l, expected)
	}()

	logger.SendLogEvent(&event)
	wg.Wait()

	logger.Close()
}

View file

@ -0,0 +1,40 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"io"
"os"
)
// WriterConsoleOption is the writer-specific option for the "console" writer.
type WriterConsoleOption struct {
	Stderr bool // write to stderr instead of stdout
}

// eventWriterConsole writes log lines to stdout or stderr.
type eventWriterConsole struct {
	*EventWriterBaseImpl
}

var _ EventWriter = (*eventWriterConsole)(nil)

// nopCloser wraps an io.Writer with a no-op Close, so the process's stdout and
// stderr are never actually closed when the writer stops.
type nopCloser struct {
	io.Writer
}

func (nopCloser) Close() error { return nil }
// NewEventWriterConsole creates a writer that logs to stdout, or to stderr
// when WriterConsoleOption.Stderr is set.
func NewEventWriterConsole(name string, mode WriterMode) EventWriter {
	w := &eventWriterConsole{EventWriterBaseImpl: NewEventWriterBase(name, "console", mode)}
	out := os.Stdout
	if mode.WriterOption.(WriterConsoleOption).Stderr {
		out = os.Stderr
	}
	w.OutputWriteCloser = nopCloser{out}
	return w
}

func init() {
	RegisterEventWriter("console", NewEventWriterConsole)
}

View file

@ -0,0 +1,48 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"code.gitea.io/gitea/modules/util/rotatingfilewriter"
)
// WriterFileOption is the writer-specific option for the "file" writer.
type WriterFileOption struct {
	FileName         string // path of the log file
	MaxSize          int64  // rotate when the file exceeds this many bytes
	LogRotate        bool   // enable rotation at all
	DailyRotate      bool   // also rotate on day change
	MaxDays          int    // delete rotated files older than this many days
	Compress         bool   // gzip rotated files
	CompressionLevel int
}

// eventWriterFile writes log lines to a (rotating) file.
type eventWriterFile struct {
	*EventWriterBaseImpl
	fileWriter *rotatingfilewriter.RotatingFileWriter
}

var _ EventWriter = (*eventWriterFile)(nil)
// NewEventWriterFile creates a writer that appends log messages to a file,
// with optional size/daily rotation and gzip compression of rotated files.
func NewEventWriterFile(name string, mode WriterMode) EventWriter {
	w := &eventWriterFile{EventWriterBaseImpl: NewEventWriterBase(name, "file", mode)}
	opt := mode.WriterOption.(WriterFileOption)
	fw, err := rotatingfilewriter.Open(opt.FileName, &rotatingfilewriter.Options{
		Rotate:           opt.LogRotate,
		MaximumSize:      opt.MaxSize,
		RotateDaily:      opt.DailyRotate,
		KeepDays:         opt.MaxDays,
		Compress:         opt.Compress,
		CompressionLevel: opt.CompressionLevel,
	})
	if err != nil {
		FallbackErrorf("unable to open log file %q: %v", opt.FileName, err)
	}
	w.fileWriter = fw
	w.OutputWriteCloser = w.fileWriter
	return w
}

func init() {
	RegisterEventWriter("file", NewEventWriterFile)
}

View file

@ -1,283 +0,0 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"bufio"
"compress/gzip"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
)
// FileLogger implements LoggerProvider.
// It writes messages by lines limit, file size limit, or time frequency.
type FileLogger struct {
	WriterLogger
	mw *MuxWriter
	// The opened file
	Filename string `json:"filename"`

	// Rotate at size
	Maxsize        int `json:"maxsize"`
	maxsizeCursize int // bytes written to the current file so far

	// Rotate daily
	Daily         bool  `json:"daily"`
	Maxdays       int64 `json:"maxdays"`
	dailyOpenDate int // day-of-month on which the current file was opened

	Rotate           bool `json:"rotate"`
	Compress         bool `json:"compress"`
	CompressionLevel int  `json:"compressionLevel"`

	startLock sync.Mutex // Only one log can write to the file
}

// MuxWriter an *os.File writer with locker.
type MuxWriter struct {
	mu    sync.Mutex
	fd    *os.File
	owner *FileLogger
}

// Write writes to os.File. It first lets the owner check whether rotation is
// due (docheck), so a single write never straddles a rotation.
func (mw *MuxWriter) Write(b []byte) (int, error) {
	mw.mu.Lock()
	defer mw.mu.Unlock()
	mw.owner.docheck(len(b))
	return mw.fd.Write(b)
}

// Close the internal writer
func (mw *MuxWriter) Close() error {
	return mw.fd.Close()
}

// SetFd sets os.File in writer, closing any previously held file first.
func (mw *MuxWriter) SetFd(fd *os.File) {
	if mw.fd != nil {
		mw.fd.Close()
	}
	mw.fd = fd
}
// NewFileLogger creates a FileLogger with default settings (256MB size limit,
// daily rotation, 7 days retention, gzip compression), as a LoggerProvider.
func NewFileLogger() LoggerProvider {
	fl := &FileLogger{
		Filename:         "",
		Maxsize:          1 << 28, // 256 MB
		Daily:            true,
		Maxdays:          7,
		Rotate:           true,
		Compress:         true,
		CompressionLevel: gzip.DefaultCompression,
	}
	fl.Level = TRACE
	// use MuxWriter instead direct use os.File for lock write when rotate
	fl.mw = &MuxWriter{owner: fl}
	return fl
}
// Init file logger with json config.
// config like:
//
//	{
//	"filename":"log/gogs.log",
//	"maxsize":1<<30,
//	"daily":true,
//	"maxdays":15,
//	"rotate":true
//	}
func (log *FileLogger) Init(config string) error {
	if err := json.Unmarshal([]byte(config), log); err != nil {
		return fmt.Errorf("Unable to parse JSON: %w", err)
	}
	if len(log.Filename) == 0 {
		return errors.New("config must have filename")
	}
	// set MuxWriter as Logger's io.Writer
	log.NewWriterLogger(log.mw)
	return log.StartLogger()
}

// StartLogger start file logger. create log file and set to locker-inside file writer.
func (log *FileLogger) StartLogger() error {
	fd, err := log.createLogFile()
	if err != nil {
		return err
	}
	log.mw.SetFd(fd)
	return log.initFd()
}
// docheck rotates the log file if the size limit or the day boundary has been
// crossed, then accounts the about-to-be-written size. It is called from
// MuxWriter.Write with the write lock held; rotation errors are reported to
// stderr and the write proceeds on the old file.
func (log *FileLogger) docheck(size int) {
	log.startLock.Lock()
	defer log.startLock.Unlock()
	if log.Rotate && ((log.Maxsize > 0 && log.maxsizeCursize >= log.Maxsize) ||
		(log.Daily && time.Now().Day() != log.dailyOpenDate)) {
		if err := log.DoRotate(); err != nil {
			fmt.Fprintf(os.Stderr, "FileLogger(%q): %s\n", log.Filename, err)
			return
		}
	}
	log.maxsizeCursize += size
}

// createLogFile opens (or creates) the configured log file for appending.
func (log *FileLogger) createLogFile() (*os.File, error) {
	// Open the log file
	return os.OpenFile(log.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o660)
}

// initFd refreshes the rotation bookkeeping (current size, open date) from the
// freshly opened file descriptor.
func (log *FileLogger) initFd() error {
	fd := log.mw.fd
	finfo, err := fd.Stat()
	if err != nil {
		return fmt.Errorf("get stat: %w", err)
	}
	log.maxsizeCursize = int(finfo.Size())
	log.dailyOpenDate = time.Now().Day()
	return nil
}
// DoRotate means it need to write file in new file.
// new file name like xx.log.2013-01-01.2
// Sequence: find a free rotation suffix, close the current fd, rename the
// file, optionally compress it and restart the logger on a fresh file.
func (log *FileLogger) DoRotate() error {
	_, err := os.Lstat(log.Filename)
	if err == nil { // file exists
		// Find the next available number
		num := 1
		fname := ""
		for ; err == nil && num <= 999; num++ {
			fname = log.Filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num)
			_, err = os.Lstat(fname)
			// when compressing, the slot is also taken if the .gz exists
			if log.Compress && err != nil {
				_, err = os.Lstat(fname + ".gz")
			}
		}
		// return error if the last file checked still existed
		if err == nil {
			return fmt.Errorf("rotate: cannot find free log number to rename %s", log.Filename)
		}

		fd := log.mw.fd
		fd.Close()
		// close fd before rename

		// Rename the file to its newfound home
		if err = util.Rename(log.Filename, fname); err != nil {
			return fmt.Errorf("Rotate: %w", err)
		}

		if log.Compress {
			// compression runs in the background; errors are discarded
			go compressOldLogFile(fname, log.CompressionLevel) //nolint:errcheck
		}

		// re-start logger
		if err = log.StartLogger(); err != nil {
			return fmt.Errorf("Rotate StartLogger: %w", err)
		}

		go log.deleteOldLog()
	}

	return nil
}
// compressOldLogFile gzips a rotated log file to fname+".gz" and removes the
// original on success. On any failure the partial .gz is removed and the
// original file is kept.
func compressOldLogFile(fname string, compressionLevel int) error {
	reader, err := os.Open(fname)
	if err != nil {
		return err
	}
	defer reader.Close()
	buffer := bufio.NewReader(reader)
	fw, err := os.OpenFile(fname+".gz", os.O_WRONLY|os.O_CREATE, 0o660)
	if err != nil {
		return err
	}
	defer fw.Close()
	zw, err := gzip.NewWriterLevel(fw, compressionLevel)
	if err != nil {
		return err
	}
	defer zw.Close()
	if _, err = buffer.WriteTo(zw); err != nil {
		zw.Close()
		fw.Close()
		util.Remove(fname + ".gz") //nolint:errcheck
		return err
	}
	// BUGFIX: flush the gzip footer and check the error BEFORE deleting the
	// original file — previously the deferred zw.Close() error was ignored,
	// so a truncated .gz could silently replace the original log.
	if err = zw.Close(); err != nil {
		util.Remove(fname + ".gz") //nolint:errcheck
		return err
	}
	reader.Close()
	return util.Remove(fname)
}
// deleteOldLog removes rotated files in the log directory that share the log
// file's base-name prefix and are older than Maxdays. Errors (including
// panics from the walk callback) are swallowed — cleanup is best-effort.
func (log *FileLogger) deleteOldLog() {
	dir := filepath.Dir(log.Filename)
	_ = filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) (returnErr error) {
		defer func() {
			// convert a panic inside the callback into a walk error
			if r := recover(); r != nil {
				returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r)
			}
		}()

		if err != nil {
			return err
		}

		if d.IsDir() {
			return nil
		}

		info, err := d.Info()
		if err != nil {
			return err
		}
		// older than Maxdays days?
		if info.ModTime().Unix() < (time.Now().Unix() - 60*60*24*log.Maxdays) {
			// only touch files that look like rotations of this log file
			if strings.HasPrefix(filepath.Base(path), filepath.Base(log.Filename)) {
				if err := util.Remove(path); err != nil {
					returnErr = fmt.Errorf("Failed to remove %s: %w", path, err)
				}
			}
		}
		return returnErr
	})
}
// Flush syncs the log file to disk; the file logger buffers nothing in memory,
// so "flush" just means fsync.
func (log *FileLogger) Flush() {
	_ = log.mw.fd.Sync()
}

// ReleaseReopen closes and reopens the log file (eg: after external rotation).
func (log *FileLogger) ReleaseReopen() error {
	closingErr := log.mw.fd.Close()
	startingErr := log.StartLogger()
	if startingErr == nil {
		return closingErr
	}
	if closingErr != nil {
		return fmt.Errorf("Error during closing: %v Error during starting: %v", closingErr, startingErr)
	}
	return startingErr
}

// GetName returns the default name for this implementation
func (log *FileLogger) GetName() string {
	return "file"
}

func init() {
	Register("file", NewFileLogger)
}

View file

@ -1,235 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"compress/gzip"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestFileLoggerFails verifies Init rejects malformed JSON, a missing
// filename, and a filename that is actually a directory.
func TestFileLoggerFails(t *testing.T) {
	tmpDir := t.TempDir()

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	// filename := filepath.Join(tmpDir, "test.log")

	fileLogger := NewFileLogger()
	// realFileLogger, ok := fileLogger.(*FileLogger)
	// assert.True(t, ok)

	// Fail if there is bad json
	err := fileLogger.Init("{")
	assert.Error(t, err)

	// Fail if there is no filename
	err = fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\"}", prefix, level.String(), flags, ""))
	assert.Error(t, err)

	// Fail if the file isn't a filename
	err = fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\"}", prefix, level.String(), flags, filepath.ToSlash(tmpDir)))
	assert.Error(t, err)
}
// TestFileLogger exercises the legacy FileLogger end to end: level filtering,
// exact line format, size-based rotation (maxsize = 2 lines), exhaustion of
// all 999 rotation slots, and continued logging after rotation fails.
func TestFileLogger(t *testing.T) {
	tmpDir := t.TempDir()

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	filename := filepath.Join(tmpDir, "test.log")

	fileLogger := NewFileLogger()
	realFileLogger, ok := fileLogger.(*FileLogger)
	assert.True(t, ok)

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)

	// maxsize is two lines: the third accepted line triggers rotation
	fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\",\"maxsize\":%d,\"compress\":false}", prefix, level.String(), flags, filepath.ToSlash(filename), len(expected)*2))

	assert.Equal(t, flags, realFileLogger.Flags)
	assert.Equal(t, level, realFileLogger.Level)
	assert.Equal(t, level, fileLogger.GetLevel())

	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err := os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// DEBUG and TRACE are below INFO: the file must not change
	event.level = DEBUG
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	event.level = TRACE
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	event.level = WARN
	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// Should rotate
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1))
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// the new current file contains only the line that triggered rotation
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	assert.Equal(t, expected, string(logData))

	// occupy every remaining rotation slot so DoRotate has nowhere to go
	for num := 2; num <= 999; num++ {
		file, err := os.OpenFile(filename+fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num), os.O_RDONLY|os.O_CREATE, 0o666)
		assert.NoError(t, err)
		file.Close()
	}
	err = realFileLogger.DoRotate()
	assert.Error(t, err)

	// logging keeps appending to the current file after a failed rotation
	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// Should fail to rotate
	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	fileLogger.Close()
}
// TestCompressFileLogger verifies that a FileLogger configured with
// "compress":true rotates its log file into gzip-suffixed archives, and
// that DoRotate returns an error once all 999 rotation slots for the
// current day are already occupied.
func TestCompressFileLogger(t *testing.T) {
	tmpDir := t.TempDir()
	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	filename := filepath.Join(tmpDir, "test.log")
	fileLogger := NewFileLogger()
	realFileLogger, ok := fileLogger.(*FileLogger)
	assert.True(t, ok)
	// Fixed timestamp so the formatted output (and thus `expected`) is deterministic.
	location, _ := time.LoadLocation("EST")
	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
	dateString := date.UTC().Format("2006/01/02 15:04:05")
	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}
	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	// maxsize is exactly two lines, so the third logged line triggers a rotation.
	err := fileLogger.Init(fmt.Sprintf(`{"prefix":"%s","level":"%s","flags":%d,"filename":"%s","maxsize":%d,"compress":true}`, prefix, level.String(), flags, filepath.ToSlash(filename), len(expected)*2))
	assert.NoError(t, err)
	// Close the logger on exit; without this the open log file handle leaks
	// and can prevent t.TempDir() cleanup on Windows.
	defer fileLogger.Close()
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err := os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))
	event.level = WARN
	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = os.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))
	// Should rotate (log file now exceeds maxsize).
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	// Pre-create every remaining compressed rotation slot for today so the
	// next rotation has nowhere to go.
	for num := 2; num <= 999; num++ {
		file, err := os.OpenFile(filename+fmt.Sprintf(".%s.%03d.gz", time.Now().Format("2006-01-02"), num), os.O_RDONLY|os.O_CREATE, 0o666)
		assert.NoError(t, err)
		file.Close()
	}
	err = realFileLogger.DoRotate()
	assert.Error(t, err)
}
// TestCompressOldFile checks that compressOldLogFile gzips a log file and
// that decompressing the result yields exactly the original content. A
// second, uncompressed file with identical content serves as the reference.
func TestCompressOldFile(t *testing.T) {
	tmpDir := t.TempDir()
	fname := filepath.Join(tmpDir, "test")
	nonGzip := filepath.Join(tmpDir, "test-nonGzip")

	f, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY, 0o660)
	assert.NoError(t, err)
	ng, err := os.OpenFile(nonGzip, os.O_CREATE|os.O_WRONLY, 0o660)
	assert.NoError(t, err)
	// Write the same content to both files; nonGzip stays uncompressed
	// so it can be compared against the decompressed output below.
	for i := 0; i < 999; i++ {
		f.WriteString("This is a test file\n")
		ng.WriteString("This is a test file\n")
	}
	f.Close()
	ng.Close()

	err = compressOldLogFile(fname, -1)
	assert.NoError(t, err)
	_, err = os.Lstat(fname + ".gz")
	assert.NoError(t, err)

	f, err = os.Open(fname + ".gz")
	assert.NoError(t, err)
	// Close was missing here: the .gz handle leaked for the rest of the test run.
	defer f.Close()
	zr, err := gzip.NewReader(f)
	assert.NoError(t, err)
	if err != nil {
		// assert.NoError does not stop the test; bail out before
		// dereferencing a nil reader.
		return
	}
	defer zr.Close()
	data, err := io.ReadAll(zr)
	assert.NoError(t, err)
	original, err := os.ReadFile(nonGzip)
	assert.NoError(t, err)
	assert.Equal(t, original, data)
}

View file

@ -1,9 +1,14 @@
// Copyright 2019 The Gitea Authors. All rights reserved. // Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
package log package log
import "strings" import (
"sort"
"strings"
"code.gitea.io/gitea/modules/json"
)
// These flags define which text to prefix to each log entry generated // These flags define which text to prefix to each log entry generated
// by the Logger. Bits are or'ed together to control what's printed. // by the Logger. Bits are or'ed together to control what's printed.
@ -15,7 +20,7 @@ import "strings"
// The standard is: // The standard is:
// 2009/01/23 01:23:23 ...a/logger/c/d.go:23:runtime.Caller() [I]: message // 2009/01/23 01:23:23 ...a/logger/c/d.go:23:runtime.Caller() [I]: message
const ( const (
Ldate = 1 << iota // the date in the local time zone: 2009/01/23 Ldate uint32 = 1 << iota // the date in the local time zone: 2009/01/23
Ltime // the time in the local time zone: 01:23:23 Ltime // the time in the local time zone: 01:23:23
Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime. Lmicroseconds // microsecond resolution: 01:23:23.123123. assumes Ltime.
Llongfile // full file name and line number: /a/logger/c/d.go:23 Llongfile // full file name and line number: /a/logger/c/d.go:23
@ -23,18 +28,22 @@ const (
Lfuncname // function name of the caller: runtime.Caller() Lfuncname // function name of the caller: runtime.Caller()
Lshortfuncname // last part of the function name Lshortfuncname // last part of the function name
LUTC // if Ldate or Ltime is set, use UTC rather than the local time zone LUTC // if Ldate or Ltime is set, use UTC rather than the local time zone
Llevelinitial // Initial character of the provided level in brackets eg. [I] for info Llevelinitial // Initial character of the provided level in brackets, eg. [I] for info
Llevel // Provided level in brackets [INFO] Llevel // Provided level in brackets [INFO]
Lgopid // the Goroutine-PID of the context
// Last 20 characters of the filename Lmedfile = Lshortfile | Llongfile // last 20 characters of the filename
Lmedfile = Lshortfile | Llongfile LstdFlags = Ldate | Ltime | Lmedfile | Lshortfuncname | Llevelinitial // default
// LstdFlags is the initial value for the standard logger
LstdFlags = Ldate | Ltime | Lmedfile | Lshortfuncname | Llevelinitial
) )
var flagFromString = map[string]int{ const Ldefault = LstdFlags
"none": 0,
type Flags struct {
defined bool
flags uint32
}
var flagFromString = map[string]uint32{
"date": Ldate, "date": Ldate,
"time": Ltime, "time": Ltime,
"microseconds": Lmicroseconds, "microseconds": Lmicroseconds,
@ -45,22 +54,81 @@ var flagFromString = map[string]int{
"utc": LUTC, "utc": LUTC,
"levelinitial": Llevelinitial, "levelinitial": Llevelinitial,
"level": Llevel, "level": Llevel,
"gopid": Lgopid,
"medfile": Lmedfile, "medfile": Lmedfile,
"stdflags": LstdFlags, "stdflags": LstdFlags,
} }
// FlagsFromString takes a comma separated list of flags and returns var flagComboToString = []struct {
// the flags for this string flag uint32
func FlagsFromString(from string) int { name string
flags := 0 }{
for _, flag := range strings.Split(strings.ToLower(from), ",") { // name with more bits comes first
f, ok := flagFromString[strings.TrimSpace(flag)] {LstdFlags, "stdflags"},
if ok { {Lmedfile, "medfile"},
flags |= f
} {Ldate, "date"},
} {Ltime, "time"},
if flags == 0 { {Lmicroseconds, "microseconds"},
return -1 {Llongfile, "longfile"},
} {Lshortfile, "shortfile"},
return flags {Lfuncname, "funcname"},
{Lshortfuncname, "shortfuncname"},
{LUTC, "utc"},
{Llevelinitial, "levelinitial"},
{Llevel, "level"},
{Lgopid, "gopid"},
}
func (f Flags) Bits() uint32 {
if !f.defined {
return Ldefault
}
return f.flags
}
func (f Flags) String() string {
flags := f.Bits()
var flagNames []string
for _, it := range flagComboToString {
if flags&it.flag == it.flag {
flags &^= it.flag
flagNames = append(flagNames, it.name)
}
}
if len(flagNames) == 0 {
return "none"
}
sort.Strings(flagNames)
return strings.Join(flagNames, ",")
}
func (f *Flags) UnmarshalJSON(bytes []byte) error {
var s string
if err := json.Unmarshal(bytes, &s); err != nil {
return err
}
*f = FlagsFromString(s)
return nil
}
func (f Flags) MarshalJSON() ([]byte, error) {
return []byte(`"` + f.String() + `"`), nil
}
func FlagsFromString(from string, def ...uint32) Flags {
from = strings.TrimSpace(from)
if from == "" && len(def) > 0 {
return Flags{defined: true, flags: def[0]}
}
flags := uint32(0)
for _, flag := range strings.Split(strings.ToLower(from), ",") {
flags |= flagFromString[strings.TrimSpace(flag)]
}
return Flags{defined: true, flags: flags}
}
func FlagsFromBits(flags uint32) Flags {
return Flags{defined: true, flags: flags}
} }

30
modules/log/flags_test.go Normal file
View file

@ -0,0 +1,30 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"testing"
"code.gitea.io/gitea/modules/json"
"github.com/stretchr/testify/assert"
)
func TestFlags(t *testing.T) {
assert.EqualValues(t, Ldefault, Flags{}.Bits())
assert.EqualValues(t, 0, FlagsFromString("").Bits())
assert.EqualValues(t, Lgopid, FlagsFromString("", Lgopid).Bits())
assert.EqualValues(t, 0, FlagsFromString("none", Lgopid).Bits())
assert.EqualValues(t, Ldate|Ltime, FlagsFromString("date,time", Lgopid).Bits())
assert.EqualValues(t, "stdflags", FlagsFromString("stdflags").String())
assert.EqualValues(t, "medfile", FlagsFromString("medfile").String())
bs, err := json.Marshal(FlagsFromString("utc,level"))
assert.NoError(t, err)
assert.EqualValues(t, `"level,utc"`, string(bs))
var flags Flags
assert.NoError(t, json.Unmarshal(bs, &flags))
assert.EqualValues(t, LUTC|Llevel, flags.Bits())
}

35
modules/log/init.go Normal file
View file

@ -0,0 +1,35 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"runtime"
"strings"
"code.gitea.io/gitea/modules/process"
"code.gitea.io/gitea/modules/util/rotatingfilewriter"
)
var projectPackagePrefix string
func init() {
_, filename, _, _ := runtime.Caller(0)
projectPackagePrefix = strings.TrimSuffix(filename, "modules/log/init.go")
if projectPackagePrefix == filename {
// in case the source code file is moved, we can not trim the suffix, the code above should also be updated.
panic("unable to detect correct package prefix, please update file: " + filename)
}
rotatingfilewriter.ErrorPrintf = FallbackErrorf
process.Trace = func(start bool, pid process.IDType, description string, parentPID process.IDType, typ string) {
if start && parentPID != "" {
Log(1, TRACE, "Start %s: %s (from %s) (%s)", NewColoredValue(pid, FgHiYellow), description, NewColoredValue(parentPID, FgYellow), NewColoredValue(typ, Reset))
} else if start {
Log(1, TRACE, "Start %s: %s (%s)", NewColoredValue(pid, FgHiYellow), description, NewColoredValue(typ, Reset))
} else {
Log(1, TRACE, "Done %s: %s", NewColoredValue(pid, FgHiYellow), NewColoredValue(description, Reset))
}
}
}

View file

@ -5,8 +5,6 @@ package log
import ( import (
"bytes" "bytes"
"fmt"
"os"
"strings" "strings"
"code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/json"
@ -16,53 +14,53 @@ import (
type Level int type Level int
const ( const (
// TRACE represents the lowest log level UNDEFINED Level = iota
TRACE Level = iota TRACE
// DEBUG is for debug logging
DEBUG DEBUG
// INFO is for information
INFO INFO
// WARN is for warning information
WARN WARN
// ERROR is for error reporting
ERROR ERROR
// CRITICAL is for critical errors
CRITICAL
// FATAL is for fatal errors
FATAL FATAL
// NONE is for no logging
NONE NONE
) )
const CRITICAL = ERROR // most logger frameworks doesn't support CRITICAL, and it doesn't seem useful
var toString = map[Level]string{ var toString = map[Level]string{
UNDEFINED: "undefined",
TRACE: "trace", TRACE: "trace",
DEBUG: "debug", DEBUG: "debug",
INFO: "info", INFO: "info",
WARN: "warn", WARN: "warn",
ERROR: "error", ERROR: "error",
CRITICAL: "critical",
FATAL: "fatal", FATAL: "fatal",
NONE: "none", NONE: "none",
} }
var toLevel = map[string]Level{ var toLevel = map[string]Level{
"undefined": UNDEFINED,
"trace": TRACE, "trace": TRACE,
"debug": DEBUG, "debug": DEBUG,
"info": INFO, "info": INFO,
"warn": WARN, "warn": WARN,
"warning": WARN,
"error": ERROR, "error": ERROR,
"critical": CRITICAL,
"fatal": FATAL, "fatal": FATAL,
"none": NONE, "none": NONE,
} }
// Levels returns all the possible logging levels var levelToColor = map[Level][]ColorAttribute{
func Levels() []string { TRACE: {Bold, FgCyan},
keys := make([]string, 0) DEBUG: {Bold, FgBlue},
for key := range toLevel { INFO: {Bold, FgGreen},
keys = append(keys, key) WARN: {Bold, FgYellow},
} ERROR: {Bold, FgRed},
return keys FATAL: {Bold, BgRed},
NONE: {Reset},
} }
func (l Level) String() string { func (l Level) String() string {
@ -73,14 +71,13 @@ func (l Level) String() string {
return "info" return "info"
} }
// Color returns the color string for this Level func (l Level) ColorAttributes() []ColorAttribute {
func (l Level) Color() *[]byte {
color, ok := levelToColor[l] color, ok := levelToColor[l]
if ok { if ok {
return &(color) return color
} }
none := levelToColor[NONE] none := levelToColor[NONE]
return &none return none
} }
// MarshalJSON takes a Level and turns it into text // MarshalJSON takes a Level and turns it into text
@ -91,31 +88,29 @@ func (l Level) MarshalJSON() ([]byte, error) {
return buffer.Bytes(), nil return buffer.Bytes(), nil
} }
// FromString takes a level string and returns a Level
func FromString(level string) Level {
temp, ok := toLevel[strings.ToLower(level)]
if !ok {
return INFO
}
return temp
}
// UnmarshalJSON takes text and turns it into a Level // UnmarshalJSON takes text and turns it into a Level
func (l *Level) UnmarshalJSON(b []byte) error { func (l *Level) UnmarshalJSON(b []byte) error {
var tmp interface{} var tmp any
err := json.Unmarshal(b, &tmp) err := json.Unmarshal(b, &tmp)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Err: %v", err)
return err return err
} }
switch v := tmp.(type) { switch v := tmp.(type) {
case string: case string:
*l = FromString(v) *l = LevelFromString(v)
case int: case int:
*l = FromString(Level(v).String()) *l = LevelFromString(Level(v).String())
default: default:
*l = INFO *l = INFO
} }
return nil return nil
} }
// LevelFromString takes a level string and returns a Level
func LevelFromString(level string) Level {
if l, ok := toLevel[strings.ToLower(level)]; ok {
return l
}
return INFO
}

View file

@ -1,305 +0,0 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"os"
"runtime"
"strings"
"sync"
"code.gitea.io/gitea/modules/process"
)
type loggerMap struct {
sync.Map
}
func (m *loggerMap) Load(k string) (*MultiChannelledLogger, bool) {
v, ok := m.Map.Load(k)
if !ok {
return nil, false
}
l, ok := v.(*MultiChannelledLogger)
return l, ok
}
func (m *loggerMap) Store(k string, v *MultiChannelledLogger) {
m.Map.Store(k, v)
}
func (m *loggerMap) Delete(k string) {
m.Map.Delete(k)
}
var (
// DEFAULT is the name of the default logger
DEFAULT = "default"
// NamedLoggers map of named loggers
NamedLoggers loggerMap
prefix string
)
// NewLogger create a logger for the default logger
func NewLogger(bufLen int64, name, provider, config string) *MultiChannelledLogger {
err := NewNamedLogger(DEFAULT, bufLen, name, provider, config)
if err != nil {
CriticalWithSkip(1, "Unable to create default logger: %v", err)
panic(err)
}
l, _ := NamedLoggers.Load(DEFAULT)
return l
}
// NewNamedLogger creates a new named logger for a given configuration
func NewNamedLogger(name string, bufLen int64, subname, provider, config string) error {
logger, ok := NamedLoggers.Load(name)
if !ok {
logger = newLogger(name, bufLen)
NamedLoggers.Store(name, logger)
}
return logger.SetLogger(subname, provider, config)
}
// DelNamedLogger closes and deletes the named logger
func DelNamedLogger(name string) {
l, ok := NamedLoggers.Load(name)
if ok {
NamedLoggers.Delete(name)
l.Close()
}
}
// DelLogger removes the named sublogger from the default logger
func DelLogger(name string) error {
logger, _ := NamedLoggers.Load(DEFAULT)
found, err := logger.DelLogger(name)
if !found {
Trace("Log %s not found, no need to delete", name)
}
return err
}
// GetLogger returns either a named logger or the default logger
func GetLogger(name string) *MultiChannelledLogger {
logger, ok := NamedLoggers.Load(name)
if ok {
return logger
}
logger, _ = NamedLoggers.Load(DEFAULT)
return logger
}
// GetLevel returns the minimum logger level
func GetLevel() Level {
l, _ := NamedLoggers.Load(DEFAULT)
return l.GetLevel()
}
// GetStacktraceLevel returns the minimum logger level
func GetStacktraceLevel() Level {
l, _ := NamedLoggers.Load(DEFAULT)
return l.GetStacktraceLevel()
}
// Trace records trace log
func Trace(format string, v ...interface{}) {
Log(1, TRACE, format, v...)
}
// IsTrace returns true if at least one logger is TRACE
func IsTrace() bool {
return GetLevel() <= TRACE
}
// Debug records debug log
func Debug(format string, v ...interface{}) {
Log(1, DEBUG, format, v...)
}
// IsDebug returns true if at least one logger is DEBUG
func IsDebug() bool {
return GetLevel() <= DEBUG
}
// Info records info log
func Info(format string, v ...interface{}) {
Log(1, INFO, format, v...)
}
// IsInfo returns true if at least one logger is INFO
func IsInfo() bool {
return GetLevel() <= INFO
}
// Warn records warning log
func Warn(format string, v ...interface{}) {
Log(1, WARN, format, v...)
}
// IsWarn returns true if at least one logger is WARN
func IsWarn() bool {
return GetLevel() <= WARN
}
// Error records error log
func Error(format string, v ...interface{}) {
Log(1, ERROR, format, v...)
}
// ErrorWithSkip records error log from "skip" calls back from this function
func ErrorWithSkip(skip int, format string, v ...interface{}) {
Log(skip+1, ERROR, format, v...)
}
// IsError returns true if at least one logger is ERROR
func IsError() bool {
return GetLevel() <= ERROR
}
// Critical records critical log
func Critical(format string, v ...interface{}) {
Log(1, CRITICAL, format, v...)
}
// CriticalWithSkip records critical log from "skip" calls back from this function
func CriticalWithSkip(skip int, format string, v ...interface{}) {
Log(skip+1, CRITICAL, format, v...)
}
// IsCritical returns true if at least one logger is CRITICAL
func IsCritical() bool {
return GetLevel() <= CRITICAL
}
// Fatal records fatal log and exit process
func Fatal(format string, v ...interface{}) {
Log(1, FATAL, format, v...)
Close()
os.Exit(1)
}
// FatalWithSkip records fatal log from "skip" calls back from this function
func FatalWithSkip(skip int, format string, v ...interface{}) {
Log(skip+1, FATAL, format, v...)
Close()
os.Exit(1)
}
// IsFatal returns true if at least one logger is FATAL
func IsFatal() bool {
return GetLevel() <= FATAL
}
// Pause pauses all the loggers
func Pause() {
NamedLoggers.Range(func(key, value interface{}) bool {
logger := value.(*MultiChannelledLogger)
logger.Pause()
logger.Flush()
return true
})
}
// Resume resumes all the loggers
func Resume() {
NamedLoggers.Range(func(key, value interface{}) bool {
logger := value.(*MultiChannelledLogger)
logger.Resume()
return true
})
}
// ReleaseReopen releases and reopens logging files
func ReleaseReopen() error {
var accumulatedErr error
NamedLoggers.Range(func(key, value interface{}) bool {
logger := value.(*MultiChannelledLogger)
if err := logger.ReleaseReopen(); err != nil {
if accumulatedErr == nil {
accumulatedErr = fmt.Errorf("Error reopening %s: %w", key.(string), err)
} else {
accumulatedErr = fmt.Errorf("Error reopening %s: %v & %w", key.(string), err, accumulatedErr)
}
}
return true
})
return accumulatedErr
}
// Close closes all the loggers
func Close() {
l, ok := NamedLoggers.Load(DEFAULT)
if !ok {
return
}
NamedLoggers.Delete(DEFAULT)
l.Close()
}
// Log a message with defined skip and at logging level
// A skip of 0 refers to the caller of this command
func Log(skip int, level Level, format string, v ...interface{}) {
l, ok := NamedLoggers.Load(DEFAULT)
if ok {
l.Log(skip+1, level, format, v...) //nolint:errcheck
}
}
// LoggerAsWriter is a io.Writer shim around the gitea log
type LoggerAsWriter struct {
ourLoggers []*MultiChannelledLogger
level Level
}
// NewLoggerAsWriter creates a Writer representation of the logger with setable log level
func NewLoggerAsWriter(level string, ourLoggers ...*MultiChannelledLogger) *LoggerAsWriter {
if len(ourLoggers) == 0 {
l, _ := NamedLoggers.Load(DEFAULT)
ourLoggers = []*MultiChannelledLogger{l}
}
l := &LoggerAsWriter{
ourLoggers: ourLoggers,
level: FromString(level),
}
return l
}
// Write implements the io.Writer interface to allow spoofing of chi
func (l *LoggerAsWriter) Write(p []byte) (int, error) {
for _, logger := range l.ourLoggers {
// Skip = 3 because this presumes that we have been called by log.Println()
// If the caller has used log.Output or the like this will be wrong
logger.Log(3, l.level, string(p)) //nolint:errcheck
}
return len(p), nil
}
// Log takes a given string and logs it at the set log-level
func (l *LoggerAsWriter) Log(msg string) {
for _, logger := range l.ourLoggers {
// Set the skip to reference the call just above this
_ = logger.Log(1, l.level, msg)
}
}
func init() {
process.Trace = func(start bool, pid process.IDType, description string, parentPID process.IDType, typ string) {
if start && parentPID != "" {
Log(1, TRACE, "Start %s: %s (from %s) (%s)", NewColoredValue(pid, FgHiYellow), description, NewColoredValue(parentPID, FgYellow), NewColoredValue(typ, Reset))
} else if start {
Log(1, TRACE, "Start %s: %s (%s)", NewColoredValue(pid, FgHiYellow), description, NewColoredValue(typ, Reset))
} else {
Log(1, TRACE, "Done %s: %s", NewColoredValue(pid, FgHiYellow), NewColoredValue(description, Reset))
}
}
_, filename, _, _ := runtime.Caller(0)
prefix = strings.TrimSuffix(filename, "modules/log/log.go")
if prefix == filename {
// in case the source code file is moved, we can not trim the suffix, the code above should also be updated.
panic("unable to detect correct package prefix, please update file: " + filename)
}
}

View file

@ -1,152 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func baseConsoleTest(t *testing.T, logger *MultiChannelledLogger) (chan []byte, chan bool) {
written := make(chan []byte)
closed := make(chan bool)
c := CallbackWriteCloser{
callback: func(p []byte, close bool) {
written <- p
closed <- close
},
}
m := logger.MultiChannelledLog
channelledLog := m.GetEventLogger("console")
assert.NotEmpty(t, channelledLog)
realChanLog, ok := channelledLog.(*ChannelledLog)
assert.True(t, ok)
realCL, ok := realChanLog.loggerProvider.(*ConsoleLogger)
assert.True(t, ok)
assert.Equal(t, INFO, realCL.Level)
realCL.out = c
format := "test: %s"
args := []interface{}{"A"}
logger.Log(0, INFO, format, args...)
line := <-written
assert.Contains(t, string(line), fmt.Sprintf(format, args...))
assert.False(t, <-closed)
format = "test2: %s"
logger.Warn(format, args...)
line = <-written
assert.Contains(t, string(line), fmt.Sprintf(format, args...))
assert.False(t, <-closed)
format = "testerror: %s"
logger.Error(format, args...)
line = <-written
assert.Contains(t, string(line), fmt.Sprintf(format, args...))
assert.False(t, <-closed)
return written, closed
}
func TestNewLoggerUnexported(t *testing.T) {
level := INFO
logger := newLogger("UNEXPORTED", 0)
err := logger.SetLogger("console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
assert.NoError(t, err)
out := logger.MultiChannelledLog.GetEventLogger("console")
assert.NotEmpty(t, out)
chanlog, ok := out.(*ChannelledLog)
assert.True(t, ok)
assert.Equal(t, "console", chanlog.provider)
assert.Equal(t, INFO, logger.GetLevel())
baseConsoleTest(t, logger)
}
func TestNewLoggger(t *testing.T) {
level := INFO
logger := NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
assert.Equal(t, INFO, GetLevel())
assert.False(t, IsTrace())
assert.False(t, IsDebug())
assert.True(t, IsInfo())
assert.True(t, IsWarn())
assert.True(t, IsError())
written, closed := baseConsoleTest(t, logger)
format := "test: %s"
args := []interface{}{"A"}
Log(0, INFO, format, args...)
line := <-written
assert.Contains(t, string(line), fmt.Sprintf(format, args...))
assert.False(t, <-closed)
Info(format, args...)
line = <-written
assert.Contains(t, string(line), fmt.Sprintf(format, args...))
assert.False(t, <-closed)
go DelLogger("console")
line = <-written
assert.Equal(t, "", string(line))
assert.True(t, <-closed)
}
func TestNewLogggerRecreate(t *testing.T) {
level := INFO
NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
assert.Equal(t, INFO, GetLevel())
assert.False(t, IsTrace())
assert.False(t, IsDebug())
assert.True(t, IsInfo())
assert.True(t, IsWarn())
assert.True(t, IsError())
format := "test: %s"
args := []interface{}{"A"}
Log(0, INFO, format, args...)
NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
assert.Equal(t, INFO, GetLevel())
assert.False(t, IsTrace())
assert.False(t, IsDebug())
assert.True(t, IsInfo())
assert.True(t, IsWarn())
assert.True(t, IsError())
Log(0, INFO, format, args...)
assert.Panics(t, func() {
NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"`, level.String()))
})
go DelLogger("console")
// We should be able to redelete without a problem
go DelLogger("console")
}
func TestNewNamedLogger(t *testing.T) {
level := INFO
err := NewNamedLogger("test", 0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
assert.NoError(t, err)
logger, _ := NamedLoggers.Load("test")
assert.Equal(t, level, logger.GetLevel())
written, closed := baseConsoleTest(t, logger)
go DelNamedLogger("test")
line := <-written
assert.Equal(t, "", string(line))
assert.True(t, <-closed)
}

View file

@ -1,141 +1,50 @@
// Copyright 2019 The Gitea Authors. All rights reserved. // Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT // SPDX-License-Identifier: MIT
// Package log provides logging capabilities for Gitea.
// Concepts:
//
// * Logger: a Logger provides logging functions and dispatches log events to all its writers
//
// * EventWriter: written log Event to a destination (eg: file, console)
// - EventWriterBase: the base struct of a writer, it contains common fields and functions for all writers
// - WriterType: the type name of a writer, eg: console, file
// - WriterName: aka Mode Name in document, the name of a writer instance, it's usually defined by the config file.
// It is called "mode name" because old code use MODE as config key, to keep compatibility, keep this concept.
//
// * WriterMode: the common options for all writers, eg: log level.
// - WriterConsoleOption and others: the specified options for a writer, eg: file path, remote address.
//
// Call graph:
// -> log.Info()
// -> LoggerImpl.Log()
// -> LoggerImpl.SendLogEvent, then the event goes into writer's goroutines
// -> EventWriter.Run() handles the events
package log package log
import "os" // BaseLogger provides the basic logging functions
type BaseLogger interface {
// Logger is the basic interface for logging Log(skip int, level Level, format string, v ...any)
type Logger interface {
LevelLogger
Trace(format string, v ...interface{})
IsTrace() bool
Debug(format string, v ...interface{})
IsDebug() bool
Info(format string, v ...interface{})
IsInfo() bool
Warn(format string, v ...interface{})
IsWarn() bool
Error(format string, v ...interface{})
ErrorWithSkip(skip int, format string, v ...interface{})
IsError() bool
Critical(format string, v ...interface{})
CriticalWithSkip(skip int, format string, v ...interface{})
IsCritical() bool
Fatal(format string, v ...interface{})
FatalWithSkip(skip int, format string, v ...interface{})
IsFatal() bool
}
// LevelLogger is the simplest logging interface
type LevelLogger interface {
Flush()
Close()
GetLevel() Level GetLevel() Level
Log(skip int, level Level, format string, v ...interface{}) error
} }
// SettableLogger is the interface of loggers which have subloggers // LevelLogger provides level-related logging functions
type SettableLogger interface { type LevelLogger interface {
SetLogger(name, provider, config string) error LevelEnabled(level Level) bool
DelLogger(name string) (bool, error)
Trace(format string, v ...any)
Debug(format string, v ...any)
Info(format string, v ...any)
Warn(format string, v ...any)
Error(format string, v ...any)
Critical(format string, v ...any)
} }
// StacktraceLogger is a logger that can log stacktraces type Logger interface {
type StacktraceLogger interface { BaseLogger
GetStacktraceLevel() Level
}
// LevelLoggerLogger wraps a LevelLogger as a Logger
type LevelLoggerLogger struct {
LevelLogger LevelLogger
} }
// Trace records trace log type LogStringer interface { //nolint:revive
func (l *LevelLoggerLogger) Trace(format string, v ...interface{}) { LogString() string
l.Log(1, TRACE, format, v...) //nolint:errcheck
}
// IsTrace returns true if the logger is TRACE
func (l *LevelLoggerLogger) IsTrace() bool {
return l.GetLevel() <= TRACE
}
// Debug records debug log
func (l *LevelLoggerLogger) Debug(format string, v ...interface{}) {
l.Log(1, DEBUG, format, v...) //nolint:errcheck
}
// IsDebug returns true if the logger is DEBUG
func (l *LevelLoggerLogger) IsDebug() bool {
return l.GetLevel() <= DEBUG
}
// Info records information log
func (l *LevelLoggerLogger) Info(format string, v ...interface{}) {
l.Log(1, INFO, format, v...) //nolint:errcheck
}
// IsInfo returns true if the logger is INFO
func (l *LevelLoggerLogger) IsInfo() bool {
return l.GetLevel() <= INFO
}
// Warn records warning log
func (l *LevelLoggerLogger) Warn(format string, v ...interface{}) {
l.Log(1, WARN, format, v...) //nolint:errcheck
}
// IsWarn returns true if the logger is WARN
func (l *LevelLoggerLogger) IsWarn() bool {
return l.GetLevel() <= WARN
}
// Error records error log
func (l *LevelLoggerLogger) Error(format string, v ...interface{}) {
l.Log(1, ERROR, format, v...) //nolint:errcheck
}
// ErrorWithSkip records error log from "skip" calls back from this function
func (l *LevelLoggerLogger) ErrorWithSkip(skip int, format string, v ...interface{}) {
l.Log(skip+1, ERROR, format, v...) //nolint:errcheck
}
// IsError returns true if the logger is ERROR
func (l *LevelLoggerLogger) IsError() bool {
return l.GetLevel() <= ERROR
}
// Critical records critical log
func (l *LevelLoggerLogger) Critical(format string, v ...interface{}) {
l.Log(1, CRITICAL, format, v...) //nolint:errcheck
}
// CriticalWithSkip records critical log from "skip" calls back from this function
func (l *LevelLoggerLogger) CriticalWithSkip(skip int, format string, v ...interface{}) {
l.Log(skip+1, CRITICAL, format, v...) //nolint:errcheck
}
// IsCritical returns true if the logger is CRITICAL
func (l *LevelLoggerLogger) IsCritical() bool {
return l.GetLevel() <= CRITICAL
}
// Fatal records fatal log and exit the process
func (l *LevelLoggerLogger) Fatal(format string, v ...interface{}) {
l.Log(1, FATAL, format, v...) //nolint:errcheck
l.Close()
os.Exit(1)
}
// FatalWithSkip records fatal log from "skip" calls back from this function and exits the process
func (l *LevelLoggerLogger) FatalWithSkip(skip int, format string, v ...interface{}) {
l.Log(skip+1, FATAL, format, v...) //nolint:errcheck
l.Close()
os.Exit(1)
}
// IsFatal returns true if the logger is FATAL
func (l *LevelLoggerLogger) IsFatal() bool {
return l.GetLevel() <= FATAL
} }

View file

@ -0,0 +1,83 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"os"
)
// FallbackErrorf is the last chance to show an error if the logger has internal errors
func FallbackErrorf(format string, args ...any) {
_, _ = fmt.Fprintf(os.Stderr, format+"\n", args)
}
func GetLevel() Level {
return GetLogger(DEFAULT).GetLevel()
}
func Log(skip int, level Level, format string, v ...any) {
GetLogger(DEFAULT).Log(skip+1, level, format, v...)
}
func Trace(format string, v ...any) {
Log(1, TRACE, format, v...)
}
func IsTrace() bool {
return GetLevel() <= TRACE
}
func Debug(format string, v ...any) {
Log(1, DEBUG, format, v...)
}
func IsDebug() bool {
return GetLevel() <= DEBUG
}
func Info(format string, v ...any) {
Log(1, INFO, format, v...)
}
func Warn(format string, v ...any) {
Log(1, WARN, format, v...)
}
func Error(format string, v ...any) {
Log(1, ERROR, format, v...)
}
func ErrorWithSkip(skip int, format string, v ...any) {
Log(skip+1, ERROR, format, v...)
}
func Critical(format string, v ...any) {
Log(1, ERROR, format, v...)
}
// Fatal records fatal log and exit process
func Fatal(format string, v ...any) {
	Log(1, FATAL, format, v...)
	GetManager().Close() // flush all writers so the fatal message is not lost
	os.Exit(1)
}

// GetLogger returns the named logger from the process-wide manager,
// creating an empty (disabled) one if it doesn't exist yet.
func GetLogger(name string) Logger {
	return GetManager().GetLogger(name)
}

// IsLoggerEnabled reports whether the named logger would actually emit
// anything (it has writers and a usable level).
func IsLoggerEnabled(name string) bool {
	return GetManager().GetLogger(name).IsEnabled()
}
// SetConsoleLogger replaces all writers of the named logger with a single
// console writer at the given level (colorized when stdout supports it).
func SetConsoleLogger(loggerName, writerName string, level Level) {
	mode := WriterMode{
		Level:        level,
		Flags:        FlagsFromBits(LstdFlags),
		Colorize:     CanColorStdout,
		WriterOption: WriterConsoleOption{},
	}
	consoleWriter := NewEventWriterConsole(writerName, mode)
	GetManager().GetLogger(loggerName).RemoveAllWriters().AddWriters(consoleWriter)
}

239
modules/log/logger_impl.go Normal file
View file

@ -0,0 +1,239 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"context"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/util"
)
// LoggerImpl is the implementation of a level-filtered, multi-writer logger.
type LoggerImpl struct {
	LevelLogger

	ctx       context.Context // lifetime of this logger; cancelled by Close
	ctxCancel context.CancelFunc

	level           atomic.Int32 // lowest Level of all writers, cached so Log can filter without locking
	stacktraceLevel atomic.Int32 // lowest stacktrace Level of all writers

	eventWriterMu sync.RWMutex // protects eventWriters
	eventWriters  map[string]EventWriter
}

// compile-time interface assertions
var (
	_ BaseLogger  = (*LoggerImpl)(nil)
	_ LevelLogger = (*LoggerImpl)(nil)
)
// SendLogEvent sends a log event to all writers
func (l *LoggerImpl) SendLogEvent(event *Event) {
	l.eventWriterMu.RLock()
	defer l.eventWriterMu.RUnlock()

	// with no writers the message would be lost silently, so report it on stderr
	if len(l.eventWriters) == 0 {
		FallbackErrorf("[no logger writer]: %s", event.MsgSimpleText)
		return
	}

	// the writers have their own goroutines, the message arguments (with Stringer) shouldn't be used in other goroutines
	// so the event message must be formatted here
	msgFormat, msgArgs := event.msgFormat, event.msgArgs
	event.msgFormat, event.msgArgs = "(already processed by formatters)", nil

	for _, w := range l.eventWriters {
		if event.Level < w.GetLevel() {
			continue // this writer is not interested in events below its own level
		}
		// format per writer, because each writer has its own mode (colorize, flags, ...)
		formatted := &EventFormatted{
			Origin: event,
			Msg:    w.Base().FormatMessage(w.Base().Mode, event, msgFormat, msgArgs...),
		}
		select {
		case w.Base().Queue <- formatted:
		default:
			// never block the caller: drop the event and report the overflow on stderr
			bs, _ := json.Marshal(event)
			FallbackErrorf("log writer %q queue is full, event: %v", w.GetWriterName(), string(bs))
		}
	}
}
// syncLevelInternal recomputes the cached message level and stacktrace level
// from the current writers. The caller must hold eventWriterMu.
func (l *LoggerImpl) syncLevelInternal() {
	lowestLevel := NONE
	for _, w := range l.eventWriters {
		if w.GetLevel() < lowestLevel {
			lowestLevel = w.GetLevel()
		}
	}
	l.level.Store(int32(lowestLevel))

	lowestLevel = NONE
	for _, w := range l.eventWriters {
		if w.Base().Mode.StacktraceLevel < lowestLevel {
			// bug fix: the old code stored w.GetLevel() here, so the cached
			// stacktrace level wrongly tracked the writer's message level
			lowestLevel = w.Base().Mode.StacktraceLevel
		}
	}
	l.stacktraceLevel.Store(int32(lowestLevel))
}
// removeWriterInternal removes a writer from the logger, stopping and flushing
// it unless it is shared. The caller must hold eventWriterMu.
func (l *LoggerImpl) removeWriterInternal(w EventWriter) {
	if shared := w.Base().shared; !shared {
		// only stop non-shared writers; shared writers are owned by the manager
		eventWriterStopWait(w)
	}
	delete(l.eventWriters, w.GetWriterName())
}
// AddWriters registers writers on the logger and starts them; a writer with an
// existing name replaces (and stops) the old one.
func (l *LoggerImpl) AddWriters(writer ...EventWriter) {
	l.eventWriterMu.Lock()
	defer l.eventWriterMu.Unlock()

	// first drop every writer that is about to be replaced ...
	for _, w := range writer {
		old, exist := l.eventWriters[w.GetWriterName()]
		if exist {
			l.removeWriterInternal(old)
		}
	}
	// ... then register and start the new ones
	for _, w := range writer {
		l.eventWriters[w.GetWriterName()] = w
		eventWriterStartGo(l.ctx, w, false)
	}
	l.syncLevelInternal()
}
// RemoveWriter removes a writer by name; a non-shared writer is flushed and
// closed. It returns util.ErrNotExist when no writer has that name.
func (l *LoggerImpl) RemoveWriter(modeName string) error {
	l.eventWriterMu.Lock()
	defer l.eventWriterMu.Unlock()

	writer, found := l.eventWriters[modeName]
	if !found {
		return util.ErrNotExist
	}
	l.removeWriterInternal(writer)
	l.syncLevelInternal()
	return nil
}
// RemoveAllWriters detaches every writer; non-shared ones are flushed and
// closed. It returns the logger itself for call chaining.
func (l *LoggerImpl) RemoveAllWriters() *LoggerImpl {
	l.eventWriterMu.Lock()
	defer l.eventWriterMu.Unlock()

	for _, writer := range l.eventWriters {
		l.removeWriterInternal(writer)
	}
	l.eventWriters = map[string]EventWriter{}
	l.syncLevelInternal()
	return l
}
// DumpWriters dumps the writers as a JSON map, it's used for debugging and display purposes.
func (l *LoggerImpl) DumpWriters() map[string]any {
	l.eventWriterMu.RLock()
	defer l.eventWriterMu.RUnlock()

	writers := make(map[string]any, len(l.eventWriters))
	for name, writer := range l.eventWriters {
		// round-trip the mode through JSON to get a generic map for display
		bs, err := json.Marshal(writer.Base().Mode)
		if err != nil {
			FallbackErrorf("marshal writer %q to dump failed: %v", name, err)
			continue
		}
		modeDump := map[string]any{}
		_ = json.Unmarshal(bs, &modeDump)
		modeDump["WriterType"] = writer.GetWriterType()
		writers[name] = modeDump
	}
	return writers
}
// Close shuts the logger down: non-shared writers are flushed and closed,
// then the logger's context is cancelled.
func (l *LoggerImpl) Close() {
	l.RemoveAllWriters()
	l.ctxCancel()
}
// IsEnabled returns true if the logger is enabled: it has a working level and has writers.
// Fatal is not considered as enabled, because it's a special case and the process just exits.
func (l *LoggerImpl) IsEnabled() bool {
	l.eventWriterMu.RLock()
	defer l.eventWriterMu.RUnlock()
	return len(l.eventWriters) > 0 && l.level.Load() < int32(FATAL)
}
// Log prepares the log event, if the level matches, the event will be sent to the writers
func (l *LoggerImpl) Log(skip int, level Level, format string, logArgs ...any) {
	// fast reject via the cached lowest writer level, no locking needed
	if Level(l.level.Load()) > level {
		return
	}

	event := &Event{
		Time:   time.Now(),
		Level:  level,
		Caller: "?()", // placeholder if the caller can't be resolved
	}

	// skip+1: also skip this Log function itself
	pc, filename, line, ok := runtime.Caller(skip + 1)
	if ok {
		fn := runtime.FuncForPC(pc)
		if fn != nil {
			event.Caller = fn.Name() + "()"
		}
	}
	event.Filename, event.Line = strings.TrimPrefix(filename, projectPackagePrefix), line

	if l.stacktraceLevel.Load() <= int32(level) {
		event.Stacktrace = Stack(skip + 1)
	}

	// attach the goroutine "pid" label when present — see getGoroutineLabels
	labels := getGoroutineLabels()
	if labels != nil {
		event.GoroutinePid = labels["pid"]
	}

	// get a simple text message without color
	msgArgs := make([]any, len(logArgs))
	copy(msgArgs, logArgs)

	// handle LogStringer values: render them lazily via logStringFormatter,
	// also when they are wrapped inside a ColoredValue
	for i, v := range msgArgs {
		if cv, ok := v.(*ColoredValue); ok {
			if s, ok := cv.v.(LogStringer); ok {
				cv.v = logStringFormatter{v: s}
			}
		} else if s, ok := v.(LogStringer); ok {
			msgArgs[i] = logStringFormatter{v: s}
		}
	}

	event.MsgSimpleText = colorSprintf(false, format, msgArgs...)
	event.msgFormat = format
	event.msgArgs = msgArgs
	l.SendLogEvent(event)
}
// GetLevel returns the cached lowest level of all this logger's writers.
func (l *LoggerImpl) GetLevel() Level {
	return Level(l.level.Load())
}
// NewLoggerWithWriters creates a logger bound to ctx with the given writers attached.
func NewLoggerWithWriters(ctx context.Context, writer ...EventWriter) *LoggerImpl {
	logger := &LoggerImpl{eventWriters: map[string]EventWriter{}}
	logger.ctx, logger.ctxCancel = context.WithCancel(ctx)
	logger.LevelLogger = BaseLoggerToGeneralLogger(logger)
	logger.syncLevelInternal()
	logger.AddWriters(writer...)
	return logger
}

145
modules/log/logger_test.go Normal file
View file

@ -0,0 +1,145 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"context"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// dummyWriter is an in-memory event writer for tests; an optional delay
// simulates a slow output target.
type dummyWriter struct {
	*EventWriterBaseImpl

	delay time.Duration // per-Write sleep to simulate slowness

	mu   sync.Mutex // protects logs
	logs []string
}
// Write appends the formatted message to the in-memory log after the optional delay.
func (d *dummyWriter) Write(p []byte) (n int, err error) {
	if d.delay > 0 {
		time.Sleep(d.delay) // simulate a slow writer
	}
	d.mu.Lock()
	defer d.mu.Unlock()
	d.logs = append(d.logs, string(p))
	return len(p), nil
}
// Close is a no-op: the dummy writer keeps its logs for later assertions.
func (d *dummyWriter) Close() error {
	return nil
}
// GetLogs returns a snapshot copy of the collected log lines (never nil).
func (d *dummyWriter) GetLogs() []string {
	d.mu.Lock()
	defer d.mu.Unlock()
	return append([]string{}, d.logs...)
}
// newDummyWriter creates a dummy writer with the given level and write delay.
func newDummyWriter(name string, level Level, delay time.Duration) *dummyWriter {
	writer := &dummyWriter{
		EventWriterBaseImpl: NewEventWriterBase(name, "dummy", WriterMode{Level: level, Flags: FlagsFromBits(0)}),
		delay:               delay,
	}
	writer.Base().OutputWriteCloser = writer // the writer writes to itself (in memory)
	return writer
}
// TestLogger exercises level aggregation across writers and verifies that
// Close flushes the slow writer's queued events.
func TestLogger(t *testing.T) {
	logger := NewLoggerWithWriters(context.Background())

	dump := logger.DumpWriters()
	assert.EqualValues(t, 0, len(dump))
	assert.EqualValues(t, NONE, logger.GetLevel())
	assert.False(t, logger.IsEnabled())

	w1 := newDummyWriter("dummy-1", DEBUG, 0)
	logger.AddWriters(w1)
	assert.EqualValues(t, DEBUG, logger.GetLevel())

	w2 := newDummyWriter("dummy-2", WARN, 200*time.Millisecond)
	logger.AddWriters(w2)
	assert.EqualValues(t, DEBUG, logger.GetLevel()) // still the lowest writer level

	dump = logger.DumpWriters()
	assert.EqualValues(t, 2, len(dump))

	logger.Trace("trace-level") // this level is not logged
	logger.Debug("debug-level")
	logger.Error("error-level")

	// w2 is slow, so only w1 has logs
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, []string{"debug-level\n", "error-level\n"}, w1.GetLogs())
	assert.Equal(t, []string{}, w2.GetLogs())

	logger.Close()

	// after Close, all logs are flushed
	assert.Equal(t, []string{"debug-level\n", "error-level\n"}, w1.GetLogs())
	assert.Equal(t, []string{"error-level\n"}, w2.GetLogs())
}
// TestLoggerPause verifies that PauseAll holds events back and ResumeAll
// lets them flow again.
func TestLoggerPause(t *testing.T) {
	logger := NewLoggerWithWriters(context.Background())
	w1 := newDummyWriter("dummy-1", DEBUG, 0)
	logger.AddWriters(w1)

	GetManager().PauseAll()
	logger.Info("info-level")
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, []string{}, w1.GetLogs()) // nothing gets through while paused

	GetManager().ResumeAll()
	time.Sleep(100 * time.Millisecond)
	assert.Equal(t, []string{"info-level\n"}, w1.GetLogs())

	logger.Close()
}
// testLogString implements LogStringer so tests can check the lazy
// LogString rendering path.
type testLogString struct {
	Field string
}

func (t testLogString) LogString() string {
	return "log-string"
}
// TestLoggerLogString verifies LogString is used for %s/%v (also when wrapped
// in a ColoredValue), while %#v still shows the real struct contents.
func TestLoggerLogString(t *testing.T) {
	logger := NewLoggerWithWriters(context.Background())
	w1 := newDummyWriter("dummy-1", DEBUG, 0)
	w1.Mode.Colorize = true
	logger.AddWriters(w1)

	logger.Info("%s %s %#v %v", testLogString{}, &testLogString{}, testLogString{Field: "detail"}, NewColoredValue(testLogString{}, FgRed))
	logger.Close()
	assert.Equal(t, []string{"log-string log-string log.testLogString{Field:\"detail\"} \x1b[31mlog-string\x1b[0m\n"}, w1.GetLogs())
}
// TestLoggerExpressionFilter verifies that the writer Expression filters by
// message text and also matches against the event's file position.
func TestLoggerExpressionFilter(t *testing.T) {
	logger := NewLoggerWithWriters(context.Background())
	w1 := newDummyWriter("dummy-1", DEBUG, 0)
	w1.Mode.Expression = "foo.*"
	logger.AddWriters(w1)

	logger.Info("foo")
	logger.Info("bar") // filtered out by the expression
	logger.Info("foo bar")

	logger.SendLogEvent(&Event{Level: INFO, Filename: "foo.go", MsgSimpleText: "by filename"})

	logger.Close()
	assert.Equal(t, []string{"foo\n", "foo bar\n", "by filename\n"}, w1.GetLogs())
}

142
modules/log/manager.go Normal file
View file

@ -0,0 +1,142 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"context"
"fmt"
"sync"
"sync/atomic"
)
// DEFAULT is the name of the default logger.
const DEFAULT = "default"

// LoggerManager manages loggers and shared event writers
type LoggerManager struct {
	ctx       context.Context // lifetime of the manager; cancelled by Close
	ctxCancel context.CancelFunc

	mu      sync.Mutex // protects writers and loggers
	writers map[string]EventWriter
	loggers map[string]*LoggerImpl

	defaultLogger atomic.Pointer[LoggerImpl] // lock-free cache of loggers[DEFAULT]

	pauseMu   sync.RWMutex
	pauseChan chan struct{} // non-nil while paused; closed by ResumeAll
}
// GetLogger returns a logger with the given name. If the logger doesn't exist, a new empty one will be created.
func (m *LoggerManager) GetLogger(name string) *LoggerImpl {
	// lock-free fast path for the hot DEFAULT logger
	if name == DEFAULT {
		if logger := m.defaultLogger.Load(); logger != nil {
			return logger
		}
	}

	m.mu.Lock()
	defer m.mu.Unlock()

	logger := m.loggers[name]
	if logger == nil {
		logger = NewLoggerWithWriters(m.ctx)
		m.loggers[name] = logger
		if name == DEFAULT {
			m.defaultLogger.Store(logger)
		}
	}

	return logger
}
// PauseAll pauses all event writers: they block on the fresh pause channel
// until ResumeAll closes it.
func (m *LoggerManager) PauseAll() {
	m.pauseMu.Lock()
	defer m.pauseMu.Unlock()
	m.pauseChan = make(chan struct{})
}
// ResumeAll resumes all event writers
func (m *LoggerManager) ResumeAll() {
	m.pauseMu.Lock()
	// NOTE(review): calling ResumeAll without a preceding PauseAll would close
	// a nil channel and panic — confirm callers always pair the two
	close(m.pauseChan)
	m.pauseChan = nil
	m.pauseMu.Unlock()
}
// GetPauseChan returns a channel for writer pausing: nil when not paused,
// otherwise a channel that writers block on until it is closed.
func (m *LoggerManager) GetPauseChan() chan struct{} {
	m.pauseMu.RLock()
	defer m.pauseMu.RUnlock()
	return m.pauseChan
}
// Close closes the logger manager, all loggers and writers will be closed, the messages are flushed.
func (m *LoggerManager) Close() {
	m.mu.Lock()
	defer m.mu.Unlock()

	for _, logger := range m.loggers {
		logger.Close() // flushes and stops the logger's own (non-shared) writers
	}
	m.loggers = map[string]*LoggerImpl{}

	for _, writer := range m.writers {
		// shared writers are owned by the manager, stop them here
		eventWriterStopWait(writer)
	}
	m.writers = map[string]EventWriter{}

	m.ctxCancel()
}
// DumpLoggers returns a map of all loggers and their event writers, for debugging and display purposes.
func (m *LoggerManager) DumpLoggers() map[string]any {
	m.mu.Lock()
	defer m.mu.Unlock()

	dump := make(map[string]any, len(m.loggers))
	for name, logger := range m.loggers {
		dump[name] = map[string]any{
			"IsEnabled":    logger.IsEnabled(),
			"EventWriters": logger.DumpWriters(),
		}
	}
	return dump
}
// NewSharedWriter creates a new shared event writer, it can be used by multiple loggers, and a shared writer won't be closed if a logger is closed.
func (m *LoggerManager) NewSharedWriter(writerName, writerType string, mode WriterMode) (writer EventWriter, err error) {
	m.mu.Lock()
	defer m.mu.Unlock()

	if _, exist := m.writers[writerName]; exist {
		return nil, fmt.Errorf("log event writer %q has been added before", writerName)
	}

	writer, err = NewEventWriter(writerName, writerType, mode)
	if err != nil {
		return nil, err
	}

	m.writers[writerName] = writer
	eventWriterStartGo(m.ctx, writer, true) // started (and later stopped) by the manager, not by any logger
	return writer, nil
}
// GetSharedWriter returns the shared writer with the given name, or nil if it doesn't exist.
func (m *LoggerManager) GetSharedWriter(writerName string) EventWriter {
	m.mu.Lock()
	defer m.mu.Unlock()
	return m.writers[writerName]
}
// loggerManager is the process-wide manager instance.
var loggerManager = NewManager()

// GetManager returns the process-wide logger manager.
func GetManager() *LoggerManager {
	return loggerManager
}

// NewManager creates a logger manager with its own lifecycle context.
func NewManager() *LoggerManager {
	manager := &LoggerManager{
		writers: map[string]EventWriter{},
		loggers: map[string]*LoggerImpl{},
	}
	manager.ctx, manager.ctxCancel = context.WithCancel(context.Background())
	return manager
}

View file

@ -0,0 +1,42 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestSharedWorker verifies a shared writer survives RemoveAllWriters on a
// logger and keeps collecting events until the manager itself is closed.
func TestSharedWorker(t *testing.T) {
	RegisterEventWriter("dummy", func(writerName string, writerMode WriterMode) EventWriter {
		return newDummyWriter(writerName, writerMode.Level, 0)
	})

	m := NewManager()
	_, err := m.NewSharedWriter("dummy-1", "dummy", WriterMode{Level: DEBUG, Flags: FlagsFromBits(0)})
	assert.NoError(t, err)

	w := m.GetSharedWriter("dummy-1")
	assert.NotNil(t, w)
	loggerTest := m.GetLogger("test")
	loggerTest.AddWriters(w)
	loggerTest.Info("msg-1")
	loggerTest.RemoveAllWriters() // the shared writer is not closed here
	loggerTest.Info("never seen") // no writers attached: this event is lost

	// the shared writer can still be used later
	w = m.GetSharedWriter("dummy-1")
	assert.NotNil(t, w)
	loggerTest.AddWriters(w)
	loggerTest.Info("msg-2")

	// the same shared writer can serve a second logger at the same time
	m.GetLogger("test-another").AddWriters(w)
	m.GetLogger("test-another").Info("msg-3")

	m.Close()

	logs := w.(*dummyWriter).GetLogs()
	assert.Equal(t, []string{"msg-1\n", "msg-2\n", "msg-3\n"}, logs)
}

78
modules/log/misc.go Normal file
View file

@ -0,0 +1,78 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"io"
)
// baseToLogger adapts a BaseLogger (only Log/GetLevel) to the full Logger interface.
type baseToLogger struct {
	base BaseLogger
}

// BaseLoggerToGeneralLogger wraps a BaseLogger (which only has Log() function) to a Logger (which has Info() function)
func BaseLoggerToGeneralLogger(b BaseLogger) Logger {
	l := &baseToLogger{base: b}
	return l
}

var _ Logger = (*baseToLogger)(nil)
// Log forwards to the wrapped logger, adding one to skip for this adapter frame.
func (s *baseToLogger) Log(skip int, level Level, format string, v ...any) {
	s.base.Log(skip+1, level, format, v...)
}

// GetLevel returns the wrapped logger's level.
func (s *baseToLogger) GetLevel() Level {
	return s.base.GetLevel()
}

// LevelEnabled reports whether messages at the given level would be emitted.
func (s *baseToLogger) LevelEnabled(level Level) bool {
	return level >= s.base.GetLevel()
}
// Trace logs at TRACE level; skip=1 accounts for this adapter frame.
func (s *baseToLogger) Trace(format string, v ...any) {
	s.base.Log(1, TRACE, format, v...)
}

// Debug logs at DEBUG level.
func (s *baseToLogger) Debug(format string, v ...any) {
	s.base.Log(1, DEBUG, format, v...)
}

// Info logs at INFO level.
func (s *baseToLogger) Info(format string, v ...any) {
	s.base.Log(1, INFO, format, v...)
}

// Warn logs at WARN level.
func (s *baseToLogger) Warn(format string, v ...any) {
	s.base.Log(1, WARN, format, v...)
}

// Error logs at ERROR level.
func (s *baseToLogger) Error(format string, v ...any) {
	s.base.Log(1, ERROR, format, v...)
}

// Critical logs at CRITICAL level.
func (s *baseToLogger) Critical(format string, v ...any) {
	s.base.Log(1, CRITICAL, format, v...)
}
// PrintfLogger adapts a printf-style logging function to a type with a
// Printf method, for libraries that expect such an interface.
type PrintfLogger struct {
	Logf func(format string, args ...any)
}

// Printf forwards the call unchanged to the underlying Logf function.
func (p *PrintfLogger) Printf(format string, args ...any) {
	p.Logf(format, args...)
}
// loggerToWriter adapts a printf-style log function to io.Writer.
type loggerToWriter struct {
	logf func(format string, args ...any)
}

// Write logs the given bytes as one "%s" message and reports them all written.
func (w *loggerToWriter) Write(bs []byte) (int, error) {
	w.logf("%s", string(bs))
	return len(bs), nil
}

// LoggerToWriter wraps a log function to an io.Writer
func LoggerToWriter(logf func(format string, args ...any)) io.Writer {
	return &loggerToWriter{logf: logf}
}

View file

@ -1,104 +0,0 @@
// Copyright 2020 The Gogs Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"runtime"
"strings"
"time"
)
// MultiChannelledLogger is default logger in the Gitea application.
// it can contain several providers and log message into all providers.
type MultiChannelledLogger struct {
	LevelLoggerLogger
	*MultiChannelledLog

	bufferLength int64 // queue buffer size reused when creating sub-loggers
}
// newLogger initializes and returns a new logger with the given name and
// channel buffer length.
func newLogger(name string, buffer int64) *MultiChannelledLogger {
	logger := &MultiChannelledLogger{
		MultiChannelledLog: NewMultiChannelledLog(name, buffer),
		bufferLength:       buffer,
	}
	logger.LevelLogger = logger // the embedded LevelLoggerLogger delegates back to this logger
	return logger
}
// SetLogger sets new logger instance with given logger provider and config.
// An existing sub-logger with the same name is replaced.
func (l *MultiChannelledLogger) SetLogger(name, provider, config string) error {
	eventLogger, err := NewChannelledLog(l.ctx, name, provider, config, l.bufferLength)
	if err != nil {
		return fmt.Errorf("failed to create sublogger (%s): %w", name, err)
	}

	l.MultiChannelledLog.DelLogger(name)

	err = l.MultiChannelledLog.AddLogger(eventLogger)
	if err != nil {
		if IsErrDuplicateName(err) {
			// still a duplicate despite the DelLogger above — presumably a
			// race; include the current names to help diagnose
			return fmt.Errorf("%w other names: %v", err, l.MultiChannelledLog.GetEventLoggerNames())
		}
		return fmt.Errorf("failed to add sublogger (%s): %w", name, err)
	}

	return nil
}
// DelLogger deletes a sublogger from this logger; the returned error is always nil.
func (l *MultiChannelledLogger) DelLogger(name string) (bool, error) {
	removed := l.MultiChannelledLog.DelLogger(name)
	return removed, nil
}
// Log msg at the provided level with the provided caller defined by skip (0 being the function that calls this function)
func (l *MultiChannelledLogger) Log(skip int, level Level, format string, v ...interface{}) error {
	if l.GetLevel() > level {
		return nil
	}
	caller := "?()" // placeholder if the caller can't be resolved
	pc, filename, line, ok := runtime.Caller(skip + 1)
	if ok {
		// Get caller function name.
		fn := runtime.FuncForPC(pc)
		if fn != nil {
			caller = fn.Name() + "()"
		}
	}
	msg := format
	if len(v) > 0 {
		msg = ColorSprintf(format, v...)
	}
	// prefix the message with the highlighted goroutine "pid" label when present
	labels := getGoroutineLabels()
	if labels != nil {
		pid, ok := labels["pid"]
		if ok {
			msg = "[" + ColorString(FgHiYellow) + pid + ColorString(Reset) + "] " + msg
		}
	}
	stack := ""
	if l.GetStacktraceLevel() <= level {
		stack = Stack(skip + 1)
	}
	return l.SendLog(level, caller, strings.TrimPrefix(filename, prefix), line, msg, stack)
}
// SendLog sends a log event at the provided level with the information given
func (l *MultiChannelledLogger) SendLog(level Level, caller, filename string, line int, msg, stack string) error {
	if level < l.GetLevel() {
		return nil
	}
	event := &Event{
		time:       time.Now(),
		level:      level,
		caller:     caller,
		filename:   filename,
		line:       line,
		msg:        msg,
		stacktrace: stack,
	}
	l.LogEvent(event) //nolint:errcheck
	return nil
}

View file

@ -1,25 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
// LoggerProvider represents behaviors of a logger provider.
type LoggerProvider interface {
	// Init configures the provider from its JSON config string.
	Init(config string) error
	EventLogger
}
// loggerProvider is a factory function producing a LoggerProvider.
type loggerProvider func() LoggerProvider

// providers holds all registered logger provider factories, keyed by name.
var providers = make(map[string]loggerProvider)

// Register registers given logger provider to providers.
// It panics on a nil factory or a duplicate name.
func Register(name string, log loggerProvider) {
	if log == nil {
		panic("log: register provider is nil")
	}
	if _, dup := providers[name]; dup {
		panic("log: register called twice for provider \"" + name + "\"")
	}
	providers[name] = log
}

View file

@ -1,114 +0,0 @@
// Copyright 2014 The Gogs Authors. All rights reserved.
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"net/smtp"
"strings"
"code.gitea.io/gitea/modules/json"
)
// smtpWriter forwards written bytes to its owning SMTPLogger as an email.
type smtpWriter struct {
	owner *SMTPLogger
}

// Write sends the message as an email
func (s *smtpWriter) Write(p []byte) (int, error) {
	return s.owner.sendMail(p)
}

// Close does nothing
func (s *smtpWriter) Close() error {
	return nil
}
// SMTPLogger implements LoggerProvider and is used to send emails via given SMTP-server.
type SMTPLogger struct {
	WriterLogger
	// NOTE(review): the capitalized "Username" key is inconsistent with the
	// other lowercase tags — existing configs may depend on it, confirm before changing
	Username           string   `json:"Username"`
	Password           string   `json:"password"`
	Host               string   `json:"host"` // "host:port" of the SMTP server
	Subject            string   `json:"subject"`
	RecipientAddresses []string `json:"sendTos"`
	sendMailFn         func(string, smtp.Auth, string, []string, []byte) error // indirection so tests can capture the mail
}
// NewSMTPLogger creates smtp writer.
func NewSMTPLogger() LoggerProvider {
	logger := &SMTPLogger{sendMailFn: smtp.SendMail}
	logger.Level = TRACE
	return logger
}
// Init smtp writer with json config.
// config like:
//
//	{
//		"Username":"example@gmail.com",
//		"password:"password",
//		"host":"smtp.gmail.com:465",
//		"subject":"email title",
//		"sendTos":["email1","email2"],
//		"level":LevelError
//	}
func (log *SMTPLogger) Init(jsonconfig string) error {
	err := json.Unmarshal([]byte(jsonconfig), log)
	if err != nil {
		return fmt.Errorf("Unable to parse JSON: %w", err)
	}
	log.NewWriterLogger(&smtpWriter{
		owner: log,
	})
	// set again here (NewSMTPLogger already does it) — presumably to cover
	// loggers not created through NewSMTPLogger
	log.sendMailFn = smtp.SendMail
	return nil
}
// sendMail sends p as the body of an email with the configured subject to all
// recipient addresses; the returned count is always len(p) so the caller
// treats the write as fully consumed. (It backs smtpWriter.Write.)
func (log *SMTPLogger) sendMail(p []byte) (int, error) {
	// assumes Host is "host:port"; hp[0] is the bare host used for auth —
	// TODO confirm Host is validated upstream
	hp := strings.Split(log.Host, ":")
	// Set up authentication information.
	auth := smtp.PlainAuth(
		"",
		log.Username,
		log.Password,
		hp[0],
	)
	// Connect to the server, authenticate, set the sender and recipient,
	// and send the email all in one step.
	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
	mailmsg := []byte("To: " + strings.Join(log.RecipientAddresses, ";") + "\r\nFrom: " + log.Username + "<" + log.Username +
		">\r\nSubject: " + log.Subject + "\r\n" + contentType + "\r\n\r\n")
	mailmsg = append(mailmsg, p...)
	return len(p), log.sendMailFn(
		log.Host,
		auth,
		log.Username,
		log.RecipientAddresses,
		mailmsg,
	)
}
// Flush when log should be flushed; each Write sends its mail immediately,
// so there is nothing buffered to flush.
func (log *SMTPLogger) Flush() {
}

// ReleaseReopen does nothing
func (log *SMTPLogger) ReleaseReopen() error {
	return nil
}

// GetName returns the default name for this implementation
func (log *SMTPLogger) GetName() string {
	return "smtp"
}

func init() {
	Register("smtp", NewSMTPLogger)
}

View file

@ -1,85 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"net/smtp"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestSMTPLogger checks JSON config parsing and that LogEvent hands a
// properly formatted mail (headers + rendered message) to the injected
// send function.
func TestSMTPLogger(t *testing.T) {
	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	username := "testuser"
	password := "testpassword"
	host := "testhost"
	subject := "testsubject"
	sendTos := []string{"testto1", "testto2"}

	logger := NewSMTPLogger()
	smtpLogger, ok := logger.(*SMTPLogger)
	assert.True(t, ok)

	err := logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"username\":\"%s\",\"password\":\"%s\",\"host\":\"%s\",\"subject\":\"%s\",\"sendTos\":[\"%s\",\"%s\"]}", prefix, level.String(), flags, username, password, host, subject, sendTos[0], sendTos[1]))
	assert.NoError(t, err)
	assert.Equal(t, flags, smtpLogger.Flags)
	assert.Equal(t, level, smtpLogger.Level)
	assert.Equal(t, level, logger.GetLevel())

	location, _ := time.LoadLocation("EST")
	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)
	dateString := date.UTC().Format("2006/01/02 15:04:05")
	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}
	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)

	// capture the outgoing mail instead of talking to a real SMTP server
	var envToHost string
	var envFrom string
	var envTo []string
	var envMsg []byte
	smtpLogger.sendMailFn = func(addr string, a smtp.Auth, from string, to []string, msg []byte) error {
		envToHost = addr
		envFrom = from
		envTo = to
		envMsg = msg
		return nil
	}

	err = logger.LogEvent(&event)
	assert.NoError(t, err)
	assert.Equal(t, host, envToHost)
	assert.Equal(t, username, envFrom)
	assert.Equal(t, sendTos, envTo)
	assert.Contains(t, string(envMsg), expected)

	logger.Flush()

	// a WARN event is above the INFO threshold, so it must also be sent
	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	err = logger.LogEvent(&event)
	assert.NoError(t, err)
	assert.Equal(t, host, envToHost)
	assert.Equal(t, username, envFrom)
	assert.Equal(t, sendTos, envTo)
	assert.Contains(t, string(envMsg), expected)

	logger.Close()
}

View file

@ -32,19 +32,19 @@ func Stack(skip int) string {
} }
// Print equivalent of debug.Stack() // Print equivalent of debug.Stack()
fmt.Fprintf(buf, "%s:%d (0x%x)\n", filename, lineNumber, programCounter) _, _ = fmt.Fprintf(buf, "%s:%d (0x%x)\n", filename, lineNumber, programCounter)
// Now try to print the offending line // Now try to print the offending line
if filename != lastFilename { if filename != lastFilename {
data, err := os.ReadFile(filename) data, err := os.ReadFile(filename)
if err != nil { if err != nil {
// can't read this sourcefile // can't read this source file
// likely we don't have the sourcecode available // likely we don't have the sourcecode available
continue continue
} }
lines = bytes.Split(data, []byte{'\n'}) lines = bytes.Split(data, []byte{'\n'})
lastFilename = filename lastFilename = filename
} }
fmt.Fprintf(buf, "\t%s: %s\n", functionName(programCounter), source(lines, lineNumber)) _, _ = fmt.Fprintf(buf, "\t%s: %s\n", functionName(programCounter), source(lines, lineNumber))
} }
return buf.String() return buf.String()
} }

View file

@ -1,269 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"bytes"
"fmt"
"io"
"regexp"
"strings"
"sync"
)
// byteArrayWriter is a minimal in-memory io.Writer backed by its own byte slice.
type byteArrayWriter []byte

// Write appends p to the buffer; it never fails.
func (b *byteArrayWriter) Write(p []byte) (int, error) {
	*b = append(*b, p...)
	return len(p), nil
}
// WriterLogger represent a basic logger for Gitea
type WriterLogger struct {
	out io.WriteCloser
	mu  sync.Mutex // serializes writes to out

	Level           Level  `json:"level"`           // minimum level that is written
	StacktraceLevel Level  `json:"stacktraceLevel"` // minimum level that gets a stacktrace appended
	Flags           int    `json:"flags"`           // L* formatting flag bits
	Prefix          string `json:"prefix"`          // literal prefix prepended to every message
	Colorize        bool   `json:"colorize"`        // keep (true) or strip (false) ANSI color codes
	Expression      string `json:"expression"`      // optional regexp filter, see Match
	regexp          *regexp.Regexp                  // compiled Expression; nil when absent or invalid
}
// NewWriterLogger creates a new WriterLogger from the provided WriteCloser.
// Optionally the level can be changed at the same time.
func (logger *WriterLogger) NewWriterLogger(out io.WriteCloser, level ...Level) {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	logger.out = out
	// Flags==0 (the zero value) means "use the defaults";
	// -1 is the explicit way to ask for no flags at all
	switch logger.Flags {
	case 0:
		logger.Flags = LstdFlags
	case -1:
		logger.Flags = 0
	}
	if len(level) > 0 {
		logger.Level = level[0]
	}

	logger.createExpression()
}
// createExpression compiles the configured Expression into a regexp.
// NOTE(review): a compile error is silently swallowed and filtering is
// disabled (regexp stays nil, so Match accepts everything) — confirm this
// best-effort behavior is intended before changing it.
func (logger *WriterLogger) createExpression() {
	if len(logger.Expression) > 0 {
		var err error
		logger.regexp, err = regexp.Compile(logger.Expression)
		if err != nil {
			logger.regexp = nil
		}
	}
}
// GetLevel returns the logging level for this logger
func (logger *WriterLogger) GetLevel() Level {
	return logger.Level
}

// GetStacktraceLevel returns the stacktrace logging level for this logger
func (logger *WriterLogger) GetStacktraceLevel() Level {
	return logger.StacktraceLevel
}
// itoa appends the decimal representation of i to buf, zero-padded on the
// left to at least wid digits (wid <= 1 means no padding).
// Copy of cheap integer to fixed-width decimal to ascii from the stdlib logger.
func itoa(buf *[]byte, i, wid int) {
	var digits [20]byte
	pos := len(digits) - 1
	// emit digits right-to-left while more digits or padding remain
	for i >= 10 || wid > 1 {
		wid--
		q := i / 10
		digits[pos] = byte('0' + i - q*10)
		pos--
		i = q
	}
	// i < 10: final (most significant) digit
	digits[pos] = byte('0' + i)
	*buf = append(*buf, digits[pos:]...)
}
// createMsg renders the event into buf following the logger's Flags:
// optional prefix, date/time, file position, function name, level tag,
// the (possibly color-stripped) message and an optional stacktrace.
func (logger *WriterLogger) createMsg(buf *[]byte, event *Event) {
	*buf = append(*buf, logger.Prefix...)
	t := event.time

	// date/time section
	if logger.Flags&(Ldate|Ltime|Lmicroseconds) != 0 {
		if logger.Colorize {
			*buf = append(*buf, fgCyanBytes...)
		}
		if logger.Flags&LUTC != 0 {
			t = t.UTC()
		}
		if logger.Flags&Ldate != 0 {
			year, month, day := t.Date()
			itoa(buf, year, 4)
			*buf = append(*buf, '/')
			itoa(buf, int(month), 2)
			*buf = append(*buf, '/')
			itoa(buf, day, 2)
			*buf = append(*buf, ' ')
		}
		if logger.Flags&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			itoa(buf, hour, 2)
			*buf = append(*buf, ':')
			itoa(buf, min, 2)
			*buf = append(*buf, ':')
			itoa(buf, sec, 2)
			if logger.Flags&Lmicroseconds != 0 {
				*buf = append(*buf, '.')
				itoa(buf, t.Nanosecond()/1e3, 6)
			}
			*buf = append(*buf, ' ')
		}
		if logger.Colorize {
			*buf = append(*buf, resetBytes...)
		}
	}

	// file:line section
	if logger.Flags&(Lshortfile|Llongfile) != 0 {
		if logger.Colorize {
			*buf = append(*buf, fgGreenBytes...)
		}
		file := event.filename
		if logger.Flags&Lmedfile == Lmedfile {
			// medium: keep only the trailing 20 characters of the path
			startIndex := len(file) - 20
			if startIndex > 0 {
				file = "..." + file[startIndex:]
			}
		} else if logger.Flags&Lshortfile != 0 {
			// short: keep only the basename
			startIndex := strings.LastIndexByte(file, '/')
			if startIndex > 0 && startIndex < len(file) {
				file = file[startIndex+1:]
			}
		}
		*buf = append(*buf, file...)
		*buf = append(*buf, ':')
		itoa(buf, event.line, -1)
		if logger.Flags&(Lfuncname|Lshortfuncname) != 0 {
			*buf = append(*buf, ':')
		} else {
			if logger.Colorize {
				*buf = append(*buf, resetBytes...)
			}
			*buf = append(*buf, ' ')
		}
	}

	// function name section
	if logger.Flags&(Lfuncname|Lshortfuncname) != 0 {
		if logger.Colorize {
			*buf = append(*buf, fgGreenBytes...)
		}
		funcname := event.caller
		if logger.Flags&Lshortfuncname != 0 {
			// short: strip the package/receiver qualifier
			lastIndex := strings.LastIndexByte(funcname, '.')
			if lastIndex > 0 && len(funcname) > lastIndex+1 {
				funcname = funcname[lastIndex+1:]
			}
		}
		*buf = append(*buf, funcname...)
		if logger.Colorize {
			*buf = append(*buf, resetBytes...)
		}
		*buf = append(*buf, ' ')
	}

	// level tag section, eg "[I]" or "[INFO]"
	if logger.Flags&(Llevel|Llevelinitial) != 0 {
		level := strings.ToUpper(event.level.String())
		if logger.Colorize {
			*buf = append(*buf, levelToColor[event.level]...)
		}
		*buf = append(*buf, '[')
		if logger.Flags&Llevelinitial != 0 {
			*buf = append(*buf, level[0])
		} else {
			*buf = append(*buf, level...)
		}
		*buf = append(*buf, ']')
		if logger.Colorize {
			*buf = append(*buf, resetBytes...)
		}
		*buf = append(*buf, ' ')
	}

	// message: drop a single trailing newline, then pass it through the
	// ANSI filter which keeps or strips color codes depending on Colorize
	msg := []byte(event.msg)
	if len(msg) > 0 && msg[len(msg)-1] == '\n' {
		msg = msg[:len(msg)-1]
	}

	pawMode := allowColor
	if !logger.Colorize {
		pawMode = removeColor
	}

	baw := byteArrayWriter(*buf)
	(&protectedANSIWriter{
		w:    &baw,
		mode: pawMode,
	}).Write(msg) //nolint:errcheck
	*buf = baw

	// optional stacktrace, each line indented with a tab
	if event.stacktrace != "" && logger.StacktraceLevel <= event.level {
		lines := bytes.Split([]byte(event.stacktrace), []byte("\n"))
		if len(lines) > 1 {
			for _, line := range lines {
				*buf = append(*buf, "\n\t"...)
				*buf = append(*buf, line...)
			}
		}
		*buf = append(*buf, '\n')
	}
	*buf = append(*buf, '\n')
}
// LogEvent logs the event to the internal writer
func (logger *WriterLogger) LogEvent(event *Event) error {
	if logger.Level > event.level {
		return nil
	}

	logger.mu.Lock()
	defer logger.mu.Unlock()
	if !logger.Match(event) {
		return nil // filtered out by the Expression regexp
	}
	var buf []byte
	logger.createMsg(&buf, event)
	_, err := logger.out.Write(buf)
	return err
}
// Match checks if the given event matches the logger's regexp expression.
// The expression is tried against "filename:line:caller" first, then against
// the color-stripped message text. A nil regexp matches everything.
func (logger *WriterLogger) Match(event *Event) bool {
	if logger.regexp == nil {
		return true
	}
	if logger.regexp.Match([]byte(fmt.Sprintf("%s:%d:%s", event.filename, event.line, event.caller))) {
		return true
	}
	// Match on the non-colored msg - therefore strip out colors
	var msg []byte
	baw := byteArrayWriter(msg)
	(&protectedANSIWriter{
		w:    &baw,
		mode: removeColor,
	}).Write([]byte(event.msg)) //nolint:errcheck
	msg = baw
	return logger.regexp.Match(msg)
}
// Close closes the underlying writer, if one has been set.
func (logger *WriterLogger) Close() {
	logger.mu.Lock()
	defer logger.mu.Unlock()
	if logger.out == nil {
		return
	}
	logger.out.Close()
}
// GetName returns empty for these provider loggers
func (logger *WriterLogger) GetName() string {
	return ""
}

View file

@ -1,275 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package log
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// CallbackWriteCloser is a WriteCloser that reports every write and the close
// to a single callback: (written bytes, closed).
type CallbackWriteCloser struct {
	callback func([]byte, bool)
}

// Write passes p to the callback with closed=false and reports it all written.
func (c CallbackWriteCloser) Write(p []byte) (int, error) {
	c.callback(p, false)
	return len(p), nil
}

// Close signals the callback with closed=true.
func (c CallbackWriteCloser) Close() error {
	c.callback(nil, true)
	return nil
}
// TestBaseLogger drives WriterLogger directly through a capturing WriteCloser:
// events at or above the INFO threshold are rendered, lower levels write nothing.
func TestBaseLogger(t *testing.T) {
	var written []byte
	var closed bool

	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written = p
			closed = close
		},
	}
	prefix := "TestPrefix "
	b := WriterLogger{
		out:    c,
		Level:  INFO,
		Flags:  LstdFlags | LUTC,
		Prefix: prefix,
	}
	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	assert.Equal(t, INFO, b.GetLevel())

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]

	// below the INFO threshold: nothing is written
	event.level = DEBUG
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)

	event.level = TRACE
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)

	// at or above the threshold: rendered with prefix, date, position, caller and level initial
	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]

	event.level = ERROR
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]

	event.level = CRITICAL
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]

	b.Close()
	assert.True(t, closed)
}
// TestBaseLoggerDated checks WriterLogger with explicit flags
// (Ldate|Ltime|Lmicroseconds|Lshortfile|Llevel) and a WARN threshold:
// the shortened file name and the full level word appear in the output,
// the event's own time zone is used (no LUTC), and sub-WARN events drop.
func TestBaseLoggerDated(t *testing.T) {
	var written []byte
	var closed bool
	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written = p
			closed = close
		},
	}
	prefix := ""
	b := WriterLogger{
		out:    c,
		Level:  WARN,
		Flags:  Ldate | Ltime | Lmicroseconds | Lshortfile | Llevel,
		Prefix: prefix,
	}
	location, _ := time.LoadLocation("EST")
	date := time.Date(2019, time.January, 13, 22, 3, 30, 115, location)
	// No LUTC flag: format in the event's own location, with microseconds.
	dateString := date.Format("2006/01/02 15:04:05.000000")
	event := Event{
		level:    WARN,
		msg:      "TEST MESSAGE TEST\n",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}
	assert.Equal(t, WARN, b.GetLevel())
	// Lshortfile: only "FILENAME" is printed; Llevel: the full level word.
	// The expected string has no extra trailing newline beyond the one in msg.
	expected := fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	// INFO is below the WARN threshold: dropped.
	event.level = INFO
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	event.level = ERROR
	expected = fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	event.level = DEBUG
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	event.level = CRITICAL
	expected = fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	event.level = TRACE
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	b.Close()
	assert.True(t, closed)
}
// TestBaseLoggerMultiLineNoFlagsRegexp checks Flags=-1 (no metadata prefix),
// tab-indentation of multi-line messages, and EXPRESSION filtering: the
// regexp may match the "filename:line:caller" string or the message itself.
func TestBaseLoggerMultiLineNoFlagsRegexp(t *testing.T) {
	var written []byte
	var closed bool
	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written = p
			closed = close
		},
	}
	prefix := ""
	b := WriterLogger{
		Level:           DEBUG,
		StacktraceLevel: ERROR,
		Flags:           -1,
		Prefix:          prefix,
		Expression:      "FILENAME",
	}
	// NewWriterLogger compiles the Expression and attaches the writer.
	b.NewWriterLogger(c)
	location, _ := time.LoadLocation("EST")
	date := time.Date(2019, time.January, 13, 22, 3, 30, 115, location)
	event := Event{
		level:    DEBUG,
		msg:      "TEST\nMESSAGE\nTEST",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}
	assert.Equal(t, DEBUG, b.GetLevel())
	// Continuation lines of a multi-line message are tab-indented.
	expected := "TEST\n\tMESSAGE\n\tTEST\n"
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	// Expression matches neither filename, caller nor msg: event dropped.
	event.filename = "ELSEWHERE"
	b.LogEvent(&event)
	assert.Equal(t, "", string(written))
	assert.False(t, closed)
	written = written[:0]
	// Expression matches the caller portion of the location string.
	event.caller = "FILENAME"
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
	// Expression can also match the message body itself.
	event = Event{
		level:    DEBUG,
		msg:      "TEST\nFILENAME\nTEST",
		caller:   "CALLER",
		filename: "FULL/ELSEWHERE",
		line:     1,
		time:     date,
	}
	expected = "TEST\n\tFILENAME\n\tTEST\n"
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.False(t, closed)
	written = written[:0]
}
// TestBrokenRegexp ensures an invalid EXPRESSION ("\") does not break logger
// construction: the compiled regexp stays empty (so everything matches) and
// Close still reaches the underlying writer.
func TestBrokenRegexp(t *testing.T) {
	var closed bool
	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			closed = close
		},
	}
	// Declare prefix locally instead of relying on an identifier from another
	// file in the package, keeping this test self-contained like its siblings.
	prefix := ""
	b := WriterLogger{
		Level:           DEBUG,
		StacktraceLevel: ERROR,
		Flags:           -1,
		Prefix:          prefix,
		Expression:      "\\",
	}
	b.NewWriterLogger(c)
	assert.Empty(t, b.regexp)
	b.Close()
	assert.True(t, closed)
}

View file

@ -75,18 +75,18 @@ func SetLogSQL(ctx context.Context, on bool) ResponseExtra {
// LoggerOptions represents the options for the add logger call // LoggerOptions represents the options for the add logger call
type LoggerOptions struct { type LoggerOptions struct {
Group string Logger string
Name string Writer string
Mode string Mode string
Config map[string]interface{} Config map[string]interface{}
} }
// AddLogger adds a logger // AddLogger adds a logger
func AddLogger(ctx context.Context, group, name, mode string, config map[string]interface{}) ResponseExtra { func AddLogger(ctx context.Context, logger, writer, mode string, config map[string]interface{}) ResponseExtra {
reqURL := setting.LocalURL + "api/internal/manager/add-logger" reqURL := setting.LocalURL + "api/internal/manager/add-logger"
req := newInternalRequest(ctx, reqURL, "POST", LoggerOptions{ req := newInternalRequest(ctx, reqURL, "POST", LoggerOptions{
Group: group, Logger: logger,
Name: name, Writer: writer,
Mode: mode, Mode: mode,
Config: config, Config: config,
}) })
@ -94,8 +94,8 @@ func AddLogger(ctx context.Context, group, name, mode string, config map[string]
} }
// RemoveLogger removes a logger // RemoveLogger removes a logger
func RemoveLogger(ctx context.Context, group, name string) ResponseExtra { func RemoveLogger(ctx context.Context, logger, writer string) ResponseExtra {
reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/remove-logger/%s/%s", url.PathEscape(group), url.PathEscape(name)) reqURL := setting.LocalURL + fmt.Sprintf("api/internal/manager/remove-logger/%s/%s", url.PathEscape(logger), url.PathEscape(writer))
req := newInternalRequest(ctx, reqURL, "POST") req := newInternalRequest(ctx, reqURL, "POST")
return requestJSONUserMsg(req, "Removed") return requestJSONUserMsg(req, "Removed")
} }

View file

@ -33,6 +33,58 @@ type ConfigProvider interface {
Save() error Save() error
} }
// ConfigSectionKey only searches the keys in the given section, but it is O(n).
// The ini package has a special behavior: with "[sec] a=1" and an empty "[sec.sub]",
// inside "[sec.sub]" Key()/HasKey() can always see "a=1" because lookups fall
// back to parent sections. This helper deliberately avoids that fallback.
// It returns nil if the key doesn't exist in this exact section.
func ConfigSectionKey(sec ConfigSection, key string) *ini.Key {
	if sec == nil {
		return nil
	}
	keys := sec.Keys()
	for i := range keys {
		if keys[i].Name() == key {
			return keys[i]
		}
	}
	return nil
}
// ConfigSectionKeyString returns the non-empty string value of key in the
// given section only (no parent-section fallback). When the key is missing
// or empty, def[0] is returned if provided, otherwise "".
func ConfigSectionKeyString(sec ConfigSection, key string, def ...string) string {
	if k := ConfigSectionKey(sec, key); k != nil {
		if v := k.String(); v != "" {
			return v
		}
	}
	if len(def) == 0 {
		return ""
	}
	return def[0]
}
// ConfigInheritedKey works like ini.Section.Key(), but it always returns a new
// key instance; it is O(n) because NewKey is O(n). The returned key is safe to
// use with "MustXxx" — it doesn't change the parent's values, whereas calling
// ini.Section.Key().MustXxx directly would pollute the parent section's keys.
// It never returns nil.
func ConfigInheritedKey(sec ConfigSection, key string) *ini.Key {
	if k := sec.Key(key); k != nil && k.String() != "" {
		// Copy into a fresh key so MustXxx on it cannot mutate the parent.
		dup, _ := sec.NewKey(k.Name(), k.String())
		return dup
	}
	blank, _ := sec.NewKey(key, "")
	return blank
}
// ConfigInheritedKeyString returns the string value of key using ini's normal
// lookup (which falls back to parent sections). When the resolved value is
// empty, def[0] is returned if provided, otherwise "".
func ConfigInheritedKeyString(sec ConfigSection, key string, def ...string) string {
	if k := sec.Key(key); k != nil {
		if v := k.String(); v != "" {
			return v
		}
	}
	if len(def) == 0 {
		return ""
	}
	return def[0]
}
type iniFileConfigProvider struct { type iniFileConfigProvider struct {
opts *Options opts *Options
*ini.File *ini.File

View file

@ -0,0 +1,66 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestConfigProviderBehaviors documents two quirks of the underlying ini
// package that the ConfigSection*/ConfigInherited* helpers work around.
func TestConfigProviderBehaviors(t *testing.T) {
	t.Run("BuggyKeyOverwritten", func(t *testing.T) {
		cfg, _ := NewConfigProviderFromData(`
[foo]
key =
`)
		sec := cfg.Section("foo")
		secSub := cfg.Section("foo.bar")
		secSub.Key("key").MustString("1") // try to read a key from subsection
		assert.Equal(t, "1", sec.Key("key").String()) // TODO: BUGGY! the key in [foo] is overwritten
	})
	t.Run("SubsectionSeeParentKeys", func(t *testing.T) {
		cfg, _ := NewConfigProviderFromData(`
[foo]
key = 123
`)
		secSub := cfg.Section("foo.bar.xxx")
		// ini subsection lookups transparently fall back to parent-section keys
		assert.Equal(t, "123", secSub.Key("key").String())
	})
}
// TestConfigProviderHelper exercises the ConfigSectionKey*/ConfigInheritedKey*
// helpers: defaults for empty values, strict section-only lookup vs inherited
// lookup, and the workaround for ini's key-overwrite quirk.
func TestConfigProviderHelper(t *testing.T) {
	cfg, _ := NewConfigProviderFromData(`
[foo]
empty =
key = 123
`)
	sec := cfg.Section("foo")
	secSub := cfg.Section("foo.bar")
	// test empty key
	assert.Equal(t, "def", ConfigSectionKeyString(sec, "empty", "def"))
	assert.Equal(t, "xyz", ConfigSectionKeyString(secSub, "empty", "xyz"))
	// test non-inherited key, only see the keys in current section
	assert.NotNil(t, ConfigSectionKey(sec, "key"))
	assert.Nil(t, ConfigSectionKey(secSub, "key"))
	// test default behavior
	assert.Equal(t, "123", ConfigSectionKeyString(sec, "key"))
	assert.Equal(t, "", ConfigSectionKeyString(secSub, "key"))
	assert.Equal(t, "def", ConfigSectionKeyString(secSub, "key", "def"))
	assert.Equal(t, "123", ConfigInheritedKeyString(secSub, "key"))
	// Workaround for ini package's BuggyKeyOverwritten behavior:
	// MustString on an inherited copy must not change the parent's value...
	assert.Equal(t, "", ConfigSectionKeyString(sec, "empty"))
	assert.Equal(t, "", ConfigSectionKeyString(secSub, "empty"))
	assert.Equal(t, "def", ConfigInheritedKey(secSub, "empty").MustString("def"))
	assert.Equal(t, "def", ConfigInheritedKey(secSub, "empty").MustString("xyz"))
	// ...the parent stays empty, while the subsection copy keeps "def".
	assert.Equal(t, "", ConfigSectionKeyString(sec, "empty"))
	assert.Equal(t, "def", ConfigSectionKeyString(secSub, "empty"))
}

View file

@ -92,7 +92,7 @@ func loadDBSetting(rootCfg ConfigProvider) {
Database.MaxOpenConns = sec.Key("MAX_OPEN_CONNS").MustInt(0) Database.MaxOpenConns = sec.Key("MAX_OPEN_CONNS").MustInt(0)
Database.IterateBufferSize = sec.Key("ITERATE_BUFFER_SIZE").MustInt(50) Database.IterateBufferSize = sec.Key("ITERATE_BUFFER_SIZE").MustInt(50)
Database.LogSQL = sec.Key("LOG_SQL").MustBool(true) Database.LogSQL = sec.Key("LOG_SQL").MustBool(false)
Database.DBConnectRetries = sec.Key("DB_RETRIES").MustInt(10) Database.DBConnectRetries = sec.Key("DB_RETRIES").MustInt(10)
Database.DBConnectBackoff = sec.Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second) Database.DBConnectBackoff = sec.Key("DB_RETRY_BACKOFF").MustDuration(3 * time.Second)
Database.AutoMigration = sec.Key("AUTO_MIGRATION").MustBool(true) Database.AutoMigration = sec.Key("AUTO_MIGRATION").MustBool(true)

View file

@ -10,384 +10,251 @@ import (
"path" "path"
"path/filepath" "path/filepath"
"strings" "strings"
"sync"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/util" "code.gitea.io/gitea/modules/util"
) )
var ( type LogGlobalConfig struct {
filenameSuffix = ""
descriptionLock = sync.RWMutex{}
logDescriptions = make(map[string]*LogDescription)
)
// Log settings
var Log struct {
Level log.Level
StacktraceLogLevel string
RootPath string RootPath string
Mode string
Level log.Level
StacktraceLogLevel log.Level
BufferLen int
EnableSSHLog bool EnableSSHLog bool
EnableXORMLog bool
DisableRouterLog bool
EnableAccessLog bool
AccessLogTemplate string AccessLogTemplate string
BufferLength int64
RequestIDHeaders []string RequestIDHeaders []string
} }
// GetLogDescriptions returns a race safe set of descriptions var Log LogGlobalConfig
func GetLogDescriptions() map[string]*LogDescription {
descriptionLock.RLock()
defer descriptionLock.RUnlock()
descs := make(map[string]*LogDescription, len(logDescriptions))
for k, v := range logDescriptions {
subLogDescriptions := make([]SubLogDescription, len(v.SubLogDescriptions))
copy(subLogDescriptions, v.SubLogDescriptions)
descs[k] = &LogDescription{ const accessLogTemplateDefault = `{{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"`
Name: v.Name,
SubLogDescriptions: subLogDescriptions,
}
}
return descs
}
// AddLogDescription adds a set of descriptions to the complete description func loadLogGlobalFrom(rootCfg ConfigProvider) {
func AddLogDescription(key string, description *LogDescription) {
descriptionLock.Lock()
defer descriptionLock.Unlock()
logDescriptions[key] = description
}
// AddSubLogDescription adds a sub log description
func AddSubLogDescription(key string, subLogDescription SubLogDescription) bool {
descriptionLock.Lock()
defer descriptionLock.Unlock()
desc, ok := logDescriptions[key]
if !ok {
return false
}
for i, sub := range desc.SubLogDescriptions {
if sub.Name == subLogDescription.Name {
desc.SubLogDescriptions[i] = subLogDescription
return true
}
}
desc.SubLogDescriptions = append(desc.SubLogDescriptions, subLogDescription)
return true
}
// RemoveSubLogDescription removes a sub log description
func RemoveSubLogDescription(key, name string) bool {
descriptionLock.Lock()
defer descriptionLock.Unlock()
desc, ok := logDescriptions[key]
if !ok {
return false
}
for i, sub := range desc.SubLogDescriptions {
if sub.Name == name {
desc.SubLogDescriptions = append(desc.SubLogDescriptions[:i], desc.SubLogDescriptions[i+1:]...)
return true
}
}
return false
}
type defaultLogOptions struct {
levelName string // LogLevel
flags string
filename string // path.Join(LogRootPath, "gitea.log")
bufferLength int64
disableConsole bool
}
func newDefaultLogOptions() defaultLogOptions {
return defaultLogOptions{
levelName: Log.Level.String(),
flags: "stdflags",
filename: filepath.Join(Log.RootPath, "gitea.log"),
bufferLength: 10000,
disableConsole: false,
}
}
// SubLogDescription describes a sublogger
type SubLogDescription struct {
Name string
Provider string
Config string
}
// LogDescription describes a named logger
type LogDescription struct {
Name string
SubLogDescriptions []SubLogDescription
}
func getLogLevel(section ConfigSection, key string, defaultValue log.Level) log.Level {
value := section.Key(key).MustString(defaultValue.String())
return log.FromString(value)
}
func getStacktraceLogLevel(section ConfigSection, key, defaultValue string) string {
value := section.Key(key).MustString(defaultValue)
return log.FromString(value).String()
}
func loadLogFrom(rootCfg ConfigProvider) {
sec := rootCfg.Section("log") sec := rootCfg.Section("log")
Log.Level = getLogLevel(sec, "LEVEL", log.INFO)
Log.StacktraceLogLevel = getStacktraceLogLevel(sec, "STACKTRACE_LEVEL", "None") Log.Level = log.LevelFromString(sec.Key("LEVEL").MustString(log.INFO.String()))
Log.StacktraceLogLevel = log.LevelFromString(sec.Key("STACKTRACE_LEVEL").MustString(log.NONE.String()))
Log.BufferLen = sec.Key("BUFFER_LEN").MustInt(10000)
Log.Mode = sec.Key("MODE").MustString("console")
Log.RootPath = sec.Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log")) Log.RootPath = sec.Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log"))
forcePathSeparator(Log.RootPath) if !filepath.IsAbs(Log.RootPath) {
Log.BufferLength = sec.Key("BUFFER_LEN").MustInt64(10000) Log.RootPath = filepath.Join(AppWorkPath, Log.RootPath)
}
Log.RootPath = util.FilePathJoinAbs(Log.RootPath)
Log.EnableSSHLog = sec.Key("ENABLE_SSH_LOG").MustBool(false) Log.EnableSSHLog = sec.Key("ENABLE_SSH_LOG").MustBool(false)
Log.EnableAccessLog = sec.Key("ENABLE_ACCESS_LOG").MustBool(false)
Log.AccessLogTemplate = sec.Key("ACCESS_LOG_TEMPLATE").MustString( Log.AccessLogTemplate = sec.Key("ACCESS_LOG_TEMPLATE").MustString(accessLogTemplateDefault)
`{{.Ctx.RemoteHost}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.URL.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}" "{{.Ctx.Req.UserAgent}}"`,
)
Log.RequestIDHeaders = sec.Key("REQUEST_ID_HEADERS").Strings(",") Log.RequestIDHeaders = sec.Key("REQUEST_ID_HEADERS").Strings(",")
// the `MustString` updates the default value, and `log.ACCESS` is used by `generateNamedLogger("access")` later
_ = rootCfg.Section("log").Key("ACCESS").MustString("file")
sec.Key("ROUTER").MustString("console")
// Allow [log] DISABLE_ROUTER_LOG to override [server] DISABLE_ROUTER_LOG
Log.DisableRouterLog = sec.Key("DISABLE_ROUTER_LOG").MustBool(Log.DisableRouterLog)
Log.EnableXORMLog = rootCfg.Section("log").Key("ENABLE_XORM_LOG").MustBool(true)
} }
func generateLogConfig(sec ConfigSection, name string, defaults defaultLogOptions) (mode, jsonConfig, levelName string) { func prepareLoggerConfig(rootCfg ConfigProvider) {
level := getLogLevel(sec, "LEVEL", Log.Level)
levelName = level.String()
stacktraceLevelName := getStacktraceLogLevel(sec, "STACKTRACE_LEVEL", Log.StacktraceLogLevel)
stacktraceLevel := log.FromString(stacktraceLevelName)
mode = name
keys := sec.Keys()
logPath := defaults.filename
flags := log.FlagsFromString(defaults.flags)
expression := ""
prefix := ""
for _, key := range keys {
switch key.Name() {
case "MODE":
mode = key.MustString(name)
case "FILE_NAME":
logPath = key.MustString(defaults.filename)
forcePathSeparator(logPath)
if !filepath.IsAbs(logPath) {
logPath = path.Join(Log.RootPath, logPath)
}
case "FLAGS":
flags = log.FlagsFromString(key.MustString(defaults.flags))
case "EXPRESSION":
expression = key.MustString("")
case "PREFIX":
prefix = key.MustString("")
}
}
logConfig := map[string]interface{}{
"level": level.String(),
"expression": expression,
"prefix": prefix,
"flags": flags,
"stacktraceLevel": stacktraceLevel.String(),
}
// Generate log configuration.
switch mode {
case "console":
useStderr := sec.Key("STDERR").MustBool(false)
logConfig["stderr"] = useStderr
if useStderr {
logConfig["colorize"] = sec.Key("COLORIZE").MustBool(log.CanColorStderr)
} else {
logConfig["colorize"] = sec.Key("COLORIZE").MustBool(log.CanColorStdout)
}
case "file":
if err := os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
panic(err.Error())
}
logConfig["filename"] = logPath + filenameSuffix
logConfig["rotate"] = sec.Key("LOG_ROTATE").MustBool(true)
logConfig["maxsize"] = 1 << uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28))
logConfig["daily"] = sec.Key("DAILY_ROTATE").MustBool(true)
logConfig["maxdays"] = sec.Key("MAX_DAYS").MustInt(7)
logConfig["compress"] = sec.Key("COMPRESS").MustBool(true)
logConfig["compressionLevel"] = sec.Key("COMPRESSION_LEVEL").MustInt(-1)
case "conn":
logConfig["reconnectOnMsg"] = sec.Key("RECONNECT_ON_MSG").MustBool()
logConfig["reconnect"] = sec.Key("RECONNECT").MustBool()
logConfig["net"] = sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"})
logConfig["addr"] = sec.Key("ADDR").MustString(":7020")
case "smtp":
logConfig["username"] = sec.Key("USER").MustString("example@example.com")
logConfig["password"] = sec.Key("PASSWD").MustString("******")
logConfig["host"] = sec.Key("HOST").MustString("127.0.0.1:25")
sendTos := strings.Split(sec.Key("RECEIVERS").MustString(""), ",")
for i, address := range sendTos {
sendTos[i] = strings.TrimSpace(address)
}
logConfig["sendTos"] = sendTos
logConfig["subject"] = sec.Key("SUBJECT").MustString("Diagnostic message from Gitea")
}
logConfig["colorize"] = sec.Key("COLORIZE").MustBool(false)
byteConfig, err := json.Marshal(logConfig)
if err != nil {
log.Error("Failed to marshal log configuration: %v %v", logConfig, err)
return
}
jsonConfig = string(byteConfig)
return mode, jsonConfig, levelName
}
func generateNamedLogger(rootCfg ConfigProvider, key string, options defaultLogOptions) *LogDescription {
description := LogDescription{
Name: key,
}
sections := strings.Split(rootCfg.Section("log").Key(strings.ToUpper(key)).MustString(""), ",")
for i := 0; i < len(sections); i++ {
sections[i] = strings.TrimSpace(sections[i])
}
for _, name := range sections {
if len(name) == 0 || (name == "console" && options.disableConsole) {
continue
}
sec, err := rootCfg.GetSection("log." + name + "." + key)
if err != nil {
sec, _ = rootCfg.NewSection("log." + name + "." + key)
}
provider, config, levelName := generateLogConfig(sec, name, options)
if err := log.NewNamedLogger(key, options.bufferLength, name, provider, config); err != nil {
// Maybe panic here?
log.Error("Could not create new named logger: %v", err.Error())
}
description.SubLogDescriptions = append(description.SubLogDescriptions, SubLogDescription{
Name: name,
Provider: provider,
Config: config,
})
log.Info("%s Log: %s(%s:%s)", util.ToTitleCase(key), util.ToTitleCase(name), provider, levelName)
}
AddLogDescription(key, &description)
return &description
}
// initLogFrom initializes logging with settings from configuration provider
func initLogFrom(rootCfg ConfigProvider) {
sec := rootCfg.Section("log") sec := rootCfg.Section("log")
options := newDefaultLogOptions()
options.bufferLength = Log.BufferLength
description := LogDescription{ if !sec.HasKey("logger.default.MODE") {
Name: log.DEFAULT, sec.Key("logger.default.MODE").MustString(",")
} }
sections := strings.Split(sec.Key("MODE").MustString("console"), ",") deprecatedSetting(rootCfg, "log", "ACCESS", "log", "logger.access.MODE", "1.21")
deprecatedSetting(rootCfg, "log", "ENABLE_ACCESS_LOG", "log", "logger.access.MODE", "1.21")
useConsole := false if val := sec.Key("ACCESS").String(); val != "" {
for _, name := range sections { sec.Key("logger.access.MODE").MustString(val)
name = strings.TrimSpace(name)
if name == "" {
continue
} }
if name == "console" { if sec.HasKey("ENABLE_ACCESS_LOG") && !sec.Key("ENABLE_ACCESS_LOG").MustBool() {
useConsole = true sec.Key("logger.access.MODE").SetValue("")
} }
sec, err := rootCfg.GetSection("log." + name + ".default") deprecatedSetting(rootCfg, "log", "ROUTER", "log", "logger.router.MODE", "1.21")
if err != nil { deprecatedSetting(rootCfg, "log", "DISABLE_ROUTER_LOG", "log", "logger.router.MODE", "1.21")
sec, err = rootCfg.GetSection("log." + name) if val := sec.Key("ROUTER").String(); val != "" {
if err != nil { sec.Key("logger.router.MODE").MustString(val)
sec, _ = rootCfg.NewSection("log." + name)
} }
if !sec.HasKey("logger.router.MODE") {
sec.Key("logger.router.MODE").MustString(",") // use default logger
}
if sec.HasKey("DISABLE_ROUTER_LOG") && sec.Key("DISABLE_ROUTER_LOG").MustBool() {
sec.Key("logger.router.MODE").SetValue("")
} }
provider, config, levelName := generateLogConfig(sec, name, options) deprecatedSetting(rootCfg, "log", "XORM", "log", "logger.xorm.MODE", "1.21")
log.NewLogger(options.bufferLength, name, provider, config) deprecatedSetting(rootCfg, "log", "ENABLE_XORM_LOG", "log", "logger.xorm.MODE", "1.21")
description.SubLogDescriptions = append(description.SubLogDescriptions, SubLogDescription{ if val := sec.Key("XORM").String(); val != "" {
Name: name, sec.Key("logger.xorm.MODE").MustString(val)
Provider: provider,
Config: config,
})
log.Info("Gitea Log Mode: %s(%s:%s)", util.ToTitleCase(name), util.ToTitleCase(provider), levelName)
} }
if !sec.HasKey("logger.xorm.MODE") {
AddLogDescription(log.DEFAULT, &description) sec.Key("logger.xorm.MODE").MustString(",") // use default logger
if !useConsole {
log.Info("According to the configuration, subsequent logs will not be printed to the console")
if err := log.DelLogger("console"); err != nil {
log.Fatal("Cannot delete console logger: %v", err)
} }
if sec.HasKey("ENABLE_XORM_LOG") && !sec.Key("ENABLE_XORM_LOG").MustBool() {
sec.Key("logger.xorm.MODE").SetValue("")
} }
// Finally redirect the default golog to here
golog.SetFlags(0)
golog.SetPrefix("")
golog.SetOutput(log.NewLoggerAsWriter("INFO", log.GetLogger(log.DEFAULT)))
} }
// LogPrepareFilenameForWriter resolves fileName (falling back to
// defaultFileName when empty) against Log.RootPath for relative paths, cleans
// absolute ones, and ensures the containing directory exists. It panics when
// the directory cannot be created, since file logging cannot proceed.
func LogPrepareFilenameForWriter(fileName, defaultFileName string) string {
	if fileName == "" {
		fileName = defaultFileName
	}
	if filepath.IsAbs(fileName) {
		fileName = filepath.Clean(fileName)
	} else {
		fileName = filepath.Join(Log.RootPath, fileName)
	}
	if err := os.MkdirAll(filepath.Dir(fileName), os.ModePerm); err != nil {
		panic(fmt.Sprintf("unable to create directory for log %q: %v", fileName, err.Error()))
	}
	return fileName
}
// loadLogModeByName reads the "log.<modeName>" section of rootCfg and builds
// the writer name, writer type and log.WriterMode for the logger loggerName.
// Unlisted options inherit from the parent [log] section; an unknown writer
// type that has no registered event writer yields an error.
// Fix: local variable "defaultFilaName" renamed to "defaultFileName" (typo).
func loadLogModeByName(rootCfg ConfigProvider, loggerName, modeName string) (writerName, writerType string, writerMode log.WriterMode, err error) {
	sec := rootCfg.Section("log." + modeName)

	writerMode = log.WriterMode{}
	writerType = ConfigSectionKeyString(sec, "MODE")
	if writerType == "" {
		// by default the mode name is also the writer type, eg: "console", "file"
		writerType = modeName
	}

	writerName = modeName
	defaultFlags := "stdflags"
	defaultFileName := "gitea.log"
	if loggerName == "access" {
		// "access" logger is special, by default it doesn't have output flags, so it also needs a new writer name to avoid conflicting with other writers.
		// so "access" logger's writer name is usually "file.access" or "console.access"
		writerName += ".access"
		defaultFlags = "none"
		defaultFileName = "access.log"
	}

	writerMode.Level = log.LevelFromString(ConfigInheritedKeyString(sec, "LEVEL", Log.Level.String()))
	writerMode.StacktraceLevel = log.LevelFromString(ConfigInheritedKeyString(sec, "STACKTRACE_LEVEL", Log.StacktraceLogLevel.String()))
	writerMode.Prefix = ConfigInheritedKeyString(sec, "PREFIX")
	writerMode.Expression = ConfigInheritedKeyString(sec, "EXPRESSION")
	writerMode.Flags = log.FlagsFromString(ConfigInheritedKeyString(sec, "FLAGS", defaultFlags))

	switch writerType {
	case "console":
		useStderr := ConfigInheritedKey(sec, "STDERR").MustBool(false)
		// colorize by default only when the chosen stream supports color
		defaultCanColor := log.CanColorStdout
		if useStderr {
			defaultCanColor = log.CanColorStderr
		}
		writerOption := log.WriterConsoleOption{Stderr: useStderr}
		writerMode.Colorize = ConfigInheritedKey(sec, "COLORIZE").MustBool(defaultCanColor)
		writerMode.WriterOption = writerOption
	case "file":
		fileName := LogPrepareFilenameForWriter(ConfigInheritedKey(sec, "FILE_NAME").String(), defaultFileName)
		writerOption := log.WriterFileOption{}
		writerOption.FileName = fileName + filenameSuffix // FIXME: the suffix doesn't seem right, see its related comments
		writerOption.LogRotate = ConfigInheritedKey(sec, "LOG_ROTATE").MustBool(true)
		writerOption.MaxSize = 1 << uint(ConfigInheritedKey(sec, "MAX_SIZE_SHIFT").MustInt(28))
		writerOption.DailyRotate = ConfigInheritedKey(sec, "DAILY_ROTATE").MustBool(true)
		writerOption.MaxDays = ConfigInheritedKey(sec, "MAX_DAYS").MustInt(7)
		writerOption.Compress = ConfigInheritedKey(sec, "COMPRESS").MustBool(true)
		writerOption.CompressionLevel = ConfigInheritedKey(sec, "COMPRESSION_LEVEL").MustInt(-1)
		writerMode.WriterOption = writerOption
	case "conn":
		writerOption := log.WriterConnOption{}
		writerOption.ReconnectOnMsg = ConfigInheritedKey(sec, "RECONNECT_ON_MSG").MustBool()
		writerOption.Reconnect = ConfigInheritedKey(sec, "RECONNECT").MustBool()
		writerOption.Protocol = ConfigInheritedKey(sec, "PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"})
		writerOption.Addr = ConfigInheritedKey(sec, "ADDR").MustString(":7020")
		writerMode.WriterOption = writerOption
	default:
		if !log.HasEventWriter(writerType) {
			return "", "", writerMode, fmt.Errorf("invalid log writer type (mode): %s", writerType)
		}
	}

	return writerName, writerType, writerMode, nil
}
// filenameSuffix is appended to file-writer log file names; empty by default,
// it is set to ".<pid>" by RestartLogsWithPIDSuffix when re-initializing after
// a fork. See the FIXME in loadLogModeByName about rotation interaction.
var filenameSuffix = ""
// RestartLogsWithPIDSuffix restarts the logs with a PID suffix on files // RestartLogsWithPIDSuffix restarts the logs with a PID suffix on files
// FIXME: it seems not right, it breaks log rotating or log collectors
func RestartLogsWithPIDSuffix() { func RestartLogsWithPIDSuffix() {
filenameSuffix = fmt.Sprintf(".%d", os.Getpid()) filenameSuffix = fmt.Sprintf(".%d", os.Getpid())
InitLogs(false) initAllLoggers() // when forking, before restarting, rename logger file and re-init all loggers
} }
// InitLogs creates all the log services func InitLoggersForTest() {
func InitLogs(disableConsole bool) { initAllLoggers()
initLogFrom(CfgProvider) }
if !Log.DisableRouterLog { // initAllLoggers creates all the log services
options := newDefaultLogOptions() func initAllLoggers() {
options.filename = filepath.Join(Log.RootPath, "router.log") initManagedLoggers(log.GetManager(), CfgProvider)
options.flags = "date,time" // For the router we don't want any prefixed flags
options.bufferLength = Log.BufferLength golog.SetFlags(0)
generateNamedLogger(CfgProvider, "router", options) golog.SetPrefix("")
golog.SetOutput(log.LoggerToWriter(log.GetLogger(log.DEFAULT).Info))
}
func initManagedLoggers(manager *log.LoggerManager, cfg ConfigProvider) {
loadLogGlobalFrom(cfg)
prepareLoggerConfig(cfg)
initLoggerByName(manager, cfg, log.DEFAULT) // default
initLoggerByName(manager, cfg, "access")
initLoggerByName(manager, cfg, "router")
initLoggerByName(manager, cfg, "xorm")
}
func initLoggerByName(manager *log.LoggerManager, rootCfg ConfigProvider, loggerName string) {
sec := rootCfg.Section("log")
keyPrefix := "logger." + loggerName
disabled := sec.HasKey(keyPrefix+".MODE") && sec.Key(keyPrefix+".MODE").String() == ""
if disabled {
return
} }
if Log.EnableAccessLog { modeVal := sec.Key(keyPrefix + ".MODE").String()
options := newDefaultLogOptions() if modeVal == "," {
options.filename = filepath.Join(Log.RootPath, "access.log") modeVal = Log.Mode
options.flags = "" // For the router we don't want any prefixed flags
options.bufferLength = Log.BufferLength
generateNamedLogger(CfgProvider, "access", options)
} }
initSQLLogFrom(CfgProvider, disableConsole) var eventWriters []log.EventWriter
} modes := strings.Split(modeVal, ",")
for _, modeName := range modes {
// InitSQLLog initializes xorm logger setting modeName = strings.TrimSpace(modeName)
func InitSQLLog(disableConsole bool) { if modeName == "" {
initSQLLogFrom(CfgProvider, disableConsole) continue
}
func initSQLLogFrom(rootCfg ConfigProvider, disableConsole bool) {
if Log.EnableXORMLog {
options := newDefaultLogOptions()
options.filename = filepath.Join(Log.RootPath, "xorm.log")
options.bufferLength = Log.BufferLength
options.disableConsole = disableConsole
rootCfg.Section("log").Key("XORM").MustString(",")
generateNamedLogger(rootCfg, "xorm", options)
} }
writerName, writerType, writerMode, err := loadLogModeByName(rootCfg, loggerName, modeName)
if err != nil {
log.FallbackErrorf("Failed to load writer mode %q for logger %s: %v", modeName, loggerName, err)
continue
}
if writerMode.BufferLen == 0 {
writerMode.BufferLen = Log.BufferLen
}
eventWriter := manager.GetSharedWriter(writerName)
if eventWriter == nil {
eventWriter, err = manager.NewSharedWriter(writerName, writerType, writerMode)
if err != nil {
log.FallbackErrorf("Failed to create event writer for logger %s: %v", loggerName, err)
continue
}
}
eventWriters = append(eventWriters, eventWriter)
}
manager.GetLogger(loggerName).RemoveAllWriters().AddWriters(eventWriters...)
}
// InitSQLLoggersForCli makes the "xorm" logger write to the console at the
// given level; intended for command-line invocations (per the name).
func InitSQLLoggersForCli(level log.Level) {
	log.SetConsoleLogger("xorm", "console", level)
}
// IsAccessLogEnabled reports whether the "access" logger is enabled.
func IsAccessLogEnabled() bool {
	return log.IsLoggerEnabled("access")
}
func IsRouteLogEnabled() bool {
return log.IsLoggerEnabled("router")
} }

387
modules/setting/log_test.go Normal file
View file

@ -0,0 +1,387 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package setting
import (
"path/filepath"
"strings"
"testing"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// initLoggersByConfig creates a fresh logger manager from the given INI
// content, isolated from the global Log state (which is saved and restored).
// It returns the manager and a close function the caller must invoke to
// release the manager's writers.
func initLoggersByConfig(t *testing.T, config string) (*log.LoggerManager, func()) {
	oldLogConfig := Log
	Log = LogGlobalConfig{} // start from a clean global state for this config
	defer func() {
		Log = oldLogConfig // restore the global state before returning
	}()

	cfg, err := NewConfigProviderFromData(config)
	// use require: with assert a nil provider would be passed on and panic below
	require.NoError(t, err)

	manager := log.NewManager()
	initManagedLoggers(manager, cfg)
	return manager, manager.Close
}
// toJSON renders v as tab-indented JSON; a marshal failure yields "".
func toJSON(v interface{}) string {
	data, _ := json.MarshalIndent(v, "", "\t")
	return string(data)
}
// TestLogConfigDefault checks that with an empty config the default, "router"
// and "xorm" loggers all share one console writer, while "access" is disabled.
func TestLogConfigDefault(t *testing.T) {
	manager, managerClose := initLoggersByConfig(t, ``)
	defer managerClose()

	// expected dump of the single shared console writer with default options
	writerDump := `
{
	"console": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "info",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Stderr": false
		},
		"WriterType": "console"
	}
}
`

	dump := manager.GetLogger(log.DEFAULT).DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))

	// the access logger has no writers by default
	dump = manager.GetLogger("access").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))

	dump = manager.GetLogger("router").DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))

	dump = manager.GetLogger("xorm").DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))
}
// TestLogConfigDisable checks that an empty "logger.<name>.MODE" value
// disables that logger, while the default logger keeps its console writer.
func TestLogConfigDisable(t *testing.T) {
	manager, managerClose := initLoggersByConfig(t, `
[log]
logger.router.MODE =
logger.xorm.MODE =
`)
	defer managerClose()

	writerDump := `
{
	"console": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "info",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Stderr": false
		},
		"WriterType": "console"
	}
}
`

	dump := manager.GetLogger(log.DEFAULT).DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))

	dump = manager.GetLogger("access").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))

	// both explicitly-disabled loggers end up with no writers
	dump = manager.GetLogger("router").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))

	dump = manager.GetLogger("xorm").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))
}
// TestLogConfigLegacyDefault checks that the legacy top-level "MODE = console"
// option still yields the same writers as the empty default configuration.
func TestLogConfigLegacyDefault(t *testing.T) {
	manager, managerClose := initLoggersByConfig(t, `
[log]
MODE = console
`)
	defer managerClose()

	writerDump := `
{
	"console": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "info",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Stderr": false
		},
		"WriterType": "console"
	}
}
`

	dump := manager.GetLogger(log.DEFAULT).DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))

	dump = manager.GetLogger("access").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))

	dump = manager.GetLogger("router").DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))

	dump = manager.GetLogger("xorm").DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))
}
// TestLogConfigLegacyMode checks the legacy "MODE/ROUTER/ACCESS = file"
// options: default and router share the "file" writer (gitea.log), while
// access gets its own "file.access" writer (access.log) with Flags "none".
func TestLogConfigLegacyMode(t *testing.T) {
	tempDir := t.TempDir()

	// tempPath builds the expected absolute log file path inside tempDir
	tempPath := func(file string) string {
		return filepath.Join(tempDir, file)
	}

	manager, managerClose := initLoggersByConfig(t, `
[log]
ROOT_PATH = `+tempDir+`
MODE = file
ROUTER = file
ACCESS = file
`)
	defer managerClose()

	writerDump := `
{
	"file": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "info",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Compress": true,
			"CompressionLevel": -1,
			"DailyRotate": true,
			"FileName": "$FILENAME",
			"LogRotate": true,
			"MaxDays": 7,
			"MaxSize": 268435456
		},
		"WriterType": "file"
	}
}
`
	writerDumpAccess := `
{
	"file.access": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "none",
		"Level": "info",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Compress": true,
			"CompressionLevel": -1,
			"DailyRotate": true,
			"FileName": "$FILENAME",
			"LogRotate": true,
			"MaxDays": 7,
			"MaxSize": 268435456
		},
		"WriterType": "file"
	}
}
`

	// the $FILENAME placeholder is substituted with the real temp path
	dump := manager.GetLogger(log.DEFAULT).DumpWriters()
	require.JSONEq(t, strings.ReplaceAll(writerDump, "$FILENAME", tempPath("gitea.log")), toJSON(dump))

	dump = manager.GetLogger("access").DumpWriters()
	require.JSONEq(t, strings.ReplaceAll(writerDumpAccess, "$FILENAME", tempPath("access.log")), toJSON(dump))

	dump = manager.GetLogger("router").DumpWriters()
	require.JSONEq(t, strings.ReplaceAll(writerDump, "$FILENAME", tempPath("gitea.log")), toJSON(dump))
}
// TestLogConfigLegacyModeDisable checks that the legacy DISABLE_ROUTER_LOG and
// ENABLE_ACCESS_LOG options override the ROUTER/ACCESS mode settings.
func TestLogConfigLegacyModeDisable(t *testing.T) {
	manager, managerClose := initLoggersByConfig(t, `
[log]
ROUTER = file
ACCESS = file
DISABLE_ROUTER_LOG = true
ENABLE_ACCESS_LOG = false
`)
	defer managerClose()

	dump := manager.GetLogger("access").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))

	dump = manager.GetLogger("router").DumpWriters()
	require.JSONEq(t, "{}", toJSON(dump))
}
// TestLogConfigNewConfig checks the new "logger.<name>.MODE" syntax together
// with per-writer "[log.<writer>]" sections: xorm uses two console writers
// with different levels, access gets a dedicated "console.access" writer.
func TestLogConfigNewConfig(t *testing.T) {
	manager, managerClose := initLoggersByConfig(t, `
[log]
logger.access.MODE = console
logger.xorm.MODE = console, console-1

[log.console]
LEVEL = warn

[log.console-1]
MODE = console
LEVEL = error
STDERR = true
`)
	defer managerClose()

	writerDump := `
{
	"console": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "warn",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Stderr": false
		},
		"WriterType": "console"
	},
	"console-1": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "error",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Stderr": true
		},
		"WriterType": "console"
	}
}
`
	writerDumpAccess := `
{
	"console.access": {
		"BufferLen": 10000,
		"Colorize": false,
		"Expression": "",
		"Flags": "none",
		"Level": "warn",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Stderr": false
		},
		"WriterType": "console"
	}
}
`

	dump := manager.GetLogger("xorm").DumpWriters()
	require.JSONEq(t, writerDump, toJSON(dump))

	dump = manager.GetLogger("access").DumpWriters()
	require.JSONEq(t, writerDumpAccess, toJSON(dump))
}
// TestLogConfigModeFile checks two file writers on the default logger: "file"
// with defaults and "file1" with every file option overridden (level, flags,
// prefix, rotation, compression), including MAX_SIZE_SHIFT -> MaxSize 2^1.
func TestLogConfigModeFile(t *testing.T) {
	tempDir := t.TempDir()

	// tempPath builds the expected absolute log file path inside tempDir
	tempPath := func(file string) string {
		return filepath.Join(tempDir, file)
	}

	manager, managerClose := initLoggersByConfig(t, `
[log]
ROOT_PATH = `+tempDir+`
BUFFER_LEN = 10
MODE = file, file1

[log.file1]
MODE = file
LEVEL = error
STACKTRACE_LEVEL = fatal
EXPRESSION = filter
FLAGS = medfile
PREFIX = "[Prefix] "
FILE_NAME = file-xxx.log
LOG_ROTATE = false
MAX_SIZE_SHIFT = 1
DAILY_ROTATE = false
MAX_DAYS = 90
COMPRESS = false
COMPRESSION_LEVEL = 4
`)
	defer managerClose()

	writerDump := `
{
	"file": {
		"BufferLen": 10,
		"Colorize": false,
		"Expression": "",
		"Flags": "stdflags",
		"Level": "info",
		"Prefix": "",
		"StacktraceLevel": "none",
		"WriterOption": {
			"Compress": true,
			"CompressionLevel": -1,
			"DailyRotate": true,
			"FileName": "$FILENAME-0",
			"LogRotate": true,
			"MaxDays": 7,
			"MaxSize": 268435456
		},
		"WriterType": "file"
	},
	"file1": {
		"BufferLen": 10,
		"Colorize": false,
		"Expression": "filter",
		"Flags": "medfile",
		"Level": "error",
		"Prefix": "[Prefix] ",
		"StacktraceLevel": "fatal",
		"WriterOption": {
			"Compress": false,
			"CompressionLevel": 4,
			"DailyRotate": false,
			"FileName": "$FILENAME-1",
			"LogRotate": false,
			"MaxDays": 90,
			"MaxSize": 2
		},
		"WriterType": "file"
	}
}
`

	dump := manager.GetLogger(log.DEFAULT).DumpWriters()
	expected := writerDump
	expected = strings.ReplaceAll(expected, "$FILENAME-0", tempPath("gitea.log"))
	expected = strings.ReplaceAll(expected, "$FILENAME-1", tempPath("file-xxx.log"))
	require.JSONEq(t, expected, toJSON(dump))
}

View file

@ -278,7 +278,6 @@ func loadRepositoryFrom(rootCfg ConfigProvider) {
Repository.MaxCreationLimit = sec.Key("MAX_CREATION_LIMIT").MustInt(-1) Repository.MaxCreationLimit = sec.Key("MAX_CREATION_LIMIT").MustInt(-1)
Repository.DefaultBranch = sec.Key("DEFAULT_BRANCH").MustString(Repository.DefaultBranch) Repository.DefaultBranch = sec.Key("DEFAULT_BRANCH").MustString(Repository.DefaultBranch)
RepoRootPath = sec.Key("ROOT").MustString(path.Join(AppDataPath, "gitea-repositories")) RepoRootPath = sec.Key("ROOT").MustString(path.Join(AppDataPath, "gitea-repositories"))
forcePathSeparator(RepoRootPath)
if !filepath.IsAbs(RepoRootPath) { if !filepath.IsAbs(RepoRootPath) {
RepoRootPath = filepath.Join(AppWorkPath, RepoRootPath) RepoRootPath = filepath.Join(AppWorkPath, RepoRootPath)
} else { } else {

View file

@ -317,7 +317,6 @@ func loadServerFrom(rootCfg ConfigProvider) {
PortToRedirect = sec.Key("PORT_TO_REDIRECT").MustString("80") PortToRedirect = sec.Key("PORT_TO_REDIRECT").MustString("80")
RedirectorUseProxyProtocol = sec.Key("REDIRECTOR_USE_PROXY_PROTOCOL").MustBool(UseProxyProtocol) RedirectorUseProxyProtocol = sec.Key("REDIRECTOR_USE_PROXY_PROTOCOL").MustBool(UseProxyProtocol)
OfflineMode = sec.Key("OFFLINE_MODE").MustBool() OfflineMode = sec.Key("OFFLINE_MODE").MustBool()
Log.DisableRouterLog = sec.Key("DISABLE_ROUTER_LOG").MustBool()
if len(StaticRootPath) == 0 { if len(StaticRootPath) == 0 {
StaticRootPath = AppWorkPath StaticRootPath = AppWorkPath
} }

View file

@ -115,7 +115,7 @@ func init() {
// We can rely on log.CanColorStdout being set properly because modules/log/console_windows.go comes before modules/setting/setting.go lexicographically // We can rely on log.CanColorStdout being set properly because modules/log/console_windows.go comes before modules/setting/setting.go lexicographically
// By default set this logger at Info - we'll change it later, but we need to start with something. // By default set this logger at Info - we'll change it later, but we need to start with something.
log.NewLogger(0, "console", "console", fmt.Sprintf(`{"level": "info", "colorize": %t, "stacktraceLevel": "none"}`, log.CanColorStdout)) log.SetConsoleLogger(log.DEFAULT, "console", log.INFO)
var err error var err error
if AppPath, err = getAppPath(); err != nil { if AppPath, err = getAppPath(); err != nil {
@ -124,12 +124,6 @@ func init() {
AppWorkPath = getWorkPath(AppPath) AppWorkPath = getWorkPath(AppPath)
} }
func forcePathSeparator(path string) {
if strings.Contains(path, "\\") {
log.Fatal("Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
}
}
// IsRunUserMatchCurrentUser returns false if configured run user does not match // IsRunUserMatchCurrentUser returns false if configured run user does not match
// actual user that runs the app. The first return value is the actual user name. // actual user that runs the app. The first return value is the actual user name.
// This check is ignored under Windows since SSH remote login is not the main // This check is ignored under Windows since SSH remote login is not the main
@ -218,9 +212,9 @@ func Init(opts *Options) {
// loadCommonSettingsFrom loads common configurations from a configuration provider. // loadCommonSettingsFrom loads common configurations from a configuration provider.
func loadCommonSettingsFrom(cfg ConfigProvider) { func loadCommonSettingsFrom(cfg ConfigProvider) {
// WARNNING: don't change the sequence except you know what you are doing. // WARNING: don't change the sequence except you know what you are doing.
loadRunModeFrom(cfg) loadRunModeFrom(cfg)
loadLogFrom(cfg) loadLogGlobalFrom(cfg)
loadServerFrom(cfg) loadServerFrom(cfg)
loadSSHFrom(cfg) loadSSHFrom(cfg)
@ -282,10 +276,11 @@ func mustCurrentRunUserMatch(rootCfg ConfigProvider) {
// LoadSettings initializes the settings for normal start up // LoadSettings initializes the settings for normal start up
func LoadSettings() { func LoadSettings() {
initAllLoggers()
loadDBSetting(CfgProvider) loadDBSetting(CfgProvider)
loadServiceFrom(CfgProvider) loadServiceFrom(CfgProvider)
loadOAuth2ClientFrom(CfgProvider) loadOAuth2ClientFrom(CfgProvider)
InitLogs(false)
loadCacheFrom(CfgProvider) loadCacheFrom(CfgProvider)
loadSessionFrom(CfgProvider) loadSessionFrom(CfgProvider)
loadCorsFrom(CfgProvider) loadCorsFrom(CfgProvider)

View file

@ -223,9 +223,7 @@ func publicKeyHandler(ctx ssh.Context, key ssh.PublicKey) bool {
// validate the cert for this principal // validate the cert for this principal
if err := c.CheckCert(principal, cert); err != nil { if err := c.CheckCert(principal, cert); err != nil {
// User is presenting an invalid certificate - STOP any further processing // User is presenting an invalid certificate - STOP any further processing
if log.IsError() {
log.Error("Invalid Certificate KeyID %s with Signature Fingerprint %s presented for Principal: %s from %s", cert.KeyId, gossh.FingerprintSHA256(cert.SignatureKey), principal, ctx.RemoteAddr()) log.Error("Invalid Certificate KeyID %s with Signature Fingerprint %s presented for Principal: %s from %s", cert.KeyId, gossh.FingerprintSHA256(cert.SignatureKey), principal, ctx.RemoteAddr())
}
log.Warn("Failed authentication attempt from %s", ctx.RemoteAddr()) log.Warn("Failed authentication attempt from %s", ctx.RemoteAddr())
return false return false
@ -239,10 +237,8 @@ func publicKeyHandler(ctx ssh.Context, key ssh.PublicKey) bool {
return true return true
} }
if log.IsWarn() {
log.Warn("From %s Fingerprint: %s is a certificate, but no valid principals found", ctx.RemoteAddr(), gossh.FingerprintSHA256(key)) log.Warn("From %s Fingerprint: %s is a certificate, but no valid principals found", ctx.RemoteAddr(), gossh.FingerprintSHA256(key))
log.Warn("Failed authentication attempt from %s", ctx.RemoteAddr()) log.Warn("Failed authentication attempt from %s", ctx.RemoteAddr())
}
return false return false
} }
@ -253,10 +249,8 @@ func publicKeyHandler(ctx ssh.Context, key ssh.PublicKey) bool {
pkey, err := asymkey_model.SearchPublicKeyByContent(ctx, strings.TrimSpace(string(gossh.MarshalAuthorizedKey(key)))) pkey, err := asymkey_model.SearchPublicKeyByContent(ctx, strings.TrimSpace(string(gossh.MarshalAuthorizedKey(key))))
if err != nil { if err != nil {
if asymkey_model.IsErrKeyNotExist(err) { if asymkey_model.IsErrKeyNotExist(err) {
if log.IsWarn() {
log.Warn("Unknown public key: %s from %s", gossh.FingerprintSHA256(key), ctx.RemoteAddr()) log.Warn("Unknown public key: %s from %s", gossh.FingerprintSHA256(key), ctx.RemoteAddr())
log.Warn("Failed authentication attempt from %s", ctx.RemoteAddr()) log.Warn("Failed authentication attempt from %s", ctx.RemoteAddr())
}
return false return false
} }
log.Error("SearchPublicKeyByContent: %v", err) log.Error("SearchPublicKeyByContent: %v", err)

View file

@ -126,7 +126,7 @@ func wrapFatal(msg string) {
if msg == "" { if msg == "" {
return return
} }
log.FatalWithSkip(1, "Unable to compile templates, %s", msg) log.Fatal("Unable to compile templates, %s", msg)
} }
type templateErrorPrettier struct { type templateErrorPrettier struct {

View file

@ -4,7 +4,8 @@
package test package test
import ( import (
"strconv" "context"
"fmt"
"strings" "strings"
"sync" "sync"
"sync/atomic" "sync/atomic"
@ -14,9 +15,7 @@ import (
) )
type LogChecker struct { type LogChecker struct {
logger *log.MultiChannelledLogger *log.EventWriterBaseImpl
loggerName string
eventLoggerName string
filterMessages []string filterMessages []string
filtered []bool filtered []bool
@ -27,54 +26,44 @@ type LogChecker struct {
mu sync.Mutex mu sync.Mutex
} }
func (lc *LogChecker) LogEvent(event *log.Event) error { func (lc *LogChecker) Run(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case event, ok := <-lc.Queue:
if !ok {
return
}
lc.checkLogEvent(event)
}
}
}
func (lc *LogChecker) checkLogEvent(event *log.EventFormatted) {
lc.mu.Lock() lc.mu.Lock()
defer lc.mu.Unlock() defer lc.mu.Unlock()
for i, msg := range lc.filterMessages { for i, msg := range lc.filterMessages {
if strings.Contains(event.GetMsg(), msg) { if strings.Contains(event.Origin.MsgSimpleText, msg) {
lc.filtered[i] = true lc.filtered[i] = true
} }
} }
if strings.Contains(event.GetMsg(), lc.stopMark) { if strings.Contains(event.Origin.MsgSimpleText, lc.stopMark) {
lc.stopped = true lc.stopped = true
} }
return nil
}
func (lc *LogChecker) Close() {}
func (lc *LogChecker) Flush() {}
func (lc *LogChecker) GetLevel() log.Level {
return log.TRACE
}
func (lc *LogChecker) GetStacktraceLevel() log.Level {
return log.NONE
}
func (lc *LogChecker) GetName() string {
return lc.eventLoggerName
}
func (lc *LogChecker) ReleaseReopen() error {
return nil
} }
var checkerIndex int64 var checkerIndex int64
func NewLogChecker(loggerName string) (logChecker *LogChecker, cancel func()) { func NewLogChecker(namePrefix string) (logChecker *LogChecker, cancel func()) {
logger := log.GetLogger(loggerName) logger := log.GetManager().GetLogger(namePrefix)
newCheckerIndex := atomic.AddInt64(&checkerIndex, 1) newCheckerIndex := atomic.AddInt64(&checkerIndex, 1)
lc := &LogChecker{ writerName := namePrefix + "-" + fmt.Sprint(newCheckerIndex)
logger: logger,
loggerName: loggerName, lc := &LogChecker{}
eventLoggerName: "TestLogChecker-" + strconv.FormatInt(newCheckerIndex, 10), lc.EventWriterBaseImpl = log.NewEventWriterBase(writerName, "test-log-checker", log.WriterMode{})
} logger.AddWriters(lc)
if err := logger.AddLogger(lc); err != nil { return lc, func() { _ = logger.RemoveWriter(writerName) }
panic(err) // it's impossible
}
return lc, func() { _, _ = logger.DelLogger(lc.GetName()) }
} }
// Filter will make the `Check` function to check if these logs are outputted. // Filter will make the `Check` function to check if these logs are outputted.

View file

@ -13,8 +13,6 @@ import (
) )
func TestLogChecker(t *testing.T) { func TestLogChecker(t *testing.T) {
_ = log.NewLogger(1000, "console", "console", `{"level":"info","stacktracelevel":"NONE","stderr":true}`)
lc, cleanup := NewLogChecker(log.DEFAULT) lc, cleanup := NewLogChecker(log.DEFAULT)
defer cleanup() defer cleanup()

View file

@ -13,7 +13,6 @@ import (
"testing" "testing"
"time" "time"
"code.gitea.io/gitea/modules/json"
"code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/queue"
) )
@ -24,19 +23,14 @@ var (
SlowFlush = 5 * time.Second SlowFlush = 5 * time.Second
) )
// TestLogger is a logger which will write to the testing log
type TestLogger struct {
log.WriterLogger
}
var WriterCloser = &testLoggerWriterCloser{} var WriterCloser = &testLoggerWriterCloser{}
type testLoggerWriterCloser struct { type testLoggerWriterCloser struct {
sync.RWMutex sync.RWMutex
t []*testing.TB t []testing.TB
} }
func (w *testLoggerWriterCloser) pushT(t *testing.TB) { func (w *testLoggerWriterCloser) pushT(t testing.TB) {
w.Lock() w.Lock()
w.t = append(w.t, t) w.t = append(w.t, t)
w.Unlock() w.Unlock()
@ -48,7 +42,7 @@ func (w *testLoggerWriterCloser) Write(p []byte) (int, error) {
w.RLock() w.RLock()
defer w.RUnlock() defer w.RUnlock()
var t *testing.TB var t testing.TB
if len(w.t) > 0 { if len(w.t) > 0 {
t = w.t[len(w.t)-1] t = w.t[len(w.t)-1]
} }
@ -57,33 +51,13 @@ func (w *testLoggerWriterCloser) Write(p []byte) (int, error) {
p = p[:len(p)-1] p = p[:len(p)-1]
} }
if t == nil || *t == nil { if t == nil {
// if there is no running test, the log message should be outputted to console, to avoid losing important information. // if there is no running test, the log message should be outputted to console, to avoid losing important information.
// the "???" prefix is used to match the "===" and "+++" in PrintCurrentTest // the "???" prefix is used to match the "===" and "+++" in PrintCurrentTest
return fmt.Fprintf(os.Stdout, "??? [TestLogger] %s\n", p) return fmt.Fprintf(os.Stdout, "??? [TestLogger] %s\n", p)
} }
defer func() { t.Log(string(p))
err := recover()
if err == nil {
return
}
var errString string
errErr, ok := err.(error)
if ok {
errString = errErr.Error()
} else {
errString, ok = err.(string)
}
if !ok {
panic(err)
}
if !strings.HasPrefix(errString, "Log in goroutine after ") {
panic(err)
}
}()
(*t).Log(string(p))
return len(p), nil return len(p), nil
} }
@ -106,8 +80,8 @@ func (w *testLoggerWriterCloser) Reset() {
if t == nil { if t == nil {
continue continue
} }
fmt.Fprintf(os.Stdout, "Unclosed logger writer in test: %s", (*t).Name()) _, _ = fmt.Fprintf(os.Stdout, "Unclosed logger writer in test: %s", t.Name())
(*t).Errorf("Unclosed logger writer in test: %s", (*t).Name()) t.Errorf("Unclosed logger writer in test: %s", t.Name())
} }
w.t = nil w.t = nil
} }
@ -124,25 +98,25 @@ func PrintCurrentTest(t testing.TB, skip ...int) func() {
_, filename, line, _ := runtime.Caller(actualSkip) _, filename, line, _ := runtime.Caller(actualSkip)
if log.CanColorStdout { if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line) _, _ = fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line)
} else { } else {
fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line) _, _ = fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line)
} }
WriterCloser.pushT(&t) WriterCloser.pushT(t)
return func() { return func() {
took := time.Since(start) took := time.Since(start)
if took > SlowTest { if took > SlowTest {
if log.CanColorStdout { if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgYellow)), fmt.Formatter(log.NewColoredValue(took, log.Bold, log.FgYellow))) _, _ = fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgYellow)), fmt.Formatter(log.NewColoredValue(took, log.Bold, log.FgYellow)))
} else { } else {
fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", t.Name(), took) _, _ = fmt.Fprintf(os.Stdout, "+++ %s is a slow test (took %v)\n", t.Name(), took)
} }
} }
timer := time.AfterFunc(SlowFlush, func() { timer := time.AfterFunc(SlowFlush, func() {
if log.CanColorStdout { if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), SlowFlush) _, _ = fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), SlowFlush)
} else { } else {
fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", t.Name(), SlowFlush) _, _ = fmt.Fprintf(os.Stdout, "+++ %s ... still flushing after %v ...\n", t.Name(), SlowFlush)
} }
}) })
if err := queue.GetManager().FlushAll(context.Background(), time.Minute); err != nil { if err := queue.GetManager().FlushAll(context.Background(), time.Minute); err != nil {
@ -152,9 +126,9 @@ func PrintCurrentTest(t testing.TB, skip ...int) func() {
flushTook := time.Since(start) - took flushTook := time.Since(start) - took
if flushTook > SlowFlush { if flushTook > SlowFlush {
if log.CanColorStdout { if log.CanColorStdout {
fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), fmt.Formatter(log.NewColoredValue(flushTook, log.Bold, log.FgRed))) _, _ = fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", fmt.Formatter(log.NewColoredValue(t.Name(), log.Bold, log.FgRed)), fmt.Formatter(log.NewColoredValue(flushTook, log.Bold, log.FgRed)))
} else { } else {
fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", t.Name(), flushTook) _, _ = fmt.Fprintf(os.Stdout, "+++ %s had a slow clean-up flush (took %v)\n", t.Name(), flushTook)
} }
} }
WriterCloser.popT() WriterCloser.popT()
@ -168,40 +142,20 @@ func Printf(format string, args ...interface{}) {
args[i] = log.NewColoredValue(args[i]) args[i] = log.NewColoredValue(args[i])
} }
} }
fmt.Fprintf(os.Stdout, "\t"+format, args...) _, _ = fmt.Fprintf(os.Stdout, "\t"+format, args...)
} }
// NewTestLogger creates a TestLogger as a log.LoggerProvider // TestLogEventWriter is a logger which will write to the testing log
func NewTestLogger() log.LoggerProvider { type TestLogEventWriter struct {
logger := &TestLogger{} *log.EventWriterBaseImpl
logger.Colorize = log.CanColorStdout
logger.Level = log.TRACE
return logger
} }
// Init inits connection writer with json config. // NewTestLoggerWriter creates a TestLogEventWriter as a log.LoggerProvider
// json config only need key "level". func NewTestLoggerWriter(name string, mode log.WriterMode) log.EventWriter {
func (log *TestLogger) Init(config string) error { w := &TestLogEventWriter{}
err := json.Unmarshal([]byte(config), log) w.EventWriterBaseImpl = log.NewEventWriterBase(name, "test-log-writer", mode)
if err != nil { w.OutputWriteCloser = WriterCloser
return err return w
}
log.NewWriterLogger(WriterCloser)
return nil
}
// Flush when log should be flushed
func (log *TestLogger) Flush() {
}
// ReleaseReopen does nothing
func (log *TestLogger) ReleaseReopen() error {
return nil
}
// GetName returns the default name for this implementation
func (log *TestLogger) GetName() string {
return "test"
} }
func init() { func init() {

View file

@ -0,0 +1,246 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package rotatingfilewriter
import (
"bufio"
"compress/gzip"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"code.gitea.io/gitea/modules/graceful/releasereopen"
"code.gitea.io/gitea/modules/util"
)
// Options controls the rotation behavior of a RotatingFileWriter.
type Options struct {
	Rotate           bool  // whether rotation is enabled at all
	MaximumSize      int64 // rotate when the file reaches this size in bytes (0 disables the size check)
	RotateDaily      bool  // rotate when the calendar day changes
	KeepDays         int   // rotated files older than this many days are deleted after a rotation
	Compress         bool  // gzip rotated files in the background
	CompressionLevel int   // gzip level, passed to gzip.NewWriterLevel
}
// RotatingFileWriter writes to a file and rotates it by size and/or by day
// according to its Options.
type RotatingFileWriter struct {
	mu          sync.Mutex // guards rotation and close; Write itself is not locked (see Write)
	fd          *os.File
	currentSize int64 // bytes in the current file, maintained by Write for the size check
	openDate    int   // day-of-month when the current file was opened, for daily rotation

	options Options

	cancelReleaseReopen func() // deregisters this writer from the releasereopen manager
}
// ErrorPrintf is an optional hook for reporting internal errors.
// It may be nil, in which case errors are silently dropped.
var ErrorPrintf func(format string, args ...interface{})

// errorf tries to print error messages. Since this writer could be used by a logger system, this is the last chance to show the error in some cases
func errorf(format string, args ...interface{}) {
	if ErrorPrintf != nil {
		ErrorPrintf("rotatingfilewriter: "+format+"\n", args...)
	}
}
// Open creates a new rotating file writer.
// Notice: if a file is opened by two rotators, there will be conflicts when rotating.
// In the future, there should be "rotating file manager"
func Open(filename string, options *Options) (*RotatingFileWriter, error) {
	if options == nil {
		options = &Options{} // zero value: rotation disabled
	}

	rfw := &RotatingFileWriter{
		options: *options, // copy, so later mutation by the caller has no effect
	}

	if err := rfw.open(filename); err != nil {
		return nil, err
	}

	// register with the releasereopen manager so the file can be reopened centrally
	rfw.cancelReleaseReopen = releasereopen.GetManager().Register(rfw)
	return rfw, nil
}
// Write writes b to the current file, first rotating if the configured size
// or daily thresholds have been reached.
// NOTE(review): the rotate check and currentSize update are not guarded by
// rfw.mu here — presumably a single goroutine writes at a time; confirm
// before sharing one writer across goroutines.
func (rfw *RotatingFileWriter) Write(b []byte) (int, error) {
	if rfw.options.Rotate && ((rfw.options.MaximumSize > 0 && rfw.currentSize >= rfw.options.MaximumSize) || (rfw.options.RotateDaily && time.Now().Day() != rfw.openDate)) {
		if err := rfw.DoRotate(); err != nil {
			// if this writer is used by a logger system, it's the logger system's responsibility to handle/show the error
			return 0, err
		}
	}

	n, err := rfw.fd.Write(b)
	if err == nil {
		rfw.currentSize += int64(n)
	}
	return n, err
}
// Flush syncs the file content to stable storage.
func (rfw *RotatingFileWriter) Flush() error {
	return rfw.fd.Sync()
}
// Close deregisters the writer from the releasereopen manager and closes the
// underlying file.
func (rfw *RotatingFileWriter) Close() error {
	rfw.mu.Lock()
	if rfw.cancelReleaseReopen != nil {
		rfw.cancelReleaseReopen()
		rfw.cancelReleaseReopen = nil // only deregister once, even if Close is called again
	}
	rfw.mu.Unlock()
	return rfw.fd.Close()
}
// open opens (creating if necessary) filename in append mode and records its
// current size and modification day for the rotation checks in Write.
func (rfw *RotatingFileWriter) open(filename string) error {
	fd, err := os.OpenFile(filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o660)
	if err != nil {
		return err
	}

	finfo, err := fd.Stat()
	if err != nil {
		_ = fd.Close() // don't leak the descriptor if we can't stat the file
		return err
	}

	rfw.fd = fd
	rfw.currentSize = finfo.Size()
	rfw.openDate = finfo.ModTime().Day()
	return nil
}
// ReleaseReopen closes the current descriptor and reopens the same path, so
// that a file moved away externally (e.g. by rotation tooling) is released.
// Both errors, if any, are reported together.
func (rfw *RotatingFileWriter) ReleaseReopen() error {
	closeErr := rfw.fd.Close()
	openErr := rfw.open(rfw.fd.Name())
	return errors.Join(closeErr, openErr)
}
// DoRotate the log file creating a backup like xx.2013-01-01.2
func (rfw *RotatingFileWriter) DoRotate() error {
	if !rfw.options.Rotate {
		return nil
	}

	rfw.mu.Lock()
	defer rfw.mu.Unlock()

	// probe for a free backup name "<file>.<date>.NNN" (also checking the
	// ".gz" variant when compression is on, since that's the final name)
	prefix := fmt.Sprintf("%s.%s.", rfw.fd.Name(), time.Now().Format("2006-01-02"))

	var err error
	fname := ""
	for i := 1; err == nil && i <= 999; i++ {
		fname = prefix + fmt.Sprintf("%03d", i)
		_, err = os.Lstat(fname)
		if rfw.options.Compress && err != nil {
			_, err = os.Lstat(fname + ".gz")
		}
	}
	// return error if the last file checked still existed
	if err == nil {
		return fmt.Errorf("cannot find free file to rename %s", rfw.fd.Name())
	}

	fd := rfw.fd
	if err := fd.Close(); err != nil { // close file before rename
		return err
	}

	if err := util.Rename(fd.Name(), fname); err != nil {
		return err
	}

	if rfw.options.Compress {
		// compress in the background; failures are only reported via errorf
		go func() {
			err := compressOldFile(fname, rfw.options.CompressionLevel)
			if err != nil {
				errorf("DoRotate: %v", err)
			}
		}()
	}

	// reopen the (now fresh) file under the original name
	if err := rfw.open(fd.Name()); err != nil {
		return err
	}

	// prune rotated files older than KeepDays, also in the background
	go deleteOldFiles(
		filepath.Dir(fd.Name()),
		filepath.Base(fd.Name()),
		time.Now().AddDate(0, 0, -rfw.options.KeepDays),
	)
	return nil
}
// compressOldFile gzips fname into fname+".gz" and removes the original.
// The original is only removed after the gzip stream has been fully flushed
// and closed, so a failed compression never loses the source data.
func compressOldFile(fname string, compressionLevel int) error {
	reader, err := os.Open(fname)
	if err != nil {
		return fmt.Errorf("compressOldFile: failed to open existing file %s: %w", fname, err)
	}
	defer reader.Close()

	fnameGz := fname + ".gz"
	fw, err := os.OpenFile(fnameGz, os.O_WRONLY|os.O_CREATE, 0o660)
	if err != nil {
		return fmt.Errorf("compressOldFile: failed to open new file %s: %w", fnameGz, err)
	}

	// abort removes the partial .gz output on any failure below
	abort := func() {
		_ = fw.Close()
		_ = util.Remove(fnameGz)
	}

	zw, err := gzip.NewWriterLevel(fw, compressionLevel)
	if err != nil {
		abort()
		return fmt.Errorf("compressOldFile: failed to create gzip writer: %w", err)
	}

	if _, err = bufio.NewReader(reader).WriteTo(zw); err != nil {
		_ = zw.Close()
		abort()
		return fmt.Errorf("compressOldFile: failed to write to gz file: %w", err)
	}

	// Close the gzip writer BEFORE deleting the original: Close flushes the
	// gzip footer, and skipping this check could delete the source while
	// leaving a truncated/corrupt archive behind.
	if err = zw.Close(); err != nil {
		abort()
		return fmt.Errorf("compressOldFile: failed to write to gz file: %w", err)
	}
	if err = fw.Close(); err != nil {
		_ = util.Remove(fnameGz)
		return fmt.Errorf("compressOldFile: failed to write to gz file: %w", err)
	}

	_ = reader.Close()
	err = util.Remove(fname)
	if err != nil {
		return fmt.Errorf("compressOldFile: failed to delete old file: %w", err)
	}
	return nil
}
// deleteOldFiles removes every regular file under dir whose base name starts
// with prefix and whose modification time is before removeBefore.
// It is best-effort: failures are only reported via errorf.
func deleteOldFiles(dir, prefix string, removeBefore time.Time) {
	err := filepath.WalkDir(dir, func(path string, d os.DirEntry, err error) (returnErr error) {
		defer func() {
			// the walk callback must never panic; convert a panic into an error
			if r := recover(); r != nil {
				returnErr = fmt.Errorf("unable to delete old file '%s', error: %+v", path, r)
			}
		}()

		if err != nil {
			return err
		}
		if d.IsDir() {
			return nil
		}
		info, err := d.Info()
		if err != nil {
			return err
		}
		if info.ModTime().Before(removeBefore) {
			if strings.HasPrefix(filepath.Base(path), prefix) {
				return util.Remove(path)
			}
		}
		return nil
	})
	if err != nil {
		errorf("deleteOldFiles: failed to delete old file: %v", err)
	}
}

View file

@ -0,0 +1,48 @@
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT
package rotatingfilewriter
import (
"compress/gzip"
"io"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// TestCompressOldFile checks that compressOldFile produces a .gz whose
// decompressed content equals an identical uncompressed reference file.
func TestCompressOldFile(t *testing.T) {
	tmpDir := t.TempDir()
	fname := filepath.Join(tmpDir, "test")
	nonGzip := filepath.Join(tmpDir, "test-nonGzip")

	f, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY, 0o660)
	assert.NoError(t, err)
	ng, err := os.OpenFile(nonGzip, os.O_CREATE|os.O_WRONLY, 0o660)
	assert.NoError(t, err)

	// write identical content to both files (write errors surface via Close/compare)
	for i := 0; i < 999; i++ {
		_, _ = f.WriteString("This is a test file\n")
		_, _ = ng.WriteString("This is a test file\n")
	}
	assert.NoError(t, f.Close())
	assert.NoError(t, ng.Close())

	err = compressOldFile(fname, gzip.DefaultCompression)
	assert.NoError(t, err)

	_, err = os.Lstat(fname + ".gz")
	assert.NoError(t, err)

	f, err = os.Open(fname + ".gz")
	assert.NoError(t, err)
	defer f.Close() // previously leaked
	zr, err := gzip.NewReader(f)
	assert.NoError(t, err)
	defer zr.Close()

	data, err := io.ReadAll(zr)
	assert.NoError(t, err)
	original, err := os.ReadFile(nonGzip)
	assert.NoError(t, err)
	assert.Equal(t, original, data)
}

View file

@ -5,6 +5,7 @@ package routing
import ( import (
"net/http" "net/http"
"strings"
"time" "time"
"code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/context"
@ -25,18 +26,18 @@ func NewLoggerHandler() func(next http.Handler) http.Handler {
} }
var ( var (
startMessage = log.NewColoredValueBytes("started ", log.DEBUG.Color()) startMessage = log.NewColoredValue("started ", log.DEBUG.ColorAttributes()...)
slowMessage = log.NewColoredValueBytes("slow ", log.WARN.Color()) slowMessage = log.NewColoredValue("slow ", log.WARN.ColorAttributes()...)
pollingMessage = log.NewColoredValueBytes("polling ", log.INFO.Color()) pollingMessage = log.NewColoredValue("polling ", log.INFO.ColorAttributes()...)
failedMessage = log.NewColoredValueBytes("failed ", log.WARN.Color()) failedMessage = log.NewColoredValue("failed ", log.WARN.ColorAttributes()...)
completedMessage = log.NewColoredValueBytes("completed", log.INFO.Color()) completedMessage = log.NewColoredValue("completed", log.INFO.ColorAttributes()...)
unknownHandlerMessage = log.NewColoredValueBytes("completed", log.ERROR.Color()) unknownHandlerMessage = log.NewColoredValue("completed", log.ERROR.ColorAttributes()...)
) )
func logPrinter(logger log.Logger) func(trigger Event, record *requestRecord) { func logPrinter(logger log.Logger) func(trigger Event, record *requestRecord) {
return func(trigger Event, record *requestRecord) { return func(trigger Event, record *requestRecord) {
if trigger == StartEvent { if trigger == StartEvent {
if !logger.IsTrace() { if !logger.LevelEnabled(log.TRACE) {
// for performance, if the "started" message shouldn't be logged, we just return as early as possible // for performance, if the "started" message shouldn't be logged, we just return as early as possible
// developers can set the router log level to TRACE to get the "started" request messages. // developers can set the router log level to TRACE to get the "started" request messages.
return return
@ -59,12 +60,12 @@ func logPrinter(logger log.Logger) func(trigger Event, record *requestRecord) {
if trigger == StillExecutingEvent { if trigger == StillExecutingEvent {
message := slowMessage message := slowMessage
level := log.WARN logf := logger.Warn
if isLongPolling { if isLongPolling {
level = log.INFO logf = logger.Info
message = pollingMessage message = pollingMessage
} }
_ = logger.Log(0, level, "router: %s %v %s for %s, elapsed %v @ %s", logf("router: %s %v %s for %s, elapsed %v @ %s",
message, message,
log.ColoredMethod(req.Method), req.RequestURI, req.RemoteAddr, log.ColoredMethod(req.Method), req.RequestURI, req.RemoteAddr,
log.ColoredTime(time.Since(record.startTime)), log.ColoredTime(time.Since(record.startTime)),
@ -74,7 +75,7 @@ func logPrinter(logger log.Logger) func(trigger Event, record *requestRecord) {
} }
if panicErr != nil { if panicErr != nil {
_ = logger.Log(0, log.WARN, "router: %s %v %s for %s, panic in %v @ %s, err=%v", logger.Warn("router: %s %v %s for %s, panic in %v @ %s, err=%v",
failedMessage, failedMessage,
log.ColoredMethod(req.Method), req.RequestURI, req.RemoteAddr, log.ColoredMethod(req.Method), req.RequestURI, req.RemoteAddr,
log.ColoredTime(time.Since(record.startTime)), log.ColoredTime(time.Since(record.startTime)),
@ -88,14 +89,17 @@ func logPrinter(logger log.Logger) func(trigger Event, record *requestRecord) {
if v, ok := record.responseWriter.(context.ResponseWriter); ok { if v, ok := record.responseWriter.(context.ResponseWriter); ok {
status = v.Status() status = v.Status()
} }
level := log.INFO logf := log.Info
if strings.HasPrefix(req.RequestURI, "/assets/") {
logf = log.Trace
}
message := completedMessage message := completedMessage
if isUnknownHandler { if isUnknownHandler {
level = log.ERROR logf = log.Error
message = unknownHandlerMessage message = unknownHandlerMessage
} }
_ = logger.Log(0, level, "router: %s %v %s for %s, %v %v in %v @ %s", logf("router: %s %v %s for %s, %v %v in %v @ %s",
message, message,
log.ColoredMethod(req.Method), req.RequestURI, req.RemoteAddr, log.ColoredMethod(req.Method), req.RequestURI, req.RemoteAddr,
log.ColoredStatus(status), log.ColoredStatus(status, http.StatusText(status)), log.ColoredTime(time.Since(record.startTime)), log.ColoredStatus(status), log.ColoredStatus(status, http.StatusText(status)), log.ColoredTime(time.Since(record.startTime)),

View file

@ -3036,15 +3036,10 @@ config.git_pull_timeout = Pull Operation Timeout
config.git_gc_timeout = GC Operation Timeout config.git_gc_timeout = GC Operation Timeout
config.log_config = Log Configuration config.log_config = Log Configuration
config.log_mode = Log Mode config.logger_name_fmt = Logger: %s
config.own_named_logger = Named Logger
config.routes_to_default_logger = Routes To Default Logger
config.go_log = Uses Go Log (redirected to default)
config.router_log_mode = Router Log Mode
config.disabled_logger = Disabled config.disabled_logger = Disabled
config.access_log_mode = Access Log Mode config.access_log_mode = Access Log Mode
config.access_log_template = Template config.access_log_template = Access Log Template
config.xorm_log_mode = XORM Log Mode
config.xorm_log_sql = Log SQL config.xorm_log_sql = Log SQL
config.get_setting_failed = Get setting %s failed config.get_setting_failed = Get setting %s failed

View file

@ -60,11 +60,11 @@ func ProtocolMiddlewares() (handlers []any) {
handlers = append(handlers, proxy.ForwardedHeaders(opt)) handlers = append(handlers, proxy.ForwardedHeaders(opt))
} }
if !setting.Log.DisableRouterLog { if setting.IsRouteLogEnabled() {
handlers = append(handlers, routing.NewLoggerHandler()) handlers = append(handlers, routing.NewLoggerHandler())
} }
if setting.Log.EnableAccessLog { if setting.IsAccessLogEnabled() {
handlers = append(handlers, context.AccessLogger()) handlers = append(handlers, context.AccessLogger())
} }

View file

@ -73,7 +73,7 @@ func Routes() *web.Route {
r.Post("/manager/release-and-reopen-logging", ReleaseReopenLogging) r.Post("/manager/release-and-reopen-logging", ReleaseReopenLogging)
r.Post("/manager/set-log-sql", SetLogSQL) r.Post("/manager/set-log-sql", SetLogSQL)
r.Post("/manager/add-logger", bind(private.LoggerOptions{}), AddLogger) r.Post("/manager/add-logger", bind(private.LoggerOptions{}), AddLogger)
r.Post("/manager/remove-logger/{group}/{name}", RemoveLogger) r.Post("/manager/remove-logger/{logger}/{writer}", RemoveLogger)
r.Get("/manager/processes", Processes) r.Get("/manager/processes", Processes)
r.Post("/mail/send", SendEmail) r.Post("/mail/send", SendEmail)
r.Post("/restore_repo", RestoreRepo) r.Post("/restore_repo", RestoreRepo)

View file

@ -10,7 +10,7 @@ import (
"code.gitea.io/gitea/models/db" "code.gitea.io/gitea/models/db"
"code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/graceful"
"code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/graceful/releasereopen"
"code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/private" "code.gitea.io/gitea/modules/private"
"code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/queue"
@ -46,19 +46,19 @@ func FlushQueues(ctx *context.PrivateContext) {
// PauseLogging pauses logging // PauseLogging pauses logging
func PauseLogging(ctx *context.PrivateContext) { func PauseLogging(ctx *context.PrivateContext) {
log.Pause() log.GetManager().PauseAll()
ctx.PlainText(http.StatusOK, "success") ctx.PlainText(http.StatusOK, "success")
} }
// ResumeLogging resumes logging // ResumeLogging resumes logging
func ResumeLogging(ctx *context.PrivateContext) { func ResumeLogging(ctx *context.PrivateContext) {
log.Resume() log.GetManager().ResumeAll()
ctx.PlainText(http.StatusOK, "success") ctx.PlainText(http.StatusOK, "success")
} }
// ReleaseReopenLogging releases and reopens logging files // ReleaseReopenLogging releases and reopens logging files
func ReleaseReopenLogging(ctx *context.PrivateContext) { func ReleaseReopenLogging(ctx *context.PrivateContext) {
if err := log.ReleaseReopen(); err != nil { if err := releasereopen.GetManager().ReleaseReopen(); err != nil {
ctx.JSON(http.StatusInternalServerError, private.Response{ ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Error during release and reopen: %v", err), Err: fmt.Sprintf("Error during release and reopen: %v", err),
}) })
@ -75,90 +75,108 @@ func SetLogSQL(ctx *context.PrivateContext) {
// RemoveLogger removes a logger // RemoveLogger removes a logger
func RemoveLogger(ctx *context.PrivateContext) { func RemoveLogger(ctx *context.PrivateContext) {
group := ctx.Params("group") logger := ctx.Params("logger")
name := ctx.Params("name") writer := ctx.Params("writer")
ok, err := log.GetLogger(group).DelLogger(name) err := log.GetManager().GetLogger(logger).RemoveWriter(writer)
if err != nil { if err != nil {
ctx.JSON(http.StatusInternalServerError, private.Response{ ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Failed to remove logger: %s %s %v", group, name, err), Err: fmt.Sprintf("Failed to remove log writer: %s %s %v", logger, writer, err),
}) })
return return
} }
if ok { ctx.PlainText(http.StatusOK, fmt.Sprintf("Removed %s %s", logger, writer))
setting.RemoveSubLogDescription(group, name)
}
ctx.PlainText(http.StatusOK, fmt.Sprintf("Removed %s %s", group, name))
} }
// AddLogger adds a logger // AddLogger adds a logger
func AddLogger(ctx *context.PrivateContext) { func AddLogger(ctx *context.PrivateContext) {
opts := web.GetForm(ctx).(*private.LoggerOptions) opts := web.GetForm(ctx).(*private.LoggerOptions)
if len(opts.Group) == 0 {
opts.Group = log.DEFAULT if len(opts.Logger) == 0 {
opts.Logger = log.DEFAULT
} }
if _, ok := opts.Config["flags"]; !ok {
switch opts.Group { writerMode := log.WriterMode{}
writerType := opts.Mode
var flags string
var ok bool
if flags, ok = opts.Config["flags"].(string); !ok {
switch opts.Logger {
case "access": case "access":
opts.Config["flags"] = log.FlagsFromString("") flags = ""
case "router": case "router":
opts.Config["flags"] = log.FlagsFromString("date,time") flags = "date,time"
default: default:
opts.Config["flags"] = log.FlagsFromString("stdflags") flags = "stdflags"
} }
} }
writerMode.Flags = log.FlagsFromString(flags)
if _, ok := opts.Config["colorize"]; !ok && opts.Mode == "console" { if writerMode.Colorize, ok = opts.Config["colorize"].(bool); !ok && opts.Mode == "console" {
if _, ok := opts.Config["stderr"]; ok { if _, ok := opts.Config["stderr"]; ok {
opts.Config["colorize"] = log.CanColorStderr writerMode.Colorize = log.CanColorStderr
} else { } else {
opts.Config["colorize"] = log.CanColorStdout writerMode.Colorize = log.CanColorStdout
} }
} }
if _, ok := opts.Config["level"]; !ok { writerMode.Level = setting.Log.Level
opts.Config["level"] = setting.Log.Level if level, ok := opts.Config["level"].(string); ok {
writerMode.Level = log.LevelFromString(level)
} }
if _, ok := opts.Config["stacktraceLevel"]; !ok { writerMode.StacktraceLevel = setting.Log.StacktraceLogLevel
opts.Config["stacktraceLevel"] = setting.Log.StacktraceLogLevel if stacktraceLevel, ok := opts.Config["level"].(string); ok {
writerMode.StacktraceLevel = log.LevelFromString(stacktraceLevel)
} }
if opts.Mode == "file" { writerMode.Prefix, _ = opts.Config["prefix"].(string)
if _, ok := opts.Config["maxsize"]; !ok { writerMode.Expression, _ = opts.Config["expression"].(string)
opts.Config["maxsize"] = 1 << 28
}
if _, ok := opts.Config["maxdays"]; !ok {
opts.Config["maxdays"] = 7
}
if _, ok := opts.Config["compressionLevel"]; !ok {
opts.Config["compressionLevel"] = -1
}
}
bufferLen := setting.Log.BufferLength switch writerType {
byteConfig, err := json.Marshal(opts.Config) case "console":
writerOption := log.WriterConsoleOption{}
writerOption.Stderr, _ = opts.Config["stderr"].(bool)
writerMode.WriterOption = writerOption
case "file":
writerOption := log.WriterFileOption{}
fileName, _ := opts.Config["filename"].(string)
writerOption.FileName = setting.LogPrepareFilenameForWriter(fileName, opts.Writer+".log")
writerOption.LogRotate = opts.Config["rotate"].(bool)
maxSizeShift, _ := opts.Config["maxsize"].(int)
if maxSizeShift == 0 {
maxSizeShift = 28
}
writerOption.MaxSize = 1 << maxSizeShift
writerOption.DailyRotate, _ = opts.Config["daily"].(bool)
writerOption.MaxDays, _ = opts.Config["maxdays"].(int)
if writerOption.MaxDays == 0 {
writerOption.MaxDays = 7
}
writerOption.Compress, _ = opts.Config["compress"].(bool)
writerOption.CompressionLevel, _ = opts.Config["compressionLevel"].(int)
if writerOption.CompressionLevel == 0 {
writerOption.CompressionLevel = -1
}
writerMode.WriterOption = writerOption
case "conn":
writerOption := log.WriterConnOption{}
writerOption.ReconnectOnMsg, _ = opts.Config["reconnectOnMsg"].(bool)
writerOption.Reconnect, _ = opts.Config["reconnect"].(bool)
writerOption.Protocol, _ = opts.Config["net"].(string)
writerOption.Addr, _ = opts.Config["address"].(string)
writerMode.WriterOption = writerOption
default:
panic(fmt.Sprintf("invalid log writer mode: %s", writerType))
}
writer, err := log.NewEventWriter(opts.Writer, writerType, writerMode)
if err != nil { if err != nil {
log.Error("Failed to marshal log configuration: %v %v", opts.Config, err) log.Error("Failed to create new log writer: %v", err)
ctx.JSON(http.StatusInternalServerError, private.Response{ ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Failed to marshal log configuration: %v %v", opts.Config, err), Err: fmt.Sprintf("Failed to create new log writer: %v", err),
}) })
return return
} }
config := string(byteConfig) log.GetManager().GetLogger(opts.Logger).AddWriters(writer)
if err := log.NewNamedLogger(opts.Group, bufferLen, opts.Name, opts.Mode, config); err != nil {
log.Error("Failed to create new named logger: %s %v", config, err)
ctx.JSON(http.StatusInternalServerError, private.Response{
Err: fmt.Sprintf("Failed to create new named logger: %s %v", config, err),
})
return
}
setting.AddSubLogDescription(opts.Group, setting.SubLogDescription{
Name: opts.Name,
Provider: opts.Mode,
Config: config,
})
ctx.PlainText(http.StatusOK, "success") ctx.PlainText(http.StatusOK, "success")
} }

View file

@ -117,7 +117,6 @@ func Config(ctx *context.Context) {
ctx.Data["AppBuiltWith"] = setting.AppBuiltWith ctx.Data["AppBuiltWith"] = setting.AppBuiltWith
ctx.Data["Domain"] = setting.Domain ctx.Data["Domain"] = setting.Domain
ctx.Data["OfflineMode"] = setting.OfflineMode ctx.Data["OfflineMode"] = setting.OfflineMode
ctx.Data["DisableRouterLog"] = setting.Log.DisableRouterLog
ctx.Data["RunUser"] = setting.RunUser ctx.Data["RunUser"] = setting.RunUser
ctx.Data["RunMode"] = util.ToTitleCase(setting.RunMode) ctx.Data["RunMode"] = util.ToTitleCase(setting.RunMode)
ctx.Data["GitVersion"] = git.VersionInfo() ctx.Data["GitVersion"] = git.VersionInfo()
@ -182,13 +181,11 @@ func Config(ctx *context.Context) {
} }
ctx.Data["EnvVars"] = envVars ctx.Data["EnvVars"] = envVars
ctx.Data["Loggers"] = setting.GetLogDescriptions()
ctx.Data["EnableAccessLog"] = setting.Log.EnableAccessLog
ctx.Data["AccessLogTemplate"] = setting.Log.AccessLogTemplate ctx.Data["AccessLogTemplate"] = setting.Log.AccessLogTemplate
ctx.Data["DisableRouterLog"] = setting.Log.DisableRouterLog
ctx.Data["EnableXORMLog"] = setting.Log.EnableXORMLog
ctx.Data["LogSQL"] = setting.Database.LogSQL ctx.Data["LogSQL"] = setting.Database.LogSQL
ctx.Data["Loggers"] = log.GetManager().DumpLoggers()
ctx.HTML(http.StatusOK, tplConfig) ctx.HTML(http.StatusOK, tplConfig)
} }

View file

@ -2742,7 +2742,7 @@ func NewComment(ctx *context.Context) {
log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+ log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+
"User in Repo has Permissions: %-+v", "User in Repo has Permissions: %-+v",
ctx.Doer, ctx.Doer,
log.NewColoredIDValue(issue.PosterID), issue.PosterID,
issueType, issueType,
ctx.Repo.Repository, ctx.Repo.Repository,
ctx.Repo.Permission) ctx.Repo.Permission)
@ -3020,7 +3020,7 @@ func ChangeIssueReaction(ctx *context.Context) {
log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+ log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+
"User in Repo has Permissions: %-+v", "User in Repo has Permissions: %-+v",
ctx.Doer, ctx.Doer,
log.NewColoredIDValue(issue.PosterID), issue.PosterID,
issueType, issueType,
ctx.Repo.Repository, ctx.Repo.Repository,
ctx.Repo.Permission) ctx.Repo.Permission)
@ -3122,7 +3122,7 @@ func ChangeCommentReaction(ctx *context.Context) {
log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+ log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+
"User in Repo has Permissions: %-+v", "User in Repo has Permissions: %-+v",
ctx.Doer, ctx.Doer,
log.NewColoredIDValue(comment.Issue.PosterID), comment.Issue.PosterID,
issueType, issueType,
ctx.Repo.Repository, ctx.Repo.Repository,
ctx.Repo.Permission) ctx.Repo.Permission)

View file

@ -29,7 +29,7 @@ func IssueWatch(ctx *context.Context) {
log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+ log.Trace("Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n"+
"User in Repo has Permissions: %-+v", "User in Repo has Permissions: %-+v",
ctx.Doer, ctx.Doer,
log.NewColoredIDValue(issue.PosterID), issue.PosterID,
issueType, issueType,
ctx.Repo.Repository, ctx.Repo.Repository,
ctx.Repo.Permission) ctx.Repo.Permission)

View file

@ -115,13 +115,11 @@ func (d *CodebaseDownloader) String() string {
return fmt.Sprintf("migration from codebase server %s %s/%s", d.baseURL, d.project, d.repoName) return fmt.Sprintf("migration from codebase server %s %s/%s", d.baseURL, d.project, d.repoName)
} }
// ColorFormat provides a basic color format for a GogsDownloader func (d *CodebaseDownloader) LogString() string {
func (d *CodebaseDownloader) ColorFormat(s fmt.State) {
if d == nil { if d == nil {
log.ColorFprintf(s, "<nil: CodebaseDownloader>") return "<CodebaseDownloader nil>"
return
} }
log.ColorFprintf(s, "migration from codebase server %s %s/%s", d.baseURL, d.project, d.repoName) return fmt.Sprintf("<CodebaseDownloader %s %s/%s>", d.baseURL, d.project, d.repoName)
} }
// FormatCloneURL add authentication into remote URLs // FormatCloneURL add authentication into remote URLs

View file

@ -62,13 +62,11 @@ func (g *GitBucketDownloader) String() string {
return fmt.Sprintf("migration from gitbucket server %s %s/%s", g.baseURL, g.repoOwner, g.repoName) return fmt.Sprintf("migration from gitbucket server %s %s/%s", g.baseURL, g.repoOwner, g.repoName)
} }
// ColorFormat provides a basic color format for a GitBucketDownloader func (g *GitBucketDownloader) LogString() string {
func (g *GitBucketDownloader) ColorFormat(s fmt.State) {
if g == nil { if g == nil {
log.ColorFprintf(s, "<nil: GitBucketDownloader>") return "<GitBucketDownloader nil>"
return
} }
log.ColorFprintf(s, "migration from gitbucket server %s %s/%s", g.baseURL, g.repoOwner, g.repoName) return fmt.Sprintf("<GitBucketDownloader %s %s/%s>", g.baseURL, g.repoOwner, g.repoName)
} }
// NewGitBucketDownloader creates a GitBucket downloader // NewGitBucketDownloader creates a GitBucket downloader

View file

@ -134,13 +134,11 @@ func (g *GiteaDownloader) String() string {
return fmt.Sprintf("migration from gitea server %s %s/%s", g.baseURL, g.repoOwner, g.repoName) return fmt.Sprintf("migration from gitea server %s %s/%s", g.baseURL, g.repoOwner, g.repoName)
} }
// ColorFormat provides a basic color format for a GiteaDownloader func (g *GiteaDownloader) LogString() string {
func (g *GiteaDownloader) ColorFormat(s fmt.State) {
if g == nil { if g == nil {
log.ColorFprintf(s, "<nil: GiteaDownloader>") return "<GiteaDownloader nil>"
return
} }
log.ColorFprintf(s, "migration from gitea server %s %s/%s", g.baseURL, g.repoOwner, g.repoName) return fmt.Sprintf("<GiteaDownloader %s %s/%s>", g.baseURL, g.repoOwner, g.repoName)
} }
// GetRepoInfo returns a repository information // GetRepoInfo returns a repository information

Some files were not shown because too many files have changed in this diff Show more