* Panic, don't fatal, on create new logger. Fixes #5854
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* partial broken
* Update the logging infrastructure
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* Reset the skip levels for Fatal and Error
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* broken ncsa
* More log.Error fixes
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* Remove nal
* Set log levels to lowercase
* Make console_test test all levels
* Switch to lowercased levels
* OK now working
* Fix vetting issues
* Fix lint
* Fix tests
* Change default logging to match current Gitea
* Improve log testing
  Signed-off-by: Andrew Thornton <art27@cantab.net>
* Reset error skip levels to 0
* Update documentation and access logger configuration
* Redirect the router log back to Gitea if redirecting the macaron log, but also allow setting the log level (i.e. TRACE)
* Fix broken level caching
* Refactor the router log
* Add Router logger
* Add colorizing options
* Adjust router colors
* Only create loggers if they will be used
* Update app.ini.sample
* Rename Attribute to ColorAttribute
* Change from white to green for function
* Set fatal/error levels
* Restore initial trace logger
* Fix Trace arguments in modules/auth/auth.go
* Properly handle XORMLogger
* Improve admin/config page
* Fix fmt
* Add auto-compression of old logs
* Update error log levels
* Remove the unnecessary skip argument from Error, Fatal and Critical
* Add stacktrace support
* Fix tests
* Remove x/sync from vendors?
* Add stderr option to console logger
* Use filepath.ToSlash to protect against Windows in tests
* Remove prefixed underscores from names in colors.go
* Remove not implemented database logger
  This was removed from Gogs on 4 Mar 2016 but left in the configuration since then.
* Ensure that log paths are relative to ROOT_PATH
* Use path.Join
* Rename jsonConfig to logConfig
* Rename "config" to "jsonConfig" to make it clearer
* Requested changes
* Requested changes: XormLogger
* Try to color the Windows terminal; if successful, default to colorizing the console logs
* fixup
* Colorize initially too
* Update vendor
* Colorize logs by default, and remove colors if this is not a colorizing logger
* Fix documentation
* Fix test
* Use go-isatty to detect if on Windows we are on msys or cygwin
* Fix spelling mistake
* Add missing vendors
* More changes
* Rationalise the ANSI writer protection
* Adjust colors on advice from @0x5c
* Make Flags a comma separated list
* Move to use the windows constant for ENABLE_VIRTUAL_TERMINAL_PROCESSING
* Ensure matching is done on the non-colored message, to simplify EXPRESSION
parent ef2a343e27
commit 704da08fdc
301 changed files with 36993 additions and 8244 deletions
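Every call site in the diff below follows the same pattern: the leading skip/level integer is dropped from `log.Error`, `log.Fatal` and `log.GitLogger.*`, leaving the format string as the first argument. A minimal, hypothetical sketch of the call-site change (the import path is Gitea's logging module; the surrounding `main` is only scaffolding for illustration):

```go
package main

import (
	"errors"

	"code.gitea.io/gitea/modules/log"
)

func main() {
	err := errors.New("boom")

	// Before this change the first argument was a skip/level integer:
	//   log.Error(4, "Failed to run app with %s: %v", os.Args, err)
	//   log.Fatal(4, "Failed to start server: %v", err)

	// After this change the format string comes first:
	log.Error("Failed to run app: %v", err)
	log.Fatal("Failed to start server: %v", err)
}
```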
@@ -204,14 +204,14 @@ func runHookPostReceive(c *cli.Context) error {
 RepoUserName: repoUser,
 RepoName: repoName,
 }); err != nil {
-log.GitLogger.Error(2, "Update: %v", err)
+log.GitLogger.Error("Update: %v", err)
 }

 if newCommitID != git.EmptySHA && strings.HasPrefix(refFullName, git.BranchPrefix) {
 branch := strings.TrimPrefix(refFullName, git.BranchPrefix)
 repo, pullRequestAllowed, err := private.GetRepository(repoID)
 if err != nil {
-log.GitLogger.Error(2, "get repo: %v", err)
+log.GitLogger.Error("get repo: %v", err)
 break
 }
 if !pullRequestAllowed {
@@ -229,7 +229,7 @@ func runHookPostReceive(c *cli.Context) error {

 pr, err := private.ActivePullRequest(baseRepo.ID, repo.ID, baseRepo.DefaultBranch, branch)
 if err != nil {
-log.GitLogger.Error(2, "get active pr: %v", err)
+log.GitLogger.Error("get active pr: %v", err)
 break
 }

@@ -44,7 +44,7 @@ func runMigrate(ctx *cli.Context) error {
 models.LoadConfigs()

 if err := models.NewEngine(migrations.Migrate); err != nil {
-log.Fatal(4, "Failed to initialize ORM engine: %v", err)
+log.Fatal("Failed to initialize ORM engine: %v", err)
 return err
 }

@@ -100,7 +100,7 @@ func fail(userMessage, logMessage string, args ...interface{}) {
 if !setting.ProdMode {
 fmt.Fprintf(os.Stderr, logMessage+"\n", args...)
 }
-log.GitLogger.Fatal(3, logMessage, args...)
+log.GitLogger.Fatal(logMessage, args...)
 return
 }

cmd/web.go (14 changed lines)

@@ -69,7 +69,7 @@ func runHTTPRedirector() {
 var err = runHTTP(source, context2.ClearHandler(handler))

 if err != nil {
-log.Fatal(4, "Failed to start port redirection: %v", err)
+log.Fatal("Failed to start port redirection: %v", err)
 }
 }

@@ -84,7 +84,7 @@ func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler)
 log.Info("Running Let's Encrypt handler on %s", setting.HTTPAddr+":"+setting.PortToRedirect)
 var err = http.ListenAndServe(setting.HTTPAddr+":"+setting.PortToRedirect, certManager.HTTPHandler(http.HandlerFunc(runLetsEncryptFallbackHandler))) // all traffic coming into HTTP will be redirect to HTTPS automatically (LE HTTP-01 validation happens here)
 if err != nil {
-log.Fatal(4, "Failed to start the Let's Encrypt handler on port %s: %v", setting.PortToRedirect, err)
+log.Fatal("Failed to start the Let's Encrypt handler on port %s: %v", setting.PortToRedirect, err)
 }
 }()
 server := &http.Server{
@@ -192,13 +192,13 @@ func runWeb(ctx *cli.Context) error {
 case setting.FCGI:
 listener, err := net.Listen("tcp", listenAddr)
 if err != nil {
-log.Fatal(4, "Failed to bind %s", listenAddr, err)
+log.Fatal("Failed to bind %s: %v", listenAddr, err)
 }
 defer listener.Close()
 err = fcgi.Serve(listener, context2.ClearHandler(m))
 case setting.UnixSocket:
 if err := os.Remove(listenAddr); err != nil && !os.IsNotExist(err) {
-log.Fatal(4, "Failed to remove unix socket directory %s: %v", listenAddr, err)
+log.Fatal("Failed to remove unix socket directory %s: %v", listenAddr, err)
 }
 var listener *net.UnixListener
 listener, err = net.ListenUnix("unix", &net.UnixAddr{Name: listenAddr, Net: "unix"})
@@ -209,15 +209,15 @@ func runWeb(ctx *cli.Context) error {
 // FIXME: add proper implementation of signal capture on all protocols
 // execute this on SIGTERM or SIGINT: listener.Close()
 if err = os.Chmod(listenAddr, os.FileMode(setting.UnixSocketPermission)); err != nil {
-log.Fatal(4, "Failed to set permission of unix socket: %v", err)
+log.Fatal("Failed to set permission of unix socket: %v", err)
 }
 err = http.Serve(listener, context2.ClearHandler(m))
 default:
-log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
+log.Fatal("Invalid protocol: %s", setting.Protocol)
 }

 if err != nil {
-log.Fatal(4, "Failed to start server: %v", err)
+log.Fatal("Failed to start server: %v", err)
 }

 return nil

@@ -34,7 +34,7 @@ func runHTTPS(listenAddr, certFile, keyFile string, m http.Handler) error {
 var err error
 config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
 if err != nil {
-log.Fatal(4, "Failed to load https cert file %s: %v", listenAddr, err)
+log.Fatal("Failed to load https cert file %s: %v", listenAddr, err)
 }

 return gracehttp.Serve(&http.Server{
@@ -1,3 +1,4 @@

 ; This file lists the default values used by Gitea
 ; Copy required sections to your own app.ini (default is custom/conf/app.ini)
 ; and modify as needed.
@@ -33,7 +34,7 @@ PREFERRED_LICENSES = Apache License 2.0,MIT License
 DISABLE_HTTP_GIT = false
 ; Value for Access-Control-Allow-Origin header, default is not to present
 ; WARNING: This maybe harmful to you website if you do not give it a right value.
 ACCESS_CONTROL_ALLOW_ORIGIN =
 ; Force ssh:// clone url instead of scp-style uri when default SSH port is used
 USE_COMPAT_SSH_URI = false
 ; Close issues as long as a commit on any branch marks it as fixed
@@ -260,7 +261,7 @@ ISSUE_INDEXER_TYPE = bleve
 ISSUE_INDEXER_PATH = indexers/issues.bleve
 ; Issue indexer queue, currently support: channel or levelqueue, default is levelqueue
 ISSUE_INDEXER_QUEUE_TYPE = levelqueue
 ; When ISSUE_INDEXER_QUEUE_TYPE is levelqueue, this will be the queue will be saved path,
 ; default is indexers/issues.queue
 ISSUE_INDEXER_QUEUE_DIR = indexers/issues.queue
 ; Batch queue number, default is 20
@@ -390,8 +391,8 @@ NO_REPLY_ADDRESS = noreply.example.org
 ; Show Registration button
 SHOW_REGISTRATION_BUTTON = true
 ; Default value for AutoWatchNewRepos
 ; When adding a repo to a team or creating a new repo all team members will watch the
 ; repo automatically if enabled
 AUTO_WATCH_NEW_REPOS = true

 [webhook]
@@ -516,17 +517,37 @@ ROOT_PATH =
 MODE = console
 ; Buffer length of the channel, keep it as it is if you don't know what it is.
 BUFFER_LEN = 10000
-; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
-LEVEL = Trace
 REDIRECT_MACARON_LOG = false
+MACARON = file
+; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
+ROUTER_LOG_LEVEL = Info
+ROUTER = console
+ENABLE_ACCESS_LOG = false
+ACCESS_LOG_TEMPLATE = {{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}\" \"{{.Ctx.Req.UserAgent}}"
+ACCESS = file
+; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
+LEVEL = Info
+; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "None"
+STACKTRACE_LEVEL = None

+; Generic log modes
+[log.x]
+FLAGS = stdflags
+EXPRESSION =
+PREFIX =
+COLORIZE = false

 ; For "console" mode only
 [log.console]
 LEVEL =
+STDERR = false

 ; For "file" mode only
 [log.file]
 LEVEL =
+; Set the file_name for the logger. If this is a relative path this
+; will be relative to ROOT_PATH
+FILE_NAME =
 ; This enables automated log rotate(switch of following options), default is true
 LOG_ROTATE = true
 ; Max number of lines in a single file, default is 1000000
@@ -537,6 +558,10 @@ MAX_SIZE_SHIFT = 28
 DAILY_ROTATE = true
 ; delete the log file after n days, default is 7
 MAX_DAYS = 7
+; compress logs with gzip
+COMPRESS = true
+; compression level see godoc for compress/gzip
+COMPRESSION_LEVEL = -1

 ; For "conn" mode only
 [log.conn]
@@ -564,14 +589,6 @@ PASSWD =
 ; Receivers, can be one or more, e.g. 1@example.com,2@example.com
 RECEIVERS =

-; For "database" mode only
-[log.database]
-LEVEL =
-; Either "mysql" or "postgres"
-DRIVER =
-; Based on xorm, e.g.: root:root@localhost/gitea?charset=utf8
-CONN =

 [cron]
 ; Enable running cron tasks periodically.
 ENABLED = true
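The new `COMPRESS`/`COMPRESSION_LEVEL` options added to `[log.file]` above point at `compress/gzip` for the meaning of the level. A small sketch of the values that comment refers to (assuming the setting maps onto the gzip package's levels, which the sample comment implies but does not spell out):

```go
package main

import (
	"compress/gzip"
	"fmt"
)

func main() {
	// The gzip levels the [log.file] comment refers to:
	// -1 = DefaultCompression (the sample's COMPRESSION_LEVEL default),
	//  0 = NoCompression, 1 = BestSpeed ... 9 = BestCompression.
	fmt.Println(gzip.DefaultCompression, gzip.NoCompression, gzip.BestSpeed, gzip.BestCompression)
}
```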
@@ -68,10 +68,12 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
 - `DEFAULT_CLOSE_ISSUES_VIA_COMMITS_IN_ANY_BRANCH`: **false**: Close an issue if a commit on a non default branch marks it as closed.

 ### Repository - Pull Request (`repository.pull-request`)

 - `WORK_IN_PROGRESS_PREFIXES`: **WIP:,\[WIP\]**: List of prefixes used in Pull Request
 title to mark them as Work In Progress

 ### Repository - Issue (`repository.issue`)

 - `LOCK_REASONS`: **Too heated,Off-topic,Resolved,Spam**: A list of reasons why a Pull Request or Issue can be locked

 ## UI (`ui`)
@@ -287,9 +289,65 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
 ## Log (`log`)

 - `ROOT_PATH`: **\<empty\>**: Root path for log files.
-- `MODE`: **console**: Logging mode. For multiple modes, use a comma to separate values.
+- `MODE`: **console**: Logging mode. For multiple modes, use a comma to separate values. You can configure each mode in per mode log subsections `\[log.modename\]`. By default the file mode will log to `$ROOT_PATH/gitea.log`.
-- `LEVEL`: **Trace**: General log level. \[Trace, Debug, Info, Warn, Error, Critical\]
+- `LEVEL`: **Info**: General log level. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\]
-- `REDIRECT_MACARON_LOG`: **false**: Redirects the Macaron log to the Gitea logger.
+- `STACKTRACE_LEVEL`: **None**: Default log level at which to log create stack traces. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\]
+- `REDIRECT_MACARON_LOG`: **false**: Redirects the Macaron log to its own logger or the default logger.
+- `MACARON`: **file**: Logging mode for the macaron logger, use a comma to separate values. Configure each mode in per mode log subsections `\[log.modename.macaron\]`. By default the file mode will log to `$ROOT_PATH/macaron.log`. (If you set this to `,` it will log to default gitea logger.)
+- `ROUTER_LOG_LEVEL`: **Info**: The log level that the router should log at. (If you are setting the access log, its recommended to place this at Debug.)
+- `ROUTER`: **console**: The mode or name of the log the router should log to. (If you set this to `,` it will log to default gitea logger.)
+  NB: You must `REDIRECT_MACARON_LOG` and have `DISABLE_ROUTER_LOG` set to `false` for this option to take effect. Configure each mode in per mode log subsections `\[log.modename.router\]`.
+- `ENABLE_ACCESS_LOG`: **false**: Creates an access.log in NCSA common log format, or as per the following template
+- `ACCESS`: **file**: Logging mode for the access logger, use a comma to separate values. Configure each mode in per mode log subsections `\[log.modename.access\]`. By default the file mode will log to `$ROOT_PATH/access.log`. (If you set this to `,` it will log to the default gitea logger.)
+- `ACCESS_LOG_TEMPLATE`: **`{{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}\" \"{{.Ctx.Req.UserAgent}}"`**: Sets the template used to create the access log.
+  - The following variables are available:
+    - `Ctx`: the `macaron.Context` of the request.
+    - `Identity`: the SignedUserName or `"-"` if not logged in.
+    - `Start`: the start time of the request.
+    - `ResponseWriter`: the responseWriter from the request.
+  - You must be very careful to ensure that this template does not throw errors or panics as this template runs outside of the panic/recovery script.
+- `ENABLE_XORM_LOG`: **true**: Set whether to perform XORM logging. Please note SQL statement logging can be disabled by setting `LOG_SQL` to false in the `[database]` section.
+
+### Log subsections (`log.name`, `log.name.*`)
+
+- `LEVEL`: **log.LEVEL**: Sets the log-level of this sublogger. Defaults to the `LEVEL` set in the global `[log]` section.
+- `STACKTRACE_LEVEL`: **log.STACKTRACE_LEVEL**: Sets the log level at which to log stack traces.
+- `MODE`: **name**: Sets the mode of this sublogger - Defaults to the provided subsection name. This allows you to have two different file loggers at different levels.
+- `EXPRESSION`: **""**: A regular expression to match either the function name, file or message. Defaults to empty. Only log messages that match the expression will be saved in the logger.
+- `FLAGS`: **stdflags**: A comma separated string representing the log flags. Defaults to `stdflags` which represents the prefix: `2009/01/23 01:23:23 ...a/b/c/d.go:23:runtime.Caller() [I]: message`. `none` means don't prefix log lines. See `modules/log/base.go` for more information.
+- `PREFIX`: **""**: An additional prefix for every log line in this logger. Defaults to empty.
+- `COLORIZE`: **false**: Colorize the log lines by default
+
+### Console log mode (`log.console`, `log.console.*`, or `MODE=console`)
+
+- For the console logger `COLORIZE` will default to `true` if not on windows.
+- `STDERR`: **false**: Use Stderr instead of Stdout.
+
+### File log mode (`log.file`, `log.file.*` or `MODE=file`)
+
+- `FILE_NAME`: Set the file name for this logger. Defaults as described above. If relative will be relative to the `ROOT_PATH`
+- `LOG_ROTATE`: **true**: Rotate the log files.
+- `MAX_SIZE_SHIFT`: **28**: Maximum size shift of a single file, 28 represents 256Mb.
+- `DAILY_ROTATE`: **true**: Rotate logs daily.
+- `MAX_DAYS`: **7**: Delete the log file after n days
+- NB: `COLORIZE`: will default to `true` if not on windows.
+- `COMPRESS`: **true**: Compress old log files by default with gzip
+- `COMPRESSION_LEVEL`: **-1**: Compression level
+
+### Conn log mode (`log.conn`, `log.conn.*` or `MODE=conn`)
+
+- `RECONNECT_ON_MSG`: **false**: Reconnect host for every single message.
+- `RECONNECT`: **false**: Try to reconnect when connection is lost.
+- `PROTOCOL`: **tcp**: Set the protocol, either "tcp", "unix" or "udp".
+- `ADDR`: **:7020**: Sets the address to connect to.
+
+### SMTP log mode (`log.smtp`, `log.smtp.*` or `MODE=smtp`)
+
+- `USER`: User email address to send from.
+- `PASSWD`: Password for the smtp server.
+- `HOST`: **127.0.0.1:25**: The SMTP host to connect to.
+- `RECEIVERS`: Email addresses to send to.
+- `SUBJECT`: **Diagnostic message from Gitea**

 ## Cron (`cron`)
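`ACCESS_LOG_TEMPLATE` above is an ordinary Go `text/template`. The sketch below shows how such a template expands; the structs are simplified stand-ins for the documented `Ctx`, `Identity`, `Start` and `ResponseWriter` variables (Gitea's real values come from macaron), and the Referer/UserAgent parts are omitted to keep it short:

```go
package main

import (
	"os"
	"text/template"
	"time"
)

// Simplified stand-ins for the variables the cheat sheet documents.
// In Gitea, Ctx is the macaron.Context and ResponseWriter the real writer;
// here they are plain structs so the sketch runs on its own.
type fakeReq struct {
	Method, RequestURI, Proto string
}

type fakeCtx struct {
	RemoteAddr string
	Req        fakeReq
}

type fakeWriter struct {
	Status, Size int
}

type logEntry struct {
	Ctx            fakeCtx
	Identity       string
	Start          time.Time
	ResponseWriter fakeWriter
}

const accessTmpl = `{{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]"}} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}}`

func main() {
	t := template.Must(template.New("access").Parse(accessTmpl))
	entry := logEntry{
		Ctx:            fakeCtx{RemoteAddr: "127.0.0.1", Req: fakeReq{Method: "GET", RequestURI: "/", Proto: "HTTP/1.1"}},
		Identity:       "-",
		Start:          time.Now(),
		ResponseWriter: fakeWriter{Status: 200, Size: 1024},
	}
	// Renders one NCSA-style access log line to stdout.
	if err := t.Execute(os.Stdout, entry); err != nil {
		panic(err)
	}
}
```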
go.mod (4 changed lines)

@@ -79,6 +79,7 @@ require (
 github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de // indirect
 github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af // indirect
 github.com/markbates/goth v1.49.0
+github.com/mattn/go-isatty v0.0.7
 github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d // indirect
 github.com/mattn/go-sqlite3 v1.10.0
 github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
@@ -114,8 +115,7 @@ require (
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
 golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519
 golang.org/x/oauth2 v0.0.0-20181101160152-c453e0c75759 // indirect
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223
-golang.org/x/sys v0.0.0-20181026144532-2772b66316d2
 golang.org/x/text v0.3.0
 gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
 gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect

go.sum (6 changed lines)

@@ -207,6 +207,8 @@ github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af/go.mod h1:Cqz6pqow14VOb
 github.com/markbates/going v1.0.0/go.mod h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA=
 github.com/markbates/goth v1.49.0 h1:qQ4Ti4WaqAxNAggOC+4s5M85sMVfMJwQn/Xkp73wfgI=
 github.com/markbates/goth v1.49.0/go.mod h1:zZmAw0Es0Dpm7TT/4AdN14QrkiWLMrrU9Xei1o+/mdA=
+github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d h1:m+dSK37rFf2fqppZhg15yI2IwC9BtucBiRwSDm9VL8g=
 github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d/go.mod h1:/M9VLO+lUPmxvoOK2PfWRZ8mTtB4q1Hy9lEGijv9Nr8=
 github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@@ -311,8 +313,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6Zh
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026144532-2772b66316d2 h1:W7CqTdBJ1CmxLKe7LptKDnBYV6PHrVLiGnoyBjaG/JQ=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
-golang.org/x/sys v0.0.0-20181026144532-2772b66316d2/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
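The new `github.com/mattn/go-isatty` requirement above is what the commit message refers to for detecting msys/cygwin terminals on Windows before enabling colors. A rough standalone sketch of that kind of check, not Gitea's actual wiring:

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	fd := os.Stdout.Fd()
	// Colorize only when stdout is a real terminal, including the
	// msys/cygwin pseudo-terminals that IsTerminal alone would miss.
	canColor := isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd)
	fmt.Println("colorize console output:", canColor)
}
```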
main.go (2 changed lines)

@@ -56,7 +56,7 @@ arguments - which can alternatively be run by running the subcommand web.`
 app.Action = cmd.CmdWeb.Action
 err := app.Run(os.Args)
 if err != nil {
-log.Fatal(4, "Failed to run app with %s: %v", os.Args, err)
+log.Fatal("Failed to run app with %s: %v", os.Args, err)
 }
 }
@@ -110,7 +110,7 @@ func (a *Action) loadActUser() {
 } else if IsErrUserNotExist(err) {
 a.ActUser = NewGhostUser()
 } else {
-log.Error(4, "GetUserByID(%d): %v", a.ActUserID, err)
+log.Error("GetUserByID(%d): %v", a.ActUserID, err)
 }
 }

@@ -121,7 +121,7 @@ func (a *Action) loadRepo() {
 var err error
 a.Repo, err = GetRepositoryByID(a.RepoID)
 if err != nil {
-log.Error(4, "GetRepositoryByID(%d): %v", a.RepoID, err)
+log.Error("GetRepositoryByID(%d): %v", a.RepoID, err)
 }
 }

@@ -256,7 +256,7 @@ func (a *Action) GetIssueTitle() string {
 index := com.StrTo(a.GetIssueInfos()[0]).MustInt64()
 issue, err := GetIssueByIndex(a.RepoID, index)
 if err != nil {
-log.Error(4, "GetIssueByIndex: %v", err)
+log.Error("GetIssueByIndex: %v", err)
 return "500 when get issue"
 }
 return issue.Title
@@ -268,7 +268,7 @@ func (a *Action) GetIssueContent() string {
 index := com.StrTo(a.GetIssueInfos()[0]).MustInt64()
 issue, err := GetIssueByIndex(a.RepoID, index)
 if err != nil {
-log.Error(4, "GetIssueByIndex: %v", err)
+log.Error("GetIssueByIndex: %v", err)
 return "500 when get issue"
 }
 return issue.Content
@@ -419,7 +419,7 @@ func (pc *PushCommits) AvatarLink(email string) string {
 if err != nil {
 pc.avatars[email] = base.AvatarLink(email)
 if !IsErrUserNotExist(err) {
-log.Error(4, "GetUserByEmail: %v", err)
+log.Error("GetUserByEmail: %v", err)
 return ""
 }
 } else {
@@ -619,7 +619,7 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {
 }

 if err = UpdateIssuesCommit(pusher, repo, opts.Commits.Commits, refName); err != nil {
-log.Error(4, "updateIssuesCommit: %v", err)
+log.Error("updateIssuesCommit: %v", err)
 }
 }

@@ -661,12 +661,12 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {
 if isNewBranch {
 gitRepo, err := git.OpenRepository(repo.RepoPath())
 if err != nil {
-log.Error(4, "OpenRepository[%s]: %v", repo.RepoPath(), err)
+log.Error("OpenRepository[%s]: %v", repo.RepoPath(), err)
 }

 shaSum, err = gitRepo.GetBranchCommitID(refName)
 if err != nil {
-log.Error(4, "GetBranchCommitID[%s]: %v", opts.RefFullName, err)
+log.Error("GetBranchCommitID[%s]: %v", opts.RefFullName, err)
 }
 if err = PrepareWebhooks(repo, HookEventCreate, &api.CreatePayload{
 Ref: refName,
@@ -697,11 +697,11 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {

 gitRepo, err := git.OpenRepository(repo.RepoPath())
 if err != nil {
-log.Error(4, "OpenRepository[%s]: %v", repo.RepoPath(), err)
+log.Error("OpenRepository[%s]: %v", repo.RepoPath(), err)
 }
 shaSum, err = gitRepo.GetTagCommitID(refName)
 if err != nil {
-log.Error(4, "GetTagCommitID[%s]: %v", opts.RefFullName, err)
+log.Error("GetTagCommitID[%s]: %v", opts.RefFullName, err)
 }
 if err = PrepareWebhooks(repo, HookEventCreate, &api.CreatePayload{
 Ref: refName,
@@ -65,7 +65,7 @@ func removeAllWithNotice(e Engine, title, path string) {
 desc := fmt.Sprintf("%s [%s]: %v", title, path, err)
 log.Warn(desc)
 if err = createNotice(e, NoticeRepository, desc); err != nil {
-log.Error(4, "CreateRepositoryNotice: %v", err)
+log.Error("CreateRepositoryNotice: %v", err)
 }
 }
 }

@@ -61,7 +61,7 @@ func (protectBranch *ProtectedBranch) CanUserPush(userID int64) bool {

 in, err := IsUserInTeams(userID, protectBranch.WhitelistTeamIDs)
 if err != nil {
-log.Error(1, "IsUserInTeams:", err)
+log.Error("IsUserInTeams: %v", err)
 return false
 }
 return in
@@ -83,7 +83,7 @@ func (protectBranch *ProtectedBranch) CanUserMerge(userID int64) bool {

 in, err := IsUserInTeams(userID, protectBranch.MergeWhitelistTeamIDs)
 if err != nil {
-log.Error(1, "IsUserInTeams:", err)
+log.Error("IsUserInTeams: %v", err)
 return false
 }
 return in
@@ -101,7 +101,7 @@ func (protectBranch *ProtectedBranch) HasEnoughApprovals(pr *PullRequest) bool {
 func (protectBranch *ProtectedBranch) GetGrantedApprovalsCount(pr *PullRequest) int64 {
 reviews, err := GetReviewersByPullID(pr.Issue.ID)
 if err != nil {
-log.Error(1, "GetReviewersByPullID:", err)
+log.Error("GetReviewersByPullID: %v", err)
 return 0
 }

@@ -119,7 +119,7 @@ func (protectBranch *ProtectedBranch) GetGrantedApprovalsCount(pr *PullRequest)
 }
 approvalTeamCount, err := UsersInTeamsCount(userIDs, protectBranch.ApprovalsWhitelistTeamIDs)
 if err != nil {
-log.Error(1, "UsersInTeamsCount:", err)
+log.Error("UsersInTeamsCount: %v", err)
 return 0
 }
 return approvalTeamCount + approvals
@@ -466,6 +466,6 @@ func RemoveOldDeletedBranches() {
 deleteBefore := time.Now().Add(-setting.Cron.DeletedBranchesCleanup.OlderThan)
 _, err := x.Where("deleted_unix < ?", deleteBefore.Unix()).Delete(new(DeletedBranch))
 if err != nil {
-log.Error(4, "DeletedBranchesCleanup: %v", err)
+log.Error("DeletedBranchesCleanup: %v", err)
 }
 }
@@ -52,7 +52,7 @@ func (key *GPGKey) BeforeInsert() {
 func (key *GPGKey) AfterLoad(session *xorm.Session) {
 err := session.Where("primary_key_id=?", key.KeyID).Find(&key.SubsKey)
 if err != nil {
-log.Error(3, "Find Sub GPGkeys[%d]: %v", key.KeyID, err)
+log.Error("Find Sub GPGkeys[%s]: %v", key.KeyID, err)
 }
 }

@@ -364,7 +364,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
 //Parsing signature
 sig, err := extractSignature(c.Signature.Signature)
 if err != nil { //Skipping failed to extract sign
-log.Error(3, "SignatureRead err: %v", err)
+log.Error("SignatureRead err: %v", err)
 return &CommitVerification{
 Verified: false,
 Reason: "gpg.error.extract_sign",
@@ -377,7 +377,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
 // We can expect this to often be an ErrUserNotExist. in the case
 // it is not, however, it is important to log it.
 if !IsErrUserNotExist(err) {
-log.Error(3, "GetUserByEmail: %v", err)
+log.Error("GetUserByEmail: %v", err)
 }
 return &CommitVerification{
 Verified: false,
@@ -387,7 +387,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {

 keys, err := ListGPGKeys(committer.ID)
 if err != nil { //Skipping failed to get gpg keys of user
-log.Error(3, "ListGPGKeys: %v", err)
+log.Error("ListGPGKeys: %v", err)
 return &CommitVerification{
 Verified: false,
 Reason: "gpg.error.failed_retrieval_gpg_keys",
@@ -411,7 +411,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
 //Generating hash of commit
 hash, err := populateHash(sig.Hash, []byte(c.Signature.Payload))
 if err != nil { //Skipping ailed to generate hash
-log.Error(3, "PopulateHash: %v", err)
+log.Error("PopulateHash: %v", err)
 return &CommitVerification{
 Verified: false,
 Reason: "gpg.error.generate_hash",
@@ -432,7 +432,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
 //Generating hash of commit
 hash, err := populateHash(sig.Hash, []byte(c.Signature.Payload))
 if err != nil { //Skipping ailed to generate hash
-log.Error(3, "PopulateHash: %v", err)
+log.Error("PopulateHash: %v", err)
 return &CommitVerification{
 Verified: false,
 Reason: "gpg.error.generate_hash",
@@ -112,7 +112,7 @@ func (issue *Issue) IsTimetrackerEnabled() bool {

 func (issue *Issue) isTimetrackerEnabled(e Engine) bool {
 if err := issue.loadRepo(e); err != nil {
-log.Error(4, fmt.Sprintf("loadRepo: %v", err))
+log.Error(fmt.Sprintf("loadRepo: %v", err))
 return false
 }
 return issue.Repo.IsTimetrackerEnabled()
@@ -423,23 +423,23 @@ func (issue *Issue) sendLabelUpdatedWebhook(doer *User) {
 var err error

 if err = issue.loadRepo(x); err != nil {
-log.Error(4, "loadRepo: %v", err)
+log.Error("loadRepo: %v", err)
 return
 }

 if err = issue.loadPoster(x); err != nil {
-log.Error(4, "loadPoster: %v", err)
+log.Error("loadPoster: %v", err)
 return
 }

 mode, _ := AccessLevel(issue.Poster, issue.Repo)
 if issue.IsPull {
 if err = issue.loadPullRequest(x); err != nil {
-log.Error(4, "loadPullRequest: %v", err)
+log.Error("loadPullRequest: %v", err)
 return
 }
 if err = issue.PullRequest.LoadIssue(); err != nil {
-log.Error(4, "LoadIssue: %v", err)
+log.Error("LoadIssue: %v", err)
 return
 }
 err = PrepareWebhooks(issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@@ -459,7 +459,7 @@ func (issue *Issue) sendLabelUpdatedWebhook(doer *User) {
 })
 }
 if err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
+log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
 } else {
 go HookQueue.Add(issue.RepoID)
 }
@@ -584,7 +584,7 @@ func (issue *Issue) ClearLabels(doer *User) (err error) {
 if issue.IsPull {
 err = issue.PullRequest.LoadIssue()
 if err != nil {
-log.Error(4, "LoadIssue: %v", err)
+log.Error("LoadIssue: %v", err)
 return
 }
 err = PrepareWebhooks(issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@@ -604,7 +604,7 @@ func (issue *Issue) ClearLabels(doer *User) (err error) {
 })
 }
 if err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
+log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
 } else {
 go HookQueue.Add(issue.RepoID)
 }
@@ -819,7 +819,7 @@ func (issue *Issue) ChangeStatus(doer *User, isClosed bool) (err error) {
 err = PrepareWebhooks(issue.Repo, HookEventIssues, apiIssue)
 }
 if err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v, is_closed: %v]: %v", issue.IsPull, isClosed, err)
+log.Error("PrepareWebhooks [is_pull: %v, is_closed: %v]: %v", issue.IsPull, isClosed, err)
 } else {
 go HookQueue.Add(issue.Repo.ID)
 }
@@ -888,7 +888,7 @@ func (issue *Issue) ChangeTitle(doer *User, title string) (err error) {
 }

 if err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
+log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
 } else {
 go HookQueue.Add(issue.RepoID)
 }
@@ -953,7 +953,7 @@ func (issue *Issue) ChangeContent(doer *User, content string) (err error) {
 })
 }
 if err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
+log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
 } else {
 go HookQueue.Add(issue.RepoID)
 }
@@ -1169,7 +1169,7 @@ func NewIssue(repo *Repository, issue *Issue, labelIDs []int64, assigneeIDs []in
 Repo: repo,
 IsPrivate: repo.IsPrivate,
 }); err != nil {
-log.Error(4, "NotifyWatchers: %v", err)
+log.Error("NotifyWatchers: %v", err)
 }

 mode, _ := AccessLevel(issue.Poster, issue.Repo)
@@ -1180,7 +1180,7 @@ func NewIssue(repo *Repository, issue *Issue, labelIDs []int64, assigneeIDs []in
 Repository: repo.APIFormat(mode),
 Sender: issue.Poster.APIFormat(),
 }); err != nil {
-log.Error(4, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(issue.RepoID)
 }
@@ -187,7 +187,7 @@ func (issue *Issue) changeAssignee(sess *xorm.Session, doer *User, assigneeID in
 apiPullRequest.Action = api.HookIssueAssigned
 }
 if err := prepareWebhooks(sess, issue.Repo, HookEventPullRequest, apiPullRequest); err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
+log.Error("PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
 return nil
 }
 } else {
@@ -205,7 +205,7 @@ func (issue *Issue) changeAssignee(sess *xorm.Session, doer *User, assigneeID in
 apiIssue.Action = api.HookIssueAssigned
 }
 if err := prepareWebhooks(sess, issue.Repo, HookEventIssues, apiIssue); err != nil {
-log.Error(4, "PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
+log.Error("PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
 return nil
 }
 }
@@ -171,12 +171,12 @@ func (c *Comment) AfterDelete() {
 func (c *Comment) HTMLURL() string {
 err := c.LoadIssue()
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
+log.Error("LoadIssue(%d): %v", c.IssueID, err)
 return ""
 }
 err = c.Issue.loadRepo(x)
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
+log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
 return ""
 }
 if c.Type == CommentTypeCode {
@@ -200,7 +200,7 @@ func (c *Comment) HTMLURL() string {
 func (c *Comment) IssueURL() string {
 err := c.LoadIssue()
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
+log.Error("LoadIssue(%d): %v", c.IssueID, err)
 return ""
 }

@@ -210,7 +210,7 @@ func (c *Comment) IssueURL() string {

 err = c.Issue.loadRepo(x)
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
+log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
 return ""
 }
 return c.Issue.HTMLURL()
@@ -220,13 +220,13 @@ func (c *Comment) IssueURL() string {
 func (c *Comment) PRURL() string {
 err := c.LoadIssue()
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
+log.Error("LoadIssue(%d): %v", c.IssueID, err)
 return ""
 }

 err = c.Issue.loadRepo(x)
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
+log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
 return ""
 }

@@ -318,7 +318,7 @@ func (c *Comment) LoadPoster() error {
 c.PosterID = -1
 c.Poster = NewGhostUser()
 } else {
-log.Error(3, "getUserByID[%d]: %v", c.ID, err)
+log.Error("getUserByID[%d]: %v", c.ID, err)
 }
 }
 return nil
@@ -333,7 +333,7 @@ func (c *Comment) LoadAttachments() error {
 var err error
 c.Attachments, err = getAttachmentsByCommentID(x, c.ID)
 if err != nil {
-log.Error(3, "getAttachmentsByCommentID[%d]: %v", c.ID, err)
+log.Error("getAttachmentsByCommentID[%d]: %v", c.ID, err)
 }
 return nil
 }
@@ -384,7 +384,7 @@ func (c *Comment) mailParticipants(e Engine, opType ActionType, issue *Issue) (e
 content = fmt.Sprintf("Reopened #%d", issue.Index)
 }
 if err = mailIssueCommentToParticipants(e, issue, c.Poster, content, c, mentions); err != nil {
-log.Error(4, "mailIssueCommentToParticipants: %v", err)
+log.Error("mailIssueCommentToParticipants: %v", err)
 }

 return nil
@@ -492,12 +492,12 @@ func (c *Comment) MustAsDiff() *Diff {
 func (c *Comment) CodeCommentURL() string {
 err := c.LoadIssue()
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
+log.Error("LoadIssue(%d): %v", c.IssueID, err)
 return ""
 }
 err = c.Issue.loadRepo(x)
 if err != nil { // Silently dropping errors :unamused:
-log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
+log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
 return ""
 }
 return fmt.Sprintf("%s/files#%s", c.Issue.HTMLURL(), c.HashTag())
@@ -638,7 +638,7 @@ func sendCreateCommentAction(e *xorm.Session, opts *CreateCommentOptions, commen
 // Notify watchers for whatever action comes in, ignore if no action type.
 if act.OpType > 0 {
 if err = notifyWatchers(e, act); err != nil {
-log.Error(4, "notifyWatchers: %v", err)
+log.Error("notifyWatchers: %v", err)
 }
 }
 return nil
@@ -850,7 +850,7 @@ func CreateIssueComment(doer *User, repo *Repository, issue *Issue, content stri
 Repository: repo.APIFormat(mode),
 Sender: doer.APIFormat(),
 }); err != nil {
-log.Error(2, "PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
+log.Error("PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
 } else {
 go HookQueue.Add(repo.ID)
 }
@@ -1053,7 +1053,7 @@ func UpdateComment(doer *User, c *Comment, oldContent string) error {
 Repository: c.Issue.Repo.APIFormat(mode),
 Sender: doer.APIFormat(),
 }); err != nil {
-log.Error(2, "PrepareWebhooks [comment_id: %d]: %v", c.ID, err)
+log.Error("PrepareWebhooks [comment_id: %d]: %v", c.ID, err)
 } else {
 go HookQueue.Add(c.Issue.Repo.ID)
 }
@@ -1108,7 +1108,7 @@ func DeleteComment(doer *User, comment *Comment) error {
 Repository: comment.Issue.Repo.APIFormat(mode),
 Sender: doer.APIFormat(),
 }); err != nil {
-log.Error(2, "PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
+log.Error("PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
 } else {
 go HookQueue.Add(comment.Issue.Repo.ID)
 }
@@ -129,7 +129,7 @@ func (issue *Issue) mailParticipants(e Engine) (err error) {
 }

 if err = mailIssueCommentToParticipants(e, issue, issue.Poster, issue.Content, nil, mentions); err != nil {
-log.Error(4, "mailIssueCommentToParticipants: %v", err)
+log.Error("mailIssueCommentToParticipants: %v", err)
 }

 return nil
@@ -393,7 +393,7 @@ func ChangeMilestoneAssign(issue *Issue, doer *User, oldMilestoneID int64) (err
 if issue.IsPull {
 err = issue.PullRequest.LoadIssue()
 if err != nil {
-log.Error(2, "LoadIssue: %v", err)
+log.Error("LoadIssue: %v", err)
 return
 }
 err = PrepareWebhooks(issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@@ -413,7 +413,7 @@ func ChangeMilestoneAssign(issue *Issue, doer *User, oldMilestoneID int64) (err
 })
 }
 if err != nil {
-log.Error(2, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
+log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
 } else {
 go HookQueue.Add(issue.RepoID)
 }
@@ -39,11 +39,11 @@ func (l *LFSLock) AfterLoad(session *xorm.Session) {
 var err error
 l.Owner, err = getUserByID(session, l.OwnerID)
 if err != nil {
-log.Error(2, "LFS lock AfterLoad failed OwnerId[%d] not found: %v", l.OwnerID, err)
+log.Error("LFS lock AfterLoad failed OwnerId[%d] not found: %v", l.OwnerID, err)
 }
 l.Repo, err = getRepositoryByID(session, l.RepoID)
 if err != nil {
-log.Error(2, "LFS lock AfterLoad failed RepoId[%d] not found: %v", l.RepoID, err)
+log.Error("LFS lock AfterLoad failed RepoId[%d] not found: %v", l.RepoID, err)
 }
 }

@@ -56,7 +56,7 @@ func SendUserMail(c *macaron.Context, u *User, tpl base.TplName, code, subject,
 var content bytes.Buffer

 if err := templates.ExecuteTemplate(&content, string(tpl), data); err != nil {
-log.Error(3, "Template: %v", err)
+log.Error("Template: %v", err)
 return
 }

@@ -88,7 +88,7 @@ func SendActivateEmailMail(c *macaron.Context, u *User, email *EmailAddress) {
 var content bytes.Buffer

 if err := templates.ExecuteTemplate(&content, string(mailAuthActivateEmail), data); err != nil {
-log.Error(3, "Template: %v", err)
+log.Error("Template: %v", err)
 return
 }

@@ -107,7 +107,7 @@ func SendRegisterNotifyMail(c *macaron.Context, u *User) {
 var content bytes.Buffer

 if err := templates.ExecuteTemplate(&content, string(mailAuthRegisterNotify), data); err != nil {
-log.Error(3, "Template: %v", err)
+log.Error("Template: %v", err)
 return
 }

@@ -131,7 +131,7 @@ func SendCollaboratorMail(u, doer *User, repo *Repository) {
 var content bytes.Buffer

 if err := templates.ExecuteTemplate(&content, string(mailNotifyCollaborator), data); err != nil {
-log.Error(3, "Template: %v", err)
+log.Error("Template: %v", err)
 return
 }

@@ -165,7 +165,7 @@ func composeIssueCommentMessage(issue *Issue, doer *User, content string, commen
 var mailBody bytes.Buffer

 if err := templates.ExecuteTemplate(&mailBody, string(tplName), data); err != nil {
-log.Error(3, "Template: %v", err)
+log.Error("Template: %v", err)
 }

 msg := mailer.NewMessageFrom(tos, doer.DisplayName(), setting.MailService.FromEmail, subject, mailBody.String())
@@ -244,7 +244,7 @@ func Migrate(x *xorm.Engine) error {

 v := currentVersion.Version
 if minDBVersion > v {
-log.Fatal(4, `Gitea no longer supports auto-migration from your previously installed version.
+log.Fatal(`Gitea no longer supports auto-migration from your previously installed version.
 Please try to upgrade to a lower version (>= v0.6.0) first, then upgrade to current version.`)
 return nil
 }
@@ -315,7 +315,7 @@ func dropTableColumns(sess *xorm.Session, tableName string, columnNames ...strin

 return sess.Commit()
 default:
-log.Fatal(4, "Unrecognized DB")
+log.Fatal("Unrecognized DB")
 }

 return nil
@@ -26,7 +26,7 @@ func removeActionColumns(x *xorm.Engine) error {
 return fmt.Errorf("DROP COLUMN repo_name: %v", err)
 }
 default:
-log.Fatal(4, "Unrecognized DB")
+log.Fatal("Unrecognized DB")
 }
 return nil
 }
@@ -21,7 +21,7 @@ func removeIndexColumnFromRepoUnitTable(x *xorm.Engine) (err error) {
 log.Warn("DROP COLUMN index: %v", err)
 }
 default:
-log.Fatal(4, "Unrecognized DB")
+log.Fatal("Unrecognized DB")
 }

 return nil
@@ -48,7 +48,7 @@ func migrateProtectedBranchStruct(x *xorm.Engine) error {
 log.Warn("DROP COLUMN can_push (skipping): %v", err)
 }
 default:
-log.Fatal(4, "Unrecognized DB")
+log.Fatal("Unrecognized DB")
 }

 return nil
@@ -33,7 +33,7 @@ func addSizeToAttachment(x *xorm.Engine) error {
 localPath := path.Join(setting.AttachmentPath, attach.UUID[0:1], attach.UUID[1:2], attach.UUID)
 fi, err := os.Stat(localPath)
 if err != nil {
-log.Error(4, "calculate file size of attachment[UUID: %s]: %v", attach.UUID, err)
+log.Error("calculate file size of attachment[UUID: %s]: %v", attach.UUID, err)
 continue
 }
 attach.Size = fi.Size()
@@ -303,7 +303,7 @@ func isOrganizationOwner(e Engine, orgID, uid int64) (bool, error) {
 if has, err := e.Get(ownerTeam); err != nil {
 return false, err
 } else if !has {
-log.Error(4, "Organization does not have owner team: %d", orgID)
+log.Error("Organization does not have owner team: %d", orgID)
 return false, nil
 }
 return isTeamMember(e, orgID, ownerTeam.ID, uid)
@@ -69,7 +69,7 @@ func (t *Team) IsOwnerTeam() bool {
 func (t *Team) IsMember(userID int64) bool {
 isMember, err := IsTeamMember(t.OrgID, t.ID, userID)
 if err != nil {
-log.Error(4, "IsMember: %v", err)
+log.Error("IsMember: %v", err)
 return false
 }
 return isMember
@@ -135,7 +135,7 @@ func (pr *PullRequest) GetDefaultMergeMessage() string {
 var err error
 pr.HeadRepo, err = GetRepositoryByID(pr.HeadRepoID)
 if err != nil {
-log.Error(4, "GetRepositoryById[%d]: %v", pr.HeadRepoID, err)
+log.Error("GetRepositoryById[%d]: %v", pr.HeadRepoID, err)
 return ""
 }
 }
@@ -145,7 +145,7 @@ func (pr *PullRequest) GetDefaultMergeMessage() string {
 // GetDefaultSquashMessage returns default message used when squash and merging pull request
 func (pr *PullRequest) GetDefaultSquashMessage() string {
 if err := pr.LoadIssue(); err != nil {
-log.Error(4, "LoadIssue: %v", err)
+log.Error("LoadIssue: %v", err)
 return ""
 }
 return fmt.Sprintf("%s (#%d)", pr.Issue.Title, pr.Issue.Index)
@@ -172,21 +172,21 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest {
 err error
 )
 if err = pr.Issue.loadRepo(e); err != nil {
-log.Error(log.ERROR, "loadRepo[%d]: %v", pr.ID, err)
+log.Error("loadRepo[%d]: %v", pr.ID, err)
 return nil
 }
 apiIssue := pr.Issue.apiFormat(e)
 if pr.BaseRepo == nil {
 pr.BaseRepo, err = getRepositoryByID(e, pr.BaseRepoID)
 if err != nil {
-log.Error(log.ERROR, "GetRepositoryById[%d]: %v", pr.ID, err)
+log.Error("GetRepositoryById[%d]: %v", pr.ID, err)
 return nil
 }
 }
 if pr.HeadRepo == nil {
 pr.HeadRepo, err = getRepositoryByID(e, pr.HeadRepoID)
 if err != nil {
-log.Error(log.ERROR, "GetRepositoryById[%d]: %v", pr.ID, err)
+log.Error("GetRepositoryById[%d]: %v", pr.ID, err)
 return nil
 }
 }
@@ -581,11 +581,11 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
 pr.MergerID = doer.ID

 if err = pr.setMerged(); err != nil {
-log.Error(4, "setMerged [%d]: %v", pr.ID, err)
+log.Error("setMerged [%d]: %v", pr.ID, err)
 }

 if err = MergePullRequestAction(doer, pr.Issue.Repo, pr.Issue); err != nil {
-log.Error(4, "MergePullRequestAction [%d]: %v", pr.ID, err)
+log.Error("MergePullRequestAction [%d]: %v", pr.ID, err)
 }

 // Reset cached commit count
@@ -593,7 +593,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle

 // Reload pull request information.
 if err = pr.LoadAttributes(); err != nil {
-log.Error(4, "LoadAttributes: %v", err)
+log.Error("LoadAttributes: %v", err)
 return nil
 }

@@ -605,14 +605,14 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
 Repository: pr.Issue.Repo.APIFormat(mode),
 Sender: doer.APIFormat(),
 }); err != nil {
-log.Error(4, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(pr.Issue.Repo.ID)
 }

 l, err := baseGitRepo.CommitsBetweenIDs(pr.MergedCommitID, pr.MergeBase)
 if err != nil {
-log.Error(4, "CommitsBetweenIDs: %v", err)
+log.Error("CommitsBetweenIDs: %v", err)
 return nil
 }

@@ -621,7 +621,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
 // to avoid strange diff commits produced.
 mergeCommit, err := baseGitRepo.GetBranchCommit(pr.BaseBranch)
 if err != nil {
-log.Error(4, "GetBranchCommit: %v", err)
+log.Error("GetBranchCommit: %v", err)
 return nil
 }
 if mergeStyle == MergeStyleMerge {
@@ -639,7 +639,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
 Sender: doer.APIFormat(),
 }
 if err = PrepareWebhooks(pr.BaseRepo, HookEventPush, p); err != nil {
-log.Error(4, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(pr.BaseRepo.ID)
 }
@@ -692,7 +692,7 @@ func (pr *PullRequest) setMerged() (err error) {
 func (pr *PullRequest) manuallyMerged() bool {
 commit, err := pr.getMergeCommit()
 if err != nil {
-log.Error(4, "PullRequest[%d].getMergeCommit: %v", pr.ID, err)
+log.Error("PullRequest[%d].getMergeCommit: %v", pr.ID, err)
 return false
 }
 if commit != nil {
@@ -705,7 +705,7 @@ func (pr *PullRequest) manuallyMerged() bool {
 if merger == nil {
 if pr.BaseRepo.Owner == nil {
 if err = pr.BaseRepo.getOwner(x); err != nil {
-log.Error(4, "BaseRepo.getOwner[%d]: %v", pr.ID, err)
+log.Error("BaseRepo.getOwner[%d]: %v", pr.ID, err)
 return false
 }
 }
@@ -715,7 +715,7 @@ func (pr *PullRequest) manuallyMerged() bool {
 pr.MergerID = merger.ID

 if err = pr.setMerged(); err != nil {
-log.Error(4, "PullRequest[%d].setMerged : %v", pr.ID, err)
+log.Error("PullRequest[%d].setMerged : %v", pr.ID, err)
 return false
 }
 log.Info("manuallyMerged[%d]: Marked as manually merged into %s/%s by commit id: %s", pr.ID, pr.BaseRepo.Name, pr.BaseBranch, commit.ID.String())
@@ -936,7 +936,7 @@ func NewPullRequest(repo *Repository, pull *Issue, labelIDs []int64, uuids []str
 Repo: repo,
 IsPrivate: repo.IsPrivate,
 }); err != nil {
-log.Error(4, "NotifyWatchers: %v", err)
+log.Error("NotifyWatchers: %v", err)
 }

 pr.Issue = pull
@@ -949,7 +949,7 @@ func NewPullRequest(repo *Repository, pull *Issue, labelIDs []int64, uuids []str
 Repository: repo.APIFormat(mode),
 Sender: pull.Poster.APIFormat(),
 }); err != nil {
-log.Error(4, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(repo.ID)
 }
@@ -997,12 +997,12 @@ func PullRequests(baseRepoID int64, opts *PullRequestsOptions) ([]*PullRequest,

 countSession, err := listPullRequestStatement(baseRepoID, opts)
 if err != nil {
-log.Error(4, "listPullRequestStatement", err)
+log.Error("listPullRequestStatement: %v", err)
 return nil, 0, err
 }
 maxResults, err := countSession.Count(new(PullRequest))
 if err != nil {
-log.Error(4, "Count PRs", err)
+log.Error("Count PRs: %v", err)
 return nil, maxResults, err
 }

@@ -1010,7 +1010,7 @@ func PullRequests(baseRepoID int64, opts *PullRequestsOptions) ([]*PullRequest,
 findSession, err := listPullRequestStatement(baseRepoID, opts)
 sortIssuesSession(findSession, opts.SortType)
 if err != nil {
-log.Error(4, "listPullRequestStatement", err)
+log.Error("listPullRequestStatement: %v", err)
 return nil, maxResults, err
 }
 findSession.Limit(ItemsPerPage, (opts.Page-1)*ItemsPerPage)
@@ -1215,7 +1215,7 @@ func (pr *PullRequest) AddToTaskQueue() {
 go pullRequestQueue.AddFunc(pr.ID, func() {
 pr.Status = PullRequestStatusChecking
 if err := pr.UpdateCols("status"); err != nil {
-log.Error(5, "AddToTaskQueue.UpdateCols[%d].(add to queue): %v", pr.ID, err)
+log.Error("AddToTaskQueue.UpdateCols[%d].(add to queue): %v", pr.ID, err)
 }
 })
 }
@@ -1290,10 +1290,10 @@ func addHeadRepoTasks(prs []*PullRequest) {
 for _, pr := range prs {
 log.Trace("addHeadRepoTasks[%d]: composing new test task", pr.ID)
 if err := pr.UpdatePatch(); err != nil {
-log.Error(4, "UpdatePatch: %v", err)
+log.Error("UpdatePatch: %v", err)
 continue
 } else if err := pr.PushToBaseRepo(); err != nil {
-log.Error(4, "PushToBaseRepo: %v", err)
+log.Error("PushToBaseRepo: %v", err)
 continue
 }

@@ -1307,23 +1307,23 @@ func AddTestPullRequestTask(doer *User, repoID int64, branch string, isSync bool
 log.Trace("AddTestPullRequestTask [head_repo_id: %d, head_branch: %s]: finding pull requests", repoID, branch)
 prs, err := GetUnmergedPullRequestsByHeadInfo(repoID, branch)
 if err != nil {
-log.Error(4, "Find pull requests [head_repo_id: %d, head_branch: %s]: %v", repoID, branch, err)
+log.Error("Find pull requests [head_repo_id: %d, head_branch: %s]: %v", repoID, branch, err)
 return
 }

 if isSync {
 requests := PullRequestList(prs)
 if err = requests.LoadAttributes(); err != nil {
-log.Error(4, "PullRequestList.LoadAttributes: %v", err)
+log.Error("PullRequestList.LoadAttributes: %v", err)
 }
 if invalidationErr := checkForInvalidation(requests, repoID, doer, branch); invalidationErr != nil {
-log.Error(4, "checkForInvalidation: %v", invalidationErr)
+log.Error("checkForInvalidation: %v", invalidationErr)
 }
 if err == nil {
 for _, pr := range prs {
 pr.Issue.PullRequest = pr
 if err = pr.Issue.LoadAttributes(); err != nil {
-log.Error(4, "LoadAttributes: %v", err)
+log.Error("LoadAttributes: %v", err)
 continue
 }
 if err = PrepareWebhooks(pr.Issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@@ -1333,7 +1333,7 @@ func AddTestPullRequestTask(doer *User, repoID int64, branch string, isSync bool
 Repository: pr.Issue.Repo.APIFormat(AccessModeNone),
 Sender: doer.APIFormat(),
 }); err != nil {
-log.Error(4, "PrepareWebhooks [pull_id: %v]: %v", pr.ID, err)
+log.Error("PrepareWebhooks [pull_id: %v]: %v", pr.ID, err)
 continue
 }
 go HookQueue.Add(pr.Issue.Repo.ID)
@@ -1347,7 +1347,7 @@ func AddTestPullRequestTask(doer *User, repoID int64, branch string, isSync bool
 log.Trace("AddTestPullRequestTask [base_repo_id: %d, base_branch: %s]: finding pull requests", repoID, branch)
 prs, err = GetUnmergedPullRequestsByBaseInfo(repoID, branch)
 if err != nil {
-log.Error(4, "Find pull requests [base_repo_id: %d, base_branch: %s]: %v", repoID, branch, err)
+log.Error("Find pull requests [base_repo_id: %d, base_branch: %s]: %v", repoID, branch, err)
 return
 }
 for _, pr := range prs {
@@ -1367,7 +1367,7 @@ func checkForInvalidation(requests PullRequestList, repoID int64, doer *User, br
 go func() {
 err := requests.InvalidateCodeComments(doer, gitRepo, branch)
 if err != nil {
-log.Error(4, "PullRequestList.InvalidateCodeComments: %v", err)
+log.Error("PullRequestList.InvalidateCodeComments: %v", err)
 }
 }()
 return nil
@@ -1396,7 +1396,7 @@ func (pr *PullRequest) checkAndUpdateStatus() {
 // Make sure there is no waiting test to process before leaving the checking status.
 if !pullRequestQueue.Exist(pr.ID) {
 if err := pr.UpdateCols("status, conflicted_files"); err != nil {
-log.Error(4, "Update[%d]: %v", pr.ID, err)
+log.Error("Update[%d]: %v", pr.ID, err)
 }
 }
 }
@@ -1404,7 +1404,7 @@ func (pr *PullRequest) checkAndUpdateStatus() {
 // IsWorkInProgress determine if the Pull Request is a Work In Progress by its title
 func (pr *PullRequest) IsWorkInProgress() bool {
 if err := pr.LoadIssue(); err != nil {
-log.Error(4, "LoadIssue: %v", err)
+log.Error("LoadIssue: %v", err)
 return false
 }

@@ -1425,7 +1425,7 @@ func (pr *PullRequest) IsFilesConflicted() bool {
 // It returns an empty string when none were found
 func (pr *PullRequest) GetWorkInProgressPrefix() string {
 if err := pr.LoadIssue(); err != nil {
-log.Error(4, "LoadIssue: %v", err)
+log.Error("LoadIssue: %v", err)
 return ""
 }

@@ -1444,7 +1444,7 @@ func TestPullRequests() {

 err := x.Where("status = ?", PullRequestStatusChecking).Find(&prs)
 if err != nil {
-log.Error(3, "Find Checking PRs", err)
+log.Error("Find Checking PRs: %v", err)
 return
 }

@@ -1454,14 +1454,14 @@ func TestPullRequests() {
 for _, pr := range prs {
 checkedPRs[pr.ID] = struct{}{}
 if err := pr.GetBaseRepo(); err != nil {
-log.Error(3, "GetBaseRepo: %v", err)
+log.Error("GetBaseRepo: %v", err)
 continue
 }
 if pr.manuallyMerged() {
 continue
 }
 if err := pr.testPatch(x); err != nil {
-log.Error(3, "testPatch: %v", err)
+log.Error("testPatch: %v", err)
 continue
 }

@@ -1480,12 +1480,12 @@ func TestPullRequests() {

 pr, err := GetPullRequestByID(id)
 if err != nil {
-log.Error(4, "GetPullRequestByID[%s]: %v", prID, err)
+log.Error("GetPullRequestByID[%s]: %v", prID, err)
 continue
 } else if pr.manuallyMerged() {
 continue
 } else if err = pr.testPatch(x); err != nil {
-log.Error(4, "testPatch[%d]: %v", pr.ID, err)
+log.Error("testPatch[%d]: %v", pr.ID, err)
 continue
 }

@@ -199,7 +199,7 @@ func CreateRelease(gitRepo *git.Repository, rel *Release, attachmentUUIDs []stri

 if !rel.IsDraft {
 if err := rel.LoadAttributes(); err != nil {
-log.Error(2, "LoadAttributes: %v", err)
+log.Error("LoadAttributes: %v", err)
 } else {
 mode, _ := AccessLevel(rel.Publisher, rel.Repo)
 if err := PrepareWebhooks(rel.Repo, HookEventRelease, &api.ReleasePayload{
@@ -208,7 +208,7 @@ func CreateRelease(gitRepo *git.Repository, rel *Release, attachmentUUIDs []stri
 Repository: rel.Repo.APIFormat(mode),
 Sender: rel.Publisher.APIFormat(),
 }); err != nil {
-log.Error(2, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(rel.Repo.ID)
 }
@@ -409,7 +409,7 @@ func UpdateRelease(doer *User, gitRepo *git.Repository, rel *Release, attachment
 Repository: rel.Repo.APIFormat(mode),
 Sender: rel.Publisher.APIFormat(),
 }); err1 != nil {
-log.Error(2, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(rel.Repo.ID)
 }
@@ -464,7 +464,7 @@ func DeleteReleaseByID(id int64, u *User, delTag bool) error {
 Repository: rel.Repo.APIFormat(mode),
 Sender: rel.Publisher.APIFormat(),
 }); err != nil {
-log.Error(2, "PrepareWebhooks: %v", err)
+log.Error("PrepareWebhooks: %v", err)
 } else {
 go HookQueue.Add(rel.Repo.ID)
 }
@@ -74,13 +74,13 @@ func LoadRepoConfig() {
 for i, t := range types {
 files, err := options.Dir(t)
 if err != nil {
-log.Fatal(4, "Failed to get %s files: %v", t, err)
+log.Fatal("Failed to get %s files: %v", t, err)
 }
 customPath := path.Join(setting.CustomPath, "options", t)
 if com.IsDir(customPath) {
 customFiles, err := com.StatDir(customPath)
 if err != nil {
-log.Fatal(4, "Failed to get custom %s files: %v", t, err)
+log.Fatal("Failed to get custom %s files: %v", t, err)
 }

 for _, f := range customFiles {
@@ -122,19 +122,19 @@ func NewRepoContext() {

 // Check Git installation.
 if _, err := exec.LookPath("git"); err != nil {
-log.Fatal(4, "Failed to test 'git' command: %v (forgotten install?)", err)
+log.Fatal("Failed to test 'git' command: %v (forgotten install?)", err)
 }

 // Check Git version.
 var err error
 setting.Git.Version, err = git.BinVersion()
 if err != nil {
-log.Fatal(4, "Failed to get Git version: %v", err)
+log.Fatal("Failed to get Git version: %v", err)
 }

 log.Info("Git Version: %s", setting.Git.Version)
 if version.Compare("1.7.1", setting.Git.Version, ">") {
-log.Fatal(4, "Gitea requires Git version greater or equal to 1.7.1")
+log.Fatal("Gitea requires Git version greater or equal to 1.7.1")
 }

 // Git requires setting user.name and user.email in order to commit changes.
@@ -143,11 +143,11 @@ func NewRepoContext() {
 // ExitError indicates this config is not set
 if _, ok := err.(*exec.ExitError); ok || strings.TrimSpace(stdout) == "" {
 if _, stderr, gerr := process.GetManager().Exec("NewRepoContext(set "+configKey+")", "git", "config", "--global", configKey, defaultValue); gerr != nil {
-log.Fatal(4, "Failed to set git %s(%s): %s", configKey, gerr, stderr)
+log.Fatal("Failed to set git %s(%s): %s", configKey, gerr, stderr)
 }
 log.Info("Git config %s set to %s", configKey, defaultValue)
 } else {
-log.Fatal(4, "Failed to get git %s(%s): %s", configKey, err, stderr)
+log.Fatal("Failed to get git %s(%s): %s", configKey, err, stderr)
 }
 }
 }
@@ -155,7 +155,7 @@ func NewRepoContext() {
 // Set git some configurations.
 if _, stderr, err := process.GetManager().Exec("NewRepoContext(git config --global core.quotepath false)",
 "git", "config", "--global", "core.quotepath", "false"); err != nil {
-log.Fatal(4, "Failed to execute 'git config --global core.quotepath false': %s", stderr)
+log.Fatal("Failed to execute 'git config --global core.quotepath false': %s", stderr)
 }

 RemoveAllWithNotice("Clean up repository temporary data", filepath.Join(setting.AppDataPath, "tmp"))
@@ -281,7 +281,7 @@ func (repo *Repository) innerAPIFormat(e Engine, mode AccessMode, isParent bool)
 if !isParent {
 err := repo.getBaseRepo(e)
 if err != nil {
-log.Error(4, "APIFormat: %v", err)
+log.Error("APIFormat: %v", err)
 }
 if repo.BaseRepo != nil {
 parent = repo.BaseRepo.innerAPIFormat(e, mode, true)
@@ -462,7 +462,7 @@ func (repo *Repository) GetOwnerName() error {

 func (repo *Repository) mustOwnerName(e Engine) string {
 if err := repo.getOwnerName(e); err != nil {
-log.Error(4, "Error loading repository owner name: %v", err)
+log.Error("Error loading repository owner name: %v", err)
 return "error"
 }

@@ -724,7 +724,7 @@ var (
 func (repo *Repository) DescriptionHTML() template.HTML {
 desc, err := markup.RenderDescriptionHTML([]byte(repo.Description), repo.HTMLURL(), repo.ComposeMetas())
 if err != nil {
-log.Error(4, "Failed to render description for %s (ID: %d): %v", repo.Name, repo.ID, err)
+log.Error("Failed to render description for %s (ID: %d): %v", repo.Name, repo.ID, err)
 return template.HTML(markup.Sanitize(repo.Description))
 }
 return template.HTML(markup.Sanitize(string(desc)))
@@ -981,12 +981,12 @@ func MigrateRepository(doer, u *User, opts MigrateRepoOptions) (*Repository, err
 }

 if err = SyncReleasesWithTags(repo, gitRepo); err != nil {
-log.Error(4, "Failed to synchronize tags to releases for repository: %v", err)
+log.Error("Failed to synchronize tags to releases for repository: %v", err)
 }
 }

 if err = repo.UpdateSize(); err != nil {
-log.Error(4, "Failed to update size for repository: %v", err)
+log.Error("Failed to update size for repository: %v", err)
 }

 if opts.IsMirror {
@@ -1405,7 +1405,7 @@ func CreateRepository(doer, u *User, opts CreateRepoOptions) (_ *Repository, err
 repoPath := RepoPath(u.Name, repo.Name)
 if err = initRepository(sess, repoPath, u, repo, opts); err != nil {
 if err2 := os.RemoveAll(repoPath); err2 != nil {
-log.Error(4, "initRepository: %v", err)
+log.Error("initRepository: %v", err)
 return nil, fmt.Errorf(
 "delete repo directory %s/%s failed(2): %v", u.Name, repo.Name, err2)
 }
@@ -1435,7 +1435,7 @@ func countRepositories(userID int64, private bool) int64 {

 count, err := sess.Count(new(Repository))
 if err != nil {
-log.Error(4, "countRepositories: %v", err)
+log.Error("countRepositories: %v", err)
 }
 return count
 }
@@ -1690,11 +1690,11 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
 daemonExportFile := path.Join(repo.repoPath(e), `git-daemon-export-ok`)
 if repo.IsPrivate && com.IsExist(daemonExportFile) {
 if err = os.Remove(daemonExportFile); err != nil {
-log.Error(4, "Failed to remove %s: %v", daemonExportFile, err)
+log.Error("Failed to remove %s: %v", daemonExportFile, err)
 }
 } else if !repo.IsPrivate && !com.IsExist(daemonExportFile) {
 if f, err := os.Create(daemonExportFile); err != nil {
-log.Error(4, "Failed to create %s: %v", daemonExportFile, err)
+log.Error("Failed to create %s: %v", daemonExportFile, err)
 } else {
 f.Close()
 }
@@ -1712,7 +1712,7 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
 }

 if err = repo.updateSize(e); err != nil {
-log.Error(4, "Failed to update size for repository: %v", err)
+log.Error("Failed to update size for repository: %v", err)
 }
 }

@@ -1928,7 +1928,7 @@ func DeleteRepository(doer *User, uid, repoID int64) error {

 if repo.NumForks > 0 {
 if _, err = sess.Exec("UPDATE `repository` SET fork_id=0,is_fork=? WHERE fork_id=?", false, repo.ID); err != nil {
-log.Error(4, "reset 'fork_id' and 'is_fork': %v", err)
+log.Error("reset 'fork_id' and 'is_fork': %v", err)
 }
 }

@@ -2090,7 +2090,7 @@ func DeleteOldRepositoryArchives() {
 log.Trace("Doing: ArchiveCleanup")

 if err := x.Where("id > 0").Iterate(new(Repository), deleteOldRepositoryArchives); err != nil {
-log.Error(4, "ArchiveClean: %v", err)
+log.Error("ArchiveClean: %v", err)
 }
 }

@@ -2243,12 +2243,12 @@ func GitFsck() {
 desc := fmt.Sprintf("Failed to health check repository (%s): %v", repoPath, err)
 log.Warn(desc)
 if err = CreateRepositoryNotice(desc); err != nil {
-log.Error(4, "CreateRepositoryNotice: %v", err)
+log.Error("CreateRepositoryNotice: %v", err)
 }
 }
 return nil
 }); err != nil {
-log.Error(4, "GitFsck: %v", err)
+log.Error("GitFsck: %v", err)
 }
 log.Trace("Finished: GitFsck")
 }
@@ -2283,7 +2283,7 @@ type repoChecker struct {
 func repoStatsCheck(checker *repoChecker) {
 results, err := x.Query(checker.querySQL)
 if err != nil {
-log.Error(4, "Select %s: %v", checker.desc, err)
+log.Error("Select %s: %v", checker.desc, err)
 return
 }
 for _, result := range results {
@@ -2291,7 +2291,7 @@ func repoStatsCheck(checker *repoChecker) {
 log.Trace("Updating %s: %d", checker.desc, id)
 _, err = x.Exec(checker.correctSQL, id, id)
 if err != nil {
-log.Error(4, "Update %s[%d]: %v", checker.desc, id, err)
+log.Error("Update %s[%d]: %v", checker.desc, id, err)
 }
 }
 }
@@ -2345,14 +2345,14 @@ func CheckRepoStats() {
 desc := "repository count 'num_closed_issues'"
 results, err := x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_closed_issues!=(SELECT COUNT(*) FROM `issue` WHERE repo_id=repo.id AND is_closed=? AND is_pull=?)", true, false)
 if err != nil {
-log.Error(4, "Select %s: %v", desc, err)
+log.Error("Select %s: %v", desc, err)
 } else {
 for _, result := range results {
 id := com.StrTo(result["id"]).MustInt64()
 log.Trace("Updating %s: %d", desc, id)
 _, err = x.Exec("UPDATE `repository` SET num_closed_issues=(SELECT COUNT(*) FROM `issue` WHERE repo_id=? AND is_closed=? AND is_pull=?) WHERE id=?", id, true, false, id)
 if err != nil {
-log.Error(4, "Update %s[%d]: %v", desc, id, err)
+log.Error("Update %s[%d]: %v", desc, id, err)
 }
 }
 }
@@ -2362,7 +2362,7 @@ func CheckRepoStats() {
 // ***** START: Repository.NumForks *****
 results, err = x.Query("SELECT repo.id FROM `repository` repo WHERE repo.num_forks!=(SELECT COUNT(*) FROM `repository` WHERE fork_id=repo.id)")
 if err != nil {
-log.Error(4, "Select repository count 'num_forks': %v", err)
+log.Error("Select repository count 'num_forks': %v", err)
 } else {
 for _, result := range results {
 id := com.StrTo(result["id"]).MustInt64()
@@ -2370,19 +2370,19 @@ func CheckRepoStats() {

 repo, err := GetRepositoryByID(id)
 if err != nil {
-log.Error(4, "GetRepositoryByID[%d]: %v", id, err)
+log.Error("GetRepositoryByID[%d]: %v", id, err)
 continue
 }

 rawResult, err := x.Query("SELECT COUNT(*) FROM `repository` WHERE fork_id=?", repo.ID)
 if err != nil {
-log.Error(4, "Select count of forks[%d]: %v", repo.ID, err)
+log.Error("Select count of forks[%d]: %v", repo.ID, err)
 continue
 }
 repo.NumForks = int(parseCountResult(rawResult))

 if err = UpdateRepository(repo, false); err != nil {
-log.Error(4, "UpdateRepository[%d]: %v", id, err)
+log.Error("UpdateRepository[%d]: %v", id, err)
 continue
 }
 }
@@ -2485,13 +2485,13 @@ func ForkRepository(doer, u *User, oldRepo *Repository, name, desc string) (_ *R
 Repo: repo.APIFormat(mode),
 Sender: doer.APIFormat(),
 }); err != nil {
-log.Error(2, "PrepareWebhooks [repo_id: %d]: %v", oldRepo.ID, err)
+log.Error("PrepareWebhooks [repo_id: %d]: %v", oldRepo.ID, err)
 } else {
 go HookQueue.Add(oldRepo.ID)
 }

 if err = repo.UpdateSize(); err != nil {
-log.Error(4, "Failed to update size for repository: %v", err)
+log.Error("Failed to update size for repository: %v", err)
 }

 // Copy LFS meta objects in new session
@@ -113,7 +113,7 @@ func populateRepoIndexer(maxRepoID int64) {
 Limit(RepositoryListDefaultPageSize).
 Find(&repos)
 if err != nil {
-log.Error(4, "populateRepoIndexer: %v", err)
+log.Error("populateRepoIndexer: %v", err)
 return
 } else if len(repos) == 0 {
 break
@@ -314,11 +314,11 @@ func processRepoIndexerOperationQueue() {
 op := <-repoIndexerOperationQueue
 if op.deleted {
 if err := indexer.DeleteRepoFromIndexer(op.repo.ID); err != nil {
-log.Error(4, "DeleteRepoFromIndexer: %v", err)
+log.Error("DeleteRepoFromIndexer: %v", err)
 }
 } else {
 if err := updateRepoIndexer(op.repo); err != nil {
-log.Error(4, "updateRepoIndexer: %v", err)
+log.Error("updateRepoIndexer: %v", err)
 }
 }
 }
@@ -57,7 +57,7 @@ func (m *Mirror) AfterLoad(session *xorm.Session) {
 var err error
 m.Repo, err = getRepositoryByID(session, m.RepoID)
 if err != nil {
-log.Error(3, "getRepositoryByID[%d]: %v", m.ID, err)
+log.Error("getRepositoryByID[%d]: %v", m.ID, err)
 }
 }

@@ -85,7 +85,7 @@ func (m *Mirror) readAddress() {
 var err error
 m.address, err = remoteAddress(m.Repo.RepoPath())
 if err != nil {
-log.Error(4, "remoteAddress: %v", err)
+log.Error("remoteAddress: %v", err)
 }
 }

@@ -164,12 +164,12 @@ func parseRemoteUpdateOutput(output string) []*mirrorSyncResult {
 case strings.HasPrefix(lines[i], " "): // New commits of a reference
 delimIdx := strings.Index(lines[i][3:], " ")
 if delimIdx == -1 {
-log.Error(2, "SHA delimiter not found: %q", lines[i])
+log.Error("SHA delimiter not found: %q", lines[i])
 continue
 }
 shas := strings.Split(lines[i][3:delimIdx+3], "..")
 if len(shas) != 2 {
-log.Error(2, "Expect two SHAs but not what found: %q", lines[i])
+log.Error("Expect two SHAs but not what found: %q", lines[i])
 continue
 }
 results = append(results, &mirrorSyncResult{
@@ -204,13 +204,13 @@ func (m *Mirror) runSync() ([]*mirrorSyncResult, bool) {
 // contain a password
 message, err := sanitizeOutput(stderr, repoPath)
 if err != nil {
-log.Error(4, "sanitizeOutput: %v", err)
+log.Error("sanitizeOutput: %v", err)
 return nil, false
 }
 desc := fmt.Sprintf("Failed to update mirror repository '%s': %s", repoPath, message)
-log.Error(4, desc)
+log.Error(desc)
 if err = CreateRepositoryNotice(desc); err != nil {
-log.Error(4, "CreateRepositoryNotice: %v", err)
+log.Error("CreateRepositoryNotice: %v", err)
 }
 return nil, false
 }
@@ -218,15 +218,15 @@ func (m *Mirror) runSync() ([]*mirrorSyncResult, bool) {

 gitRepo, err := git.OpenRepository(repoPath)
 if err != nil {
-log.Error(4, "OpenRepository: %v", err)
+log.Error("OpenRepository: %v", err)
 return nil, false
 }
 if err = SyncReleasesWithTags(m.Repo, gitRepo); err != nil {
-log.Error(4, "Failed to synchronize tags to releases for repository: %v", err)
+log.Error("Failed to synchronize tags to releases for repository: %v", err)
 }

 if err := m.Repo.UpdateSize(); err != nil {
-log.Error(4, "Failed to update size for mirror repository: %v", err)
+log.Error("Failed to update size for mirror repository: %v", err)
 }

 if m.Repo.HasWiki() {
@@ -237,13 +237,13 @@ func (m *Mirror) runSync() ([]*mirrorSyncResult, bool) {
 // contain a password
 message, err := sanitizeOutput(stderr, wikiPath)
 if err != nil {
-log.Error(4, "sanitizeOutput: %v", err)
+log.Error("sanitizeOutput: %v", err)
 return nil, false
 }
 desc := fmt.Sprintf("Failed to update mirror wiki repository '%s': %s", wikiPath, message)
-log.Error(4, desc)
+log.Error(desc)
 if err = CreateRepositoryNotice(desc); err != nil {
-log.Error(4, "CreateRepositoryNotice: %v", err)
+log.Error("CreateRepositoryNotice: %v", err)
 }
 return nil, false
 }
@@ -251,7 +251,7 @@ func (m *Mirror) runSync() ([]*mirrorSyncResult, bool) {

 branches, err := m.Repo.GetBranches()
 if err != nil {
-log.Error(4, "GetBranches: %v", err)
+log.Error("GetBranches: %v", err)
 return nil, false
 }

@@ -310,14 +310,14 @@ func MirrorUpdate() {
 Iterate(new(Mirror), func(idx int, bean interface{}) error {
 m := bean.(*Mirror)
 if m.Repo == nil {
-log.Error(4, "Disconnected mirror repository found: %d", m.ID)
+log.Error("Disconnected mirror repository found: %d", m.ID)
 return nil
 }

 MirrorQueue.Add(m.RepoID)
 return nil
 }); err != nil {
-log.Error(4, "MirrorUpdate: %v", err)
+log.Error("MirrorUpdate: %v", err)
 }
 }

@@ -333,7 +333,7 @@ func SyncMirrors() {

 m, err := GetMirrorByRepoID(com.StrTo(repoID).MustInt64())
 if err != nil {
-log.Error(4, "GetMirrorByRepoID [%s]: %v", repoID, err)
+log.Error("GetMirrorByRepoID [%s]: %v", repoID, err)
 continue
 }

@@ -344,7 +344,7 @@ func SyncMirrors() {

 m.ScheduleNextUpdate()
 if err = updateMirror(sess, m); err != nil {
-log.Error(4, "UpdateMirror [%s]: %v", repoID, err)
+log.Error("UpdateMirror [%s]: %v", repoID, err)
 continue
 }

@@ -354,7 +354,7 @@ func SyncMirrors() {
 } else {
 gitRepo, err = git.OpenRepository(m.Repo.RepoPath())
 if err != nil {
-log.Error(2, "OpenRepository [%d]: %v", m.RepoID, err)
+log.Error("OpenRepository [%d]: %v", m.RepoID, err)
 continue
 }
 }
@@ -368,7 +368,7 @@ func SyncMirrors() {
 // Create reference
 if result.oldCommitID == gitShortEmptySha {
 if err = MirrorSyncCreateAction(m.Repo, result.refName); err != nil {
-log.Error(2, "MirrorSyncCreateAction [repo_id: %d]: %v", m.RepoID, err)
+log.Error("MirrorSyncCreateAction [repo_id: %d]: %v", m.RepoID, err)
 }
 continue
 }
@@ -376,7 +376,7 @@ func SyncMirrors() {
 // Delete reference
 if result.newCommitID == gitShortEmptySha {
 if err = MirrorSyncDeleteAction(m.Repo, result.refName); err != nil {
-log.Error(2, "MirrorSyncDeleteAction [repo_id: %d]: %v", m.RepoID, err)
+log.Error("MirrorSyncDeleteAction [repo_id: %d]: %v", m.RepoID, err)
 }
 continue
 }
@@ -384,17 +384,17 @@ func SyncMirrors() {
 // Push commits
 oldCommitID, err := git.GetFullCommitID(gitRepo.Path, result.oldCommitID)
 if err != nil {
-log.Error(2, "GetFullCommitID [%d]: %v", m.RepoID, err)
+log.Error("GetFullCommitID [%d]: %v", m.RepoID, err)
 continue
 }
 newCommitID, err := git.GetFullCommitID(gitRepo.Path, result.newCommitID)
 if err != nil {
-log.Error(2, "GetFullCommitID [%d]: %v", m.RepoID, err)
+log.Error("GetFullCommitID [%d]: %v", m.RepoID, err)
 continue
 }
 commits, err := gitRepo.CommitsBetweenIDs(newCommitID, oldCommitID)
 if err != nil {
-log.Error(2, "CommitsBetweenIDs [repo_id: %d, new_commit_id: %s, old_commit_id: %s]: %v", m.RepoID, newCommitID, oldCommitID, err)
+log.Error("CommitsBetweenIDs [repo_id: %d, new_commit_id: %s, old_commit_id: %s]: %v", m.RepoID, newCommitID, oldCommitID, err)
 continue
 }
 if err = MirrorSyncPushAction(m.Repo, MirrorSyncPushActionOptions{
@ -403,7 +403,7 @@ func SyncMirrors() {
|
||||||
NewCommitID: newCommitID,
|
NewCommitID: newCommitID,
|
||||||
Commits: ListToPushCommits(commits),
|
Commits: ListToPushCommits(commits),
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
log.Error(2, "MirrorSyncPushAction [repo_id: %d]: %v", m.RepoID, err)
|
log.Error("MirrorSyncPushAction [repo_id: %d]: %v", m.RepoID, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -411,12 +411,12 @@ func SyncMirrors() {
|
||||||
// Get latest commit date and update to current repository updated time
|
// Get latest commit date and update to current repository updated time
|
||||||
commitDate, err := git.GetLatestCommitTime(m.Repo.RepoPath())
|
commitDate, err := git.GetLatestCommitTime(m.Repo.RepoPath())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(2, "GetLatestCommitDate [%s]: %v", m.RepoID, err)
|
log.Error("GetLatestCommitDate [%d]: %v", m.RepoID, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = sess.Exec("UPDATE repository SET updated_unix = ? WHERE id = ?", commitDate.Unix(), m.RepoID); err != nil {
|
if _, err = sess.Exec("UPDATE repository SET updated_unix = ? WHERE id = ?", commitDate.Unix(), m.RepoID); err != nil {
|
||||||
log.Error(2, "Update repository 'updated_unix' [%s]: %v", m.RepoID, err)
|
log.Error("Update repository 'updated_unix' [%d]: %v", m.RepoID, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
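
The change repeated throughout these hunks is the removal of the numeric skip-level argument from log.Error (and, further below, log.Fatal and log.Critical): call sites now pass only a format string and its arguments. A minimal sketch of the call-site difference, assuming only the import path used elsewhere in this diff; the helper name is hypothetical:

package example

import "code.gitea.io/gitea/modules/log"

// logMirrorFailure only illustrates the call-site change made in this commit.
func logMirrorFailure(repoID string, err error) {
	// Before this commit: log.Error(4, "UpdateMirror [%s]: %v", repoID, err)
	// After: the skip level is gone, the first argument is the format string.
	log.Error("UpdateMirror [%s]: %v", repoID, err)
}
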
@ -330,7 +330,7 @@ func appendAuthorizedKeysToFile(keys ...*PublicKey) error {

	// .ssh directory should have mode 700, and authorized_keys file should have mode 600.
	if fi.Mode().Perm() > 0600 {
-		log.Error(4, "authorized_keys file has unusual permission flags: %s - setting to -rw-------", fi.Mode().Perm().String())
+		log.Error("authorized_keys file has unusual permission flags: %s - setting to -rw-------", fi.Mode().Perm().String())
		if err = f.Chmod(0600); err != nil {
			return err
		}
@ -281,7 +281,7 @@ func ParseCommitsWithStatus(oldCommits *list.List, repo *Repository) *list.List
		}
		statuses, err := GetLatestCommitStatus(repo, commit.ID.String(), 0)
		if err != nil {
-			log.Error(3, "GetLatestCommitStatus: %v", err)
+			log.Error("GetLatestCommitStatus: %v", err)
		} else {
			commit.Status = CalcCommitStatus(statuses)
		}
@ -52,7 +52,7 @@ func (list U2FRegistrationList) ToRegistrations() []u2f.Registration {
	for _, reg := range list {
		r, err := reg.Parse()
		if err != nil {
-			log.Fatal(4, "parsing u2f registration: %v", err)
+			log.Fatal("parsing u2f registration: %v", err)
			continue
		}
		regs = append(regs, *r)
@ -215,7 +215,7 @@ func pushUpdate(opts PushUpdateOptions) (repo *Repository, err error) {
	}

	if err = repo.UpdateSize(); err != nil {
-		log.Error(4, "Failed to update size for repository: %v", err)
+		log.Error("Failed to update size for repository: %v", err)
	}

	var commits = &PushCommits{}
@ -367,7 +367,7 @@ func (u *User) SizedRelAvatarLink(size int) string {
	case setting.DisableGravatar, setting.OfflineMode:
		if !com.IsFile(u.CustomAvatarPath()) {
			if err := u.GenerateRandomAvatar(); err != nil {
-				log.Error(3, "GenerateRandomAvatar: %v", err)
+				log.Error("GenerateRandomAvatar: %v", err)
			}
		}

@ -523,7 +523,7 @@ func (u *User) IsOrganization() bool {
func (u *User) IsUserOrgOwner(orgID int64) bool {
	isOwner, err := IsOrganizationOwner(orgID, u.ID)
	if err != nil {
-		log.Error(4, "IsOrganizationOwner: %v", err)
+		log.Error("IsOrganizationOwner: %v", err)
		return false
	}
	return isOwner

@ -533,7 +533,7 @@ func (u *User) IsUserOrgOwner(orgID int64) bool {
func (u *User) IsUserPartOfOrg(userID int64) bool {
	isMember, err := IsOrganizationMember(u.ID, userID)
	if err != nil {
-		log.Error(4, "IsOrganizationMember: %v", err)
+		log.Error("IsOrganizationMember: %v", err)
		return false
	}
	return isMember

@ -543,7 +543,7 @@ func (u *User) IsUserPartOfOrg(userID int64) bool {
func (u *User) IsPublicMember(orgID int64) bool {
	isMember, err := IsPublicMembership(orgID, u.ID)
	if err != nil {
-		log.Error(4, "IsPublicMembership: %v", err)
+		log.Error("IsPublicMembership: %v", err)
		return false
	}
	return isMember

@ -864,7 +864,7 @@ func getVerifyUser(code string) (user *User) {
		if user, err = GetUserByName(string(b)); user != nil {
			return user
		}
-		log.Error(4, "user.getVerifyUser: %v", err)
+		log.Error("user.getVerifyUser: %v", err)
	}

	return nil
@ -1490,11 +1490,11 @@ func deleteKeysMarkedForDeletion(keys []string) (bool, error) {
	for _, KeyToDelete := range keys {
		key, err := searchPublicKeyByContentWithEngine(sess, KeyToDelete)
		if err != nil {
-			log.Error(4, "SearchPublicKeyByContent: %v", err)
+			log.Error("SearchPublicKeyByContent: %v", err)
			continue
		}
		if err = deletePublicKeys(sess, key.ID); err != nil {
-			log.Error(4, "deletePublicKeys: %v", err)
+			log.Error("deletePublicKeys: %v", err)
			continue
		}
		sshKeysNeedUpdate = true

@ -1515,7 +1515,7 @@ func addLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) boo
		if err == nil {
			sshKeyName := fmt.Sprintf("%s-%s", s.Name, sshKey[0:40])
			if _, err := AddPublicKey(usr.ID, sshKeyName, sshKey, s.ID); err != nil {
-				log.Error(4, "addLdapSSHPublicKeys[%s]: Error adding LDAP Public SSH Key for user %s: %v", s.Name, usr.Name, err)
+				log.Error("addLdapSSHPublicKeys[%s]: Error adding LDAP Public SSH Key for user %s: %v", s.Name, usr.Name, err)
			} else {
				log.Trace("addLdapSSHPublicKeys[%s]: Added LDAP Public SSH Key for user %s", s.Name, usr.Name)
				sshKeysNeedUpdate = true

@ -1537,7 +1537,7 @@ func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []str
	var giteaKeys []string
	keys, err := ListPublicLdapSSHKeys(usr.ID, s.ID)
	if err != nil {
-		log.Error(4, "synchronizeLdapSSHPublicKeys[%s]: Error listing LDAP Public SSH Keys for user %s: %v", s.Name, usr.Name, err)
+		log.Error("synchronizeLdapSSHPublicKeys[%s]: Error listing LDAP Public SSH Keys for user %s: %v", s.Name, usr.Name, err)
	}

	for _, v := range keys {

@ -1586,7 +1586,7 @@ func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []str
	// Delete LDAP keys from DB that doesn't exist in LDAP
	needUpd, err := deleteKeysMarkedForDeletion(giteaKeysToDelete)
	if err != nil {
-		log.Error(4, "synchronizeLdapSSHPublicKeys[%s]: Error deleting LDAP Public SSH Keys marked for deletion for user %s: %v", s.Name, usr.Name, err)
+		log.Error("synchronizeLdapSSHPublicKeys[%s]: Error deleting LDAP Public SSH Keys marked for deletion for user %s: %v", s.Name, usr.Name, err)
	}
	if needUpd {
		sshKeysNeedUpdate = true

@ -1606,7 +1606,7 @@ func SyncExternalUsers() {

	ls, err := LoginSources()
	if err != nil {
-		log.Error(4, "SyncExternalUsers: %v", err)
+		log.Error("SyncExternalUsers: %v", err)
		return
	}

@ -1669,7 +1669,7 @@ func SyncExternalUsers() {
				err = CreateUser(usr)

				if err != nil {
-					log.Error(4, "SyncExternalUsers[%s]: Error creating user %s: %v", s.Name, su.Username, err)
+					log.Error("SyncExternalUsers[%s]: Error creating user %s: %v", s.Name, su.Username, err)
				} else if isAttributeSSHPublicKeySet {
					log.Trace("SyncExternalUsers[%s]: Adding LDAP Public SSH Keys for user %s", s.Name, usr.Name)
					if addLdapSSHPublicKeys(usr, s, su.SSHPublicKey) {

@ -1702,7 +1702,7 @@ func SyncExternalUsers() {

					err = UpdateUserCols(usr, "full_name", "email", "is_admin", "is_active")
					if err != nil {
-						log.Error(4, "SyncExternalUsers[%s]: Error updating user %s: %v", s.Name, usr.Name, err)
+						log.Error("SyncExternalUsers[%s]: Error updating user %s: %v", s.Name, usr.Name, err)
					}
				}
			}

@ -1729,7 +1729,7 @@ func SyncExternalUsers() {
				usr.IsActive = false
				err = UpdateUserCols(&usr, "is_active")
				if err != nil {
-					log.Error(4, "SyncExternalUsers[%s]: Error deactivating user %s: %v", s.Name, usr.Name, err)
+					log.Error("SyncExternalUsers[%s]: Error deactivating user %s: %v", s.Name, usr.Name, err)
				}
			}
		}
@ -123,7 +123,7 @@ type Webhook struct {
func (w *Webhook) AfterLoad() {
	w.HookEvent = &HookEvent{}
	if err := json.Unmarshal([]byte(w.Events), w.HookEvent); err != nil {
-		log.Error(3, "Unmarshal[%d]: %v", w.ID, err)
+		log.Error("Unmarshal[%d]: %v", w.ID, err)
	}
}

@ -131,7 +131,7 @@ func (w *Webhook) AfterLoad() {
func (w *Webhook) GetSlackHook() *SlackMeta {
	s := &SlackMeta{}
	if err := json.Unmarshal([]byte(w.Meta), s); err != nil {
-		log.Error(4, "webhook.GetSlackHook(%d): %v", w.ID, err)
+		log.Error("webhook.GetSlackHook(%d): %v", w.ID, err)
	}
	return s
}

@ -140,7 +140,7 @@ func (w *Webhook) GetSlackHook() *SlackMeta {
func (w *Webhook) GetDiscordHook() *DiscordMeta {
	s := &DiscordMeta{}
	if err := json.Unmarshal([]byte(w.Meta), s); err != nil {
-		log.Error(4, "webhook.GetDiscordHook(%d): %v", w.ID, err)
+		log.Error("webhook.GetDiscordHook(%d): %v", w.ID, err)
	}
	return s
}

@ -572,13 +572,13 @@ func (t *HookTask) AfterLoad() {

	t.RequestInfo = &HookRequest{}
	if err := json.Unmarshal([]byte(t.RequestContent), t.RequestInfo); err != nil {
-		log.Error(3, "Unmarshal RequestContent[%d]: %v", t.ID, err)
+		log.Error("Unmarshal RequestContent[%d]: %v", t.ID, err)
	}

	if len(t.ResponseContent) > 0 {
		t.ResponseInfo = &HookResponse{}
		if err := json.Unmarshal([]byte(t.ResponseContent), t.ResponseInfo); err != nil {
-			log.Error(3, "Unmarshal ResponseContent[%d]: %v", t.ID, err)
+			log.Error("Unmarshal ResponseContent[%d]: %v", t.ID, err)
		}
	}
}

@ -586,7 +586,7 @@ func (t *HookTask) AfterLoad() {
func (t *HookTask) simpleMarshalJSON(v interface{}) string {
	p, err := json.Marshal(v)
	if err != nil {
-		log.Error(3, "Marshal [%d]: %v", t.ID, err)
+		log.Error("Marshal [%d]: %v", t.ID, err)
	}
	return string(p)
}

@ -666,7 +666,7 @@ func prepareWebhook(e Engine, w *Webhook, repo *Repository, event HookEventType,
	if len(w.Secret) > 0 {
		data, err := payloader.JSONPayload()
		if err != nil {
-			log.Error(2, "prepareWebhooks.JSONPayload: %v", err)
+			log.Error("prepareWebhooks.JSONPayload: %v", err)
		}
		sig := hmac.New(sha256.New, []byte(w.Secret))
		sig.Write(data)

@ -765,13 +765,13 @@ func (t *HookTask) deliver() {
		}

		if err := UpdateHookTask(t); err != nil {
-			log.Error(4, "UpdateHookTask [%d]: %v", t.ID, err)
+			log.Error("UpdateHookTask [%d]: %v", t.ID, err)
		}

		// Update webhook last delivery status.
		w, err := GetWebhookByID(t.HookID)
		if err != nil {
-			log.Error(5, "GetWebhookByID: %v", err)
+			log.Error("GetWebhookByID: %v", err)
			return
		}
		if t.IsSucceed {

@ -780,7 +780,7 @@ func (t *HookTask) deliver() {
			w.LastStatus = HookStatusFail
		}
		if err = UpdateWebhookLastStatus(w); err != nil {
-			log.Error(5, "UpdateWebhookLastStatus: %v", err)
+			log.Error("UpdateWebhookLastStatus: %v", err)
			return
		}
	}()

@ -813,7 +813,7 @@ func DeliverHooks() {
	tasks := make([]*HookTask, 0, 10)
	err := x.Where("is_delivered=?", false).Find(&tasks)
	if err != nil {
-		log.Error(4, "DeliverHooks: %v", err)
+		log.Error("DeliverHooks: %v", err)
		return
	}

@ -829,13 +829,13 @@ func DeliverHooks() {

		repoID, err := com.StrTo(repoIDStr).Int64()
		if err != nil {
-			log.Error(4, "Invalid repo ID: %s", repoIDStr)
+			log.Error("Invalid repo ID: %s", repoIDStr)
			continue
		}

		tasks = make([]*HookTask, 0, 5)
		if err := x.Where("repo_id=? AND is_delivered=?", repoID, false).Find(&tasks); err != nil {
-			log.Error(4, "Get repository [%s] hook tasks: %v", repoID, err)
+			log.Error("Get repository [%d] hook tasks: %v", repoID, err)
			continue
		}
		for _, t := range tasks {
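
Besides dropping the level argument, the last hunk above also corrects the format verb: repoID is an int64 here (it comes from com.StrTo(repoIDStr).Int64()), so %s would render a diagnostic marker instead of the number. A small standalone illustration of that fmt behaviour:

package main

import "fmt"

func main() {
	var repoID int64 = 42
	// %s with an integer prints a marker, not the value:
	fmt.Printf("Get repository [%s] hook tasks\n", repoID) // Get repository [%!s(int64=42)] hook tasks
	// %d is the appropriate verb for an integer ID:
	fmt.Printf("Get repository [%d] hook tasks\n", repoID) // Get repository [42] hook tasks
}
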
@ -63,13 +63,13 @@ func SignedInID(ctx *macaron.Context, sess session.Store) int64 {
		t, err := models.GetAccessTokenBySHA(tokenSHA)
		if err != nil {
			if models.IsErrAccessTokenNotExist(err) || models.IsErrAccessTokenEmpty(err) {
-				log.Error(4, "GetAccessTokenBySHA: %v", err)
+				log.Error("GetAccessTokenBySHA: %v", err)
			}
			return 0
		}
		t.UpdatedUnix = util.TimeStampNow()
		if err = models.UpdateAccessToken(t); err != nil {
-			log.Error(4, "UpdateAccessToken: %v", err)
+			log.Error("UpdateAccessToken: %v", err)
		}
		ctx.Data["IsApiToken"] = true
		return t.UID

@ -92,7 +92,7 @@ func checkOAuthAccessToken(accessToken string) int64 {
	}
	token, err := models.ParseOAuth2Token(accessToken)
	if err != nil {
-		log.Trace("ParseOAuth2Token", err)
+		log.Trace("ParseOAuth2Token: %v", err)
		return 0
	}
	var grant *models.OAuth2Grant

@ -120,7 +120,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
		if err == nil {
			return user, false
		} else if !models.IsErrUserNotExist(err) {
-			log.Error(4, "GetUserById: %v", err)
+			log.Error("GetUserById: %v", err)
		}
	}

@ -130,7 +130,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
		u, err := models.GetUserByName(webAuthUser)
		if err != nil {
			if !models.IsErrUserNotExist(err) {
-				log.Error(4, "GetUserByName: %v", err)
+				log.Error("GetUserByName: %v", err)
				return nil, false
			}

@ -151,7 +151,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
			}
			if err = models.CreateUser(u); err != nil {
				// FIXME: should I create a system notice?
-				log.Error(4, "CreateUser: %v", err)
+				log.Error("CreateUser: %v", err)
				return nil, false
			}
			return u, false

@ -183,13 +183,13 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
			if isUsernameToken {
				u, err = models.GetUserByID(token.UID)
				if err != nil {
-					log.Error(4, "GetUserByID: %v", err)
+					log.Error("GetUserByID: %v", err)
					return nil, false
				}
			} else {
				u, err = models.GetUserByName(uname)
				if err != nil {
-					log.Error(4, "GetUserByID: %v", err)
+					log.Error("GetUserByID: %v", err)
					return nil, false
				}
				if u.ID != token.UID {

@ -198,11 +198,11 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
			}
			token.UpdatedUnix = util.TimeStampNow()
			if err = models.UpdateAccessToken(token); err != nil {
-				log.Error(4, "UpdateAccessToken: %v", err)
+				log.Error("UpdateAccessToken: %v", err)
			}
		} else {
			if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) {
-				log.Error(4, "GetAccessTokenBySha: %v", err)
+				log.Error("GetAccessTokenBySha: %v", err)
			}
		}

@ -210,7 +210,7 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
		u, err = models.UserSignIn(uname, passwd)
		if err != nil {
			if !models.IsErrUserNotExist(err) {
-				log.Error(4, "UserSignIn: %v", err)
+				log.Error("UserSignIn: %v", err)
			}
			return nil, false
		}
@ -107,7 +107,7 @@ func (ls *Source) findUserDN(l *ldap.Conn, name string) (string, bool) {

	userDN := sr.Entries[0].DN
	if userDN == "" {
-		log.Error(4, "LDAP search was successful, but found no DN!")
+		log.Error("LDAP search was successful, but found no DN!")
		return "", false
	}

@ -162,7 +162,7 @@ func checkAdmin(l *ldap.Conn, ls *Source, userDN string) bool {
	sr, err := l.Search(search)

	if err != nil {
-		log.Error(4, "LDAP Admin Search failed unexpectedly! (%v)", err)
+		log.Error("LDAP Admin Search failed unexpectedly! (%v)", err)
	} else if len(sr.Entries) < 1 {
		log.Trace("LDAP Admin Search found no matching entries.")
	} else {

@ -176,12 +176,12 @@ func checkAdmin(l *ldap.Conn, ls *Source, userDN string) bool {
func (ls *Source) SearchEntry(name, passwd string, directBind bool) *SearchResult {
	// See https://tools.ietf.org/search/rfc4513#section-5.1.2
	if len(passwd) == 0 {
-		log.Debug("Auth. failed for %s, password cannot be empty")
+		log.Debug("Auth. failed for %s, password cannot be empty", name)
		return nil
	}
	l, err := dial(ls)
	if err != nil {
-		log.Error(4, "LDAP Connect error, %s:%v", ls.Host, err)
+		log.Error("LDAP Connect error, %s:%v", ls.Host, err)
		ls.Enabled = false
		return nil
	}
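
The log.Debug fix above supplies the missing name argument; without it the %s verb has nothing to consume and fmt reports a MISSING placeholder. A standalone illustration of that behaviour:

package main

import "fmt"

func main() {
	name := "someuser"
	// The old call omitted the argument, so the placeholder is reported as missing:
	fmt.Printf("Auth. failed for %s, password cannot be empty\n") // Auth. failed for %!s(MISSING), password cannot be empty
	// The fixed call passes the value the verb expects:
	fmt.Printf("Auth. failed for %s, password cannot be empty\n", name)
}
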
@ -261,7 +261,7 @@ func (ls *Source) SearchEntry(name, passwd string, directBind bool) *SearchResul

	sr, err := l.Search(search)
	if err != nil {
-		log.Error(4, "LDAP Search failed unexpectedly! (%v)", err)
+		log.Error("LDAP Search failed unexpectedly! (%v)", err)
		return nil
	} else if len(sr.Entries) < 1 {
		if directBind {

@ -311,7 +311,7 @@ func (ls *Source) UsePagedSearch() bool {
func (ls *Source) SearchEntries() []*SearchResult {
	l, err := dial(ls)
	if err != nil {
-		log.Error(4, "LDAP Connect error, %s:%v", ls.Host, err)
+		log.Error("LDAP Connect error, %s:%v", ls.Host, err)
		ls.Enabled = false
		return nil
	}

@ -349,7 +349,7 @@ func (ls *Source) SearchEntries() []*SearchResult {
		sr, err = l.Search(search)
	}
	if err != nil {
-		log.Error(4, "LDAP Search failed unexpectedly! (%v)", err)
+		log.Error("LDAP Search failed unexpectedly! (%v)", err)
		return nil
	}
@ -110,7 +110,7 @@ func GetRandomBytesAsBase64(n int) string {
	_, err := io.ReadFull(rand.Reader, bytes)

	if err != nil {
-		log.Fatal(4, "Error reading random bytes: %v", err)
+		log.Fatal("Error reading random bytes: %v", err)
	}

	return base64.RawURLEncoding.EncodeToString(bytes)

@ -197,12 +197,12 @@ const DefaultAvatarSize = -1
func libravatarURL(email string) (*url.URL, error) {
	urlStr, err := setting.LibravatarService.FromEmail(email)
	if err != nil {
-		log.Error(4, "LibravatarService.FromEmail(email=%s): error %v", email, err)
+		log.Error("LibravatarService.FromEmail(email=%s): error %v", email, err)
		return nil, err
	}
	u, err := url.Parse(urlStr)
	if err != nil {
-		log.Error(4, "Failed to parse libravatar url(%s): error %v", urlStr, err)
+		log.Error("Failed to parse libravatar url(%s): error %v", urlStr, err)
		return nil, err
	}
	return u, nil
@ -72,7 +72,7 @@ func (ctx *APIContext) Error(status int, title string, obj interface{}) {
	}

	if status == 500 {
-		log.Error(4, "%s: %s", title, message)
+		log.Error("%s: %s", title, message)
	}

	ctx.JSON(status, APIError{

@ -116,7 +116,7 @@ func (ctx *Context) RenderWithErr(msg string, tpl base.TplName, form interface{}
// NotFound displays a 404 (Not Found) page and prints the given error, if any.
func (ctx *Context) NotFound(title string, err error) {
	if err != nil {
-		log.Error(4, "%s: %v", title, err)
+		log.Error("%s: %v", title, err)
		if macaron.Env != macaron.PROD {
			ctx.Data["ErrorMsg"] = err
		}

@ -131,7 +131,7 @@ func (ctx *Context) NotFound(title string, err error) {
// error, if any.
func (ctx *Context) ServerError(title string, err error) {
	if err != nil {
-		log.Error(4, "%s: %v", title, err)
+		log.Error("%s: %v", title, err)
		if macaron.Env != macaron.PROD {
			ctx.Data["ErrorMsg"] = err
		}

@ -156,7 +156,7 @@ func (ctx *Context) NotFoundOrServerError(title string, errck func(error) bool,
// HandleText handles HTTP status code
func (ctx *Context) HandleText(status int, title string) {
	if (status/100 == 4) || (status/100 == 5) {
-		log.Error(4, "%s", title)
+		log.Error("%s", title)
	}
	ctx.PlainText(status, []byte(title))
}
@ -17,11 +17,9 @@
package context

import (
-	"bytes"
	"fmt"
-	"io/ioutil"
-	"runtime"

+	"code.gitea.io/gitea/modules/log"
	macaron "gopkg.in/macaron.v1"
)

@ -32,7 +30,7 @@ func Recovery() macaron.Handler {
	return func(ctx *Context) {
		defer func() {
			if err := recover(); err != nil {
-				combinedErr := fmt.Errorf("%s\n%s", err, string(stack(3)))
+				combinedErr := fmt.Errorf("%s\n%s", err, string(log.Stack(2)))
				ctx.ServerError("PANIC:", combinedErr)
			}
		}()

@ -40,73 +38,3 @@ func Recovery() macaron.Handler {
		ctx.Next()
	}
}
-
-var (
-	unknown = []byte("???")
-)
-
-// Although we could just use debug.Stack(), this routine will return the source code
-// skip the provided number of frames - i.e. allowing us to ignore this function call
-// and the preceding function call.
-// If the problem is a lack of memory of course all this is not going to work...
-func stack(skip int) []byte {
-	buf := new(bytes.Buffer)
-
-	// Store the last file we opened as its probable that the preceding stack frame
-	// will be in the same file
-	var lines [][]byte
-	var lastFilename string
-	for i := skip; ; i++ { // Skip over frames
-		programCounter, filename, lineNumber, ok := runtime.Caller(i)
-		// If we can't retrieve the information break - basically we're into go internals at this point.
-		if !ok {
-			break
-		}
-
-		// Print equivalent of debug.Stack()
-		fmt.Fprintf(buf, "%s:%d (0x%x)\n", filename, lineNumber, programCounter)
-		// Now try to print the offending line
-		if filename != lastFilename {
-			data, err := ioutil.ReadFile(filename)
-			if err != nil {
-				// can't read this sourcefile
-				// likely we don't have the sourcecode available
-				continue
-			}
-			lines = bytes.Split(data, []byte{'\n'})
-			lastFilename = filename
-		}
-		fmt.Fprintf(buf, "\t%s: %s\n", functionName(programCounter), source(lines, lineNumber))
-	}
-	return buf.Bytes()
-}
-
-// functionName converts the provided programCounter into a function name
-func functionName(programCounter uintptr) []byte {
-	function := runtime.FuncForPC(programCounter)
-	if function == nil {
-		return unknown
-	}
-	name := []byte(function.Name())
-
-	// Because we provide the filename we can drop the preceding package name.
-	if lastslash := bytes.LastIndex(name, []byte("/")); lastslash >= 0 {
-		name = name[lastslash+1:]
-	}
-	// And the current package name.
-	if period := bytes.Index(name, []byte(".")); period >= 0 {
-		name = name[period+1:]
-	}
-	// And we should just replace the interpunct with a dot
-	name = bytes.Replace(name, []byte("·"), []byte("."), -1)
-	return name
-}
-
-// source returns a space-trimmed slice of the n'th line.
-func source(lines [][]byte, n int) []byte {
-	n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
-	if n < 0 || n >= len(lines) {
-		return unknown
-	}
-	return bytes.TrimSpace(lines[n])
-}
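
The stack, functionName and source helpers removed here now live in the log module, and the Recovery handler above calls log.Stack instead. A rough sketch of that pattern, assuming only what the hunk shows (log.Stack takes a frame-skip count and its result can be converted to a string):

package example

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

// recoverWithStack mirrors the updated Recovery handler: skip 2 frames so the
// trace starts at the caller of interest rather than inside this helper.
func recoverWithStack() {
	if err := recover(); err != nil {
		combinedErr := fmt.Errorf("%s\n%s", err, string(log.Stack(2)))
		log.Error("PANIC: %v", combinedErr)
	}
}

A caller would typically register this with defer recoverWithStack() at the top of the goroutine it protects.
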
@ -124,7 +124,7 @@ func (r *Repository) BranchNameSubURL() string {
	case r.IsViewCommit:
		return "commit/" + r.BranchName
	}
-	log.Error(4, "Unknown view type for repo: %v", r)
+	log.Error("Unknown view type for repo: %v", r)
	return ""
}

@ -536,7 +536,7 @@ func getRefName(ctx *Context, pathType RepoRefType) string {
		}
		return path
	default:
-		log.Error(4, "Unrecognized path type: %v", path)
+		log.Error("Unrecognized path type: %v", path)
	}
	return ""
}
@ -25,7 +25,7 @@ func NewContext() {
	if setting.Cron.UpdateMirror.Enabled {
		entry, err = c.AddFunc("Update mirrors", setting.Cron.UpdateMirror.Schedule, models.MirrorUpdate)
		if err != nil {
-			log.Fatal(4, "Cron[Update mirrors]: %v", err)
+			log.Fatal("Cron[Update mirrors]: %v", err)
		}
		if setting.Cron.UpdateMirror.RunAtStart {
			entry.Prev = time.Now()

@ -36,7 +36,7 @@ func NewContext() {
	if setting.Cron.RepoHealthCheck.Enabled {
		entry, err = c.AddFunc("Repository health check", setting.Cron.RepoHealthCheck.Schedule, models.GitFsck)
		if err != nil {
-			log.Fatal(4, "Cron[Repository health check]: %v", err)
+			log.Fatal("Cron[Repository health check]: %v", err)
		}
		if setting.Cron.RepoHealthCheck.RunAtStart {
			entry.Prev = time.Now()

@ -47,7 +47,7 @@ func NewContext() {
	if setting.Cron.CheckRepoStats.Enabled {
		entry, err = c.AddFunc("Check repository statistics", setting.Cron.CheckRepoStats.Schedule, models.CheckRepoStats)
		if err != nil {
-			log.Fatal(4, "Cron[Check repository statistics]: %v", err)
+			log.Fatal("Cron[Check repository statistics]: %v", err)
		}
		if setting.Cron.CheckRepoStats.RunAtStart {
			entry.Prev = time.Now()

@ -58,7 +58,7 @@ func NewContext() {
	if setting.Cron.ArchiveCleanup.Enabled {
		entry, err = c.AddFunc("Clean up old repository archives", setting.Cron.ArchiveCleanup.Schedule, models.DeleteOldRepositoryArchives)
		if err != nil {
-			log.Fatal(4, "Cron[Clean up old repository archives]: %v", err)
+			log.Fatal("Cron[Clean up old repository archives]: %v", err)
		}
		if setting.Cron.ArchiveCleanup.RunAtStart {
			entry.Prev = time.Now()

@ -69,7 +69,7 @@ func NewContext() {
	if setting.Cron.SyncExternalUsers.Enabled {
		entry, err = c.AddFunc("Synchronize external users", setting.Cron.SyncExternalUsers.Schedule, models.SyncExternalUsers)
		if err != nil {
-			log.Fatal(4, "Cron[Synchronize external users]: %v", err)
+			log.Fatal("Cron[Synchronize external users]: %v", err)
		}
		if setting.Cron.SyncExternalUsers.RunAtStart {
			entry.Prev = time.Now()

@ -80,7 +80,7 @@ func NewContext() {
	if setting.Cron.DeletedBranchesCleanup.Enabled {
		entry, err = c.AddFunc("Remove old deleted branches", setting.Cron.DeletedBranchesCleanup.Schedule, models.RemoveOldDeletedBranches)
		if err != nil {
-			log.Fatal(4, "Cron[Remove old deleted branches]: %v", err)
+			log.Fatal("Cron[Remove old deleted branches]: %v", err)
		}
		if setting.Cron.DeletedBranchesCleanup.RunAtStart {
			entry.Prev = time.Now()
@ -116,7 +116,7 @@ func populateIssueIndexer() {
			Collaborate: util.OptionalBoolFalse,
		})
		if err != nil {
-			log.Error(4, "SearchRepositoryByName: %v", err)
+			log.Error("SearchRepositoryByName: %v", err)
			continue
		}
		if len(repos) == 0 {

@ -130,11 +130,11 @@ func populateIssueIndexer() {
			IsPull: util.OptionalBoolNone,
		})
		if err != nil {
-			log.Error(4, "Issues: %v", err)
+			log.Error("Issues: %v", err)
			continue
		}
		if err = models.IssueList(is).LoadDiscussComments(); err != nil {
-			log.Error(4, "LoadComments: %v", err)
+			log.Error("LoadComments: %v", err)
			continue
		}
		for _, issue := range is {

@ -166,7 +166,7 @@ func DeleteRepoIssueIndexer(repo *models.Repository) {
	var ids []int64
	ids, err := models.GetIssueIDsByRepoID(repo.ID)
	if err != nil {
-		log.Error(4, "getIssueIDsByRepoID failed: %v", err)
+		log.Error("getIssueIDsByRepoID failed: %v", err)
		return
	}
@ -53,7 +53,7 @@ func (l *LevelQueue) Run() error {
		bs, err := l.queue.RPop()
		if err != nil {
			if err != levelqueue.ErrNotFound {
-				log.Error(4, "RPop: %v", err)
+				log.Error("RPop: %v", err)
			}
			time.Sleep(time.Millisecond * 100)
			continue

@ -67,7 +67,7 @@ func (l *LevelQueue) Run() error {
		var data IndexerData
		err = json.Unmarshal(bs, &data)
		if err != nil {
-			log.Error(4, "Unmarshal: %v", err)
+			log.Error("Unmarshal: %v", err)
			time.Sleep(time.Millisecond * 100)
			continue
		}

@ -77,11 +77,11 @@ func (l *LevelQueue) Run() error {
		if data.IsDelete {
			if data.ID > 0 {
				if err = l.indexer.Delete(data.ID); err != nil {
-					log.Error(4, "indexer.Delete: %v", err)
+					log.Error("indexer.Delete: %v", err)
				}
			} else if len(data.IDs) > 0 {
				if err = l.indexer.Delete(data.IDs...); err != nil {
-					log.Error(4, "indexer.Delete: %v", err)
+					log.Error("indexer.Delete: %v", err)
				}
			}
			time.Sleep(time.Millisecond * 10)
@ -68,7 +68,7 @@ func (update RepoIndexerUpdate) AddToFlushingBatch(batch rupture.FlushingBatch)
	case RepoIndexerOpDelete:
		return batch.Delete(id)
	default:
-		log.Error(4, "Unrecognized repo indexer op: %d", update.Op)
+		log.Error("Unrecognized repo indexer op: %d", update.Op)
	}
	return nil
}

@ -78,17 +78,17 @@ func InitRepoIndexer(populateIndexer func() error) {
	var err error
	repoIndexer, err = openIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion)
	if err != nil {
-		log.Fatal(4, "InitRepoIndexer: %v", err)
+		log.Fatal("InitRepoIndexer: %v", err)
	}
	if repoIndexer != nil {
		return
	}

	if err = createRepoIndexer(setting.Indexer.RepoPath, repoIndexerLatestVersion); err != nil {
-		log.Fatal(4, "CreateRepoIndexer: %v", err)
+		log.Fatal("CreateRepoIndexer: %v", err)
	}
	if err = populateIndexer(); err != nil {
-		log.Fatal(4, "PopulateRepoIndex: %v", err)
+		log.Fatal("PopulateRepoIndex: %v", err)
	}
}

@ -135,7 +135,7 @@ func filenameIndexerID(repoID int64, filename string) string {
func filenameOfIndexerID(indexerID string) string {
	index := strings.IndexByte(indexerID, '_')
	if index == -1 {
-		log.Error(4, "Unexpected ID in repo indexer: %s", indexerID)
+		log.Error("Unexpected ID in repo indexer: %s", indexerID)
	}
	return indexerID[index+1:]
}
@ -333,7 +333,7 @@ func PutHandler(ctx *context.Context) {
		ctx.Resp.WriteHeader(500)
		fmt.Fprintf(ctx.Resp, `{"message":"%s"}`, err)
		if err = repository.RemoveLFSMetaObjectByOid(rv.Oid); err != nil {
-			log.Error(4, "RemoveLFSMetaObjectByOid: %v", err)
+			log.Error("RemoveLFSMetaObjectByOid: %v", err)
		}
		return
	}
modules/log/base.go (new file, 328 lines)
@ -0,0 +1,328 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"bytes"
	"fmt"
	"io"
	"regexp"
	"strings"
	"sync"
)

// These flags define which text to prefix to each log entry generated
// by the Logger. Bits are or'ed together to control what's printed.
// There is no control over the order they appear (the order listed
// here) or the format they present (as described in the comments).
// The prefix is followed by a colon only if more than time is stated
// is specified. For example, flags Ldate | Ltime
// produce, 2009/01/23 01:23:23 message.
// The standard is:
// 2009/01/23 01:23:23 ...a/b/c/d.go:23:runtime.Caller() [I]: message
const (
	Ldate          = 1 << iota // the date in the local time zone: 2009/01/23
	Ltime                      // the time in the local time zone: 01:23:23
	Lmicroseconds              // microsecond resolution: 01:23:23.123123. assumes Ltime.
	Llongfile                  // full file name and line number: /a/b/c/d.go:23
	Lshortfile                 // final file name element and line number: d.go:23. overrides Llongfile
	Lfuncname                  // function name of the caller: runtime.Caller()
	Lshortfuncname             // last part of the function name
	LUTC                       // if Ldate or Ltime is set, use UTC rather than the local time zone
	Llevelinitial              // Initial character of the provided level in brackets eg. [I] for info
	Llevel                     // Provided level in brackets [INFO]

	// Last 20 characters of the filename
	Lmedfile = Lshortfile | Llongfile

	// LstdFlags is the initial value for the standard logger
	LstdFlags = Ldate | Ltime | Lmedfile | Lshortfuncname | Llevelinitial
)

var flagFromString = map[string]int{
	"none":          0,
	"date":          Ldate,
	"time":          Ltime,
	"microseconds":  Lmicroseconds,
	"longfile":      Llongfile,
	"shortfile":     Lshortfile,
	"funcname":      Lfuncname,
	"shortfuncname": Lshortfuncname,
	"utc":           LUTC,
	"levelinitial":  Llevelinitial,
	"level":         Llevel,
	"medfile":       Lmedfile,
	"stdflags":      LstdFlags,
}

// FlagsFromString takes a comma separated list of flags and returns
// the flags for this string
func FlagsFromString(from string) int {
	flags := 0
	for _, flag := range strings.Split(strings.ToLower(from), ",") {
		f, ok := flagFromString[strings.TrimSpace(flag)]
		if ok {
			flags = flags | f
		}
	}
	return flags
}

type byteArrayWriter []byte

func (b *byteArrayWriter) Write(p []byte) (int, error) {
	*b = append(*b, p...)
	return len(p), nil
}

// BaseLogger represent a basic logger for Gitea
type BaseLogger struct {
	out io.WriteCloser
	mu  sync.Mutex

	Level           Level  `json:"level"`
	StacktraceLevel Level  `json:"stacktraceLevel"`
	Flags           int    `json:"flags"`
	Prefix          string `json:"prefix"`
	Colorize        bool   `json:"colorize"`
	Expression      string `json:"expression"`
	regexp          *regexp.Regexp
}

func (b *BaseLogger) createLogger(out io.WriteCloser, level ...Level) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.out = out
	switch b.Flags {
	case 0:
		b.Flags = LstdFlags
	case -1:
		b.Flags = 0
	}
	if len(level) > 0 {
		b.Level = level[0]
	}
	b.createExpression()
}

func (b *BaseLogger) createExpression() {
	if len(b.Expression) > 0 {
		var err error
		b.regexp, err = regexp.Compile(b.Expression)
		if err != nil {
			b.regexp = nil
		}
	}
}

// GetLevel returns the logging level for this logger
func (b *BaseLogger) GetLevel() Level {
	return b.Level
}

// GetStacktraceLevel returns the stacktrace logging level for this logger
func (b *BaseLogger) GetStacktraceLevel() Level {
	return b.StacktraceLevel
}

// Copy of cheap integer to fixed-width decimal to ascii from logger.
func itoa(buf *[]byte, i int, wid int) {
	var b [20]byte
	bp := len(b) - 1
	for i >= 10 || wid > 1 {
		wid--
		q := i / 10
		b[bp] = byte('0' + i - q*10)
		bp--
		i = q
	}
	// i < 10
	b[bp] = byte('0' + i)
	*buf = append(*buf, b[bp:]...)
}

func (b *BaseLogger) createMsg(buf *[]byte, event *Event) {
	*buf = append(*buf, b.Prefix...)
	t := event.time
	if b.Flags&(Ldate|Ltime|Lmicroseconds) != 0 {
		if b.Colorize {
			*buf = append(*buf, fgCyanBytes...)
		}
		if b.Flags&LUTC != 0 {
			t = t.UTC()
		}
		if b.Flags&Ldate != 0 {
			year, month, day := t.Date()
			itoa(buf, year, 4)
			*buf = append(*buf, '/')
			itoa(buf, int(month), 2)
			*buf = append(*buf, '/')
			itoa(buf, day, 2)
			*buf = append(*buf, ' ')
		}
		if b.Flags&(Ltime|Lmicroseconds) != 0 {
			hour, min, sec := t.Clock()
			itoa(buf, hour, 2)
			*buf = append(*buf, ':')
			itoa(buf, min, 2)
			*buf = append(*buf, ':')
			itoa(buf, sec, 2)
			if b.Flags&Lmicroseconds != 0 {
				*buf = append(*buf, '.')
				itoa(buf, t.Nanosecond()/1e3, 6)
			}
			*buf = append(*buf, ' ')
		}
		if b.Colorize {
			*buf = append(*buf, resetBytes...)
		}

	}
	if b.Flags&(Lshortfile|Llongfile) != 0 {
		if b.Colorize {
			*buf = append(*buf, fgGreenBytes...)
		}
		file := event.filename
		if b.Flags&Lmedfile == Lmedfile {
			startIndex := len(file) - 20
			if startIndex > 0 {
				file = "..." + file[startIndex:]
			}
		} else if b.Flags&Lshortfile != 0 {
			startIndex := strings.LastIndexByte(file, '/')
			if startIndex > 0 && startIndex < len(file) {
				file = file[startIndex+1:]
			}
		}
		*buf = append(*buf, file...)
		*buf = append(*buf, ':')
		itoa(buf, event.line, -1)
		if b.Flags&(Lfuncname|Lshortfuncname) != 0 {
			*buf = append(*buf, ':')
		} else {
			if b.Colorize {
				*buf = append(*buf, resetBytes...)
			}
			*buf = append(*buf, ' ')
		}
	}
	if b.Flags&(Lfuncname|Lshortfuncname) != 0 {
		if b.Colorize {
			*buf = append(*buf, fgGreenBytes...)
		}
		funcname := event.caller
		if b.Flags&Lshortfuncname != 0 {
			lastIndex := strings.LastIndexByte(funcname, '.')
			if lastIndex > 0 && len(funcname) > lastIndex+1 {
				funcname = funcname[lastIndex+1:]
			}
		}
		*buf = append(*buf, funcname...)
		if b.Colorize {
			*buf = append(*buf, resetBytes...)
		}
		*buf = append(*buf, ' ')

	}
	if b.Flags&(Llevel|Llevelinitial) != 0 {
		level := strings.ToUpper(event.level.String())
		if b.Colorize {
			*buf = append(*buf, levelToColor[event.level]...)
		}
		*buf = append(*buf, '[')
		if b.Flags&Llevelinitial != 0 {
			*buf = append(*buf, level[0])
		} else {
			*buf = append(*buf, level...)
		}
		*buf = append(*buf, ']')
		if b.Colorize {
			*buf = append(*buf, resetBytes...)
		}
		*buf = append(*buf, ' ')
	}

	var msg = []byte(event.msg)
	if len(msg) > 0 && msg[len(msg)-1] == '\n' {
		msg = msg[:len(msg)-1]
	}

	pawMode := allowColor
	if !b.Colorize {
		pawMode = removeColor
	}

	baw := byteArrayWriter(*buf)
	(&protectedANSIWriter{
		w:    &baw,
		mode: pawMode,
	}).Write([]byte(msg))
	*buf = baw

	if event.stacktrace != "" && b.StacktraceLevel <= event.level {
		lines := bytes.Split([]byte(event.stacktrace), []byte("\n"))
		if len(lines) > 1 {
			for _, line := range lines {
				*buf = append(*buf, "\n\t"...)
				*buf = append(*buf, line...)
			}
		}
		*buf = append(*buf, '\n')
	}
	*buf = append(*buf, '\n')
}

// LogEvent logs the event to the internal writer
func (b *BaseLogger) LogEvent(event *Event) error {
	if b.Level > event.level {
		return nil
	}

	b.mu.Lock()
	defer b.mu.Unlock()
	if !b.Match(event) {
		return nil
	}
	var buf []byte
	b.createMsg(&buf, event)
	_, err := b.out.Write(buf)
	return err
}

// Match checks if the given event matches the logger's regexp expression
func (b *BaseLogger) Match(event *Event) bool {
	if b.regexp == nil {
		return true
	}
	if b.regexp.Match([]byte(fmt.Sprintf("%s:%d:%s", event.filename, event.line, event.caller))) {
		return true
	}
	// Match on the non-colored msg - therefore strip out colors
	var msg []byte
	baw := byteArrayWriter(msg)
	(&protectedANSIWriter{
		w:    &baw,
		mode: removeColor,
	}).Write([]byte(event.msg))
	msg = baw
	if b.regexp.Match(msg) {
		return true
	}
	return false
}

// Close the base logger
func (b *BaseLogger) Close() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.out != nil {
		b.out.Close()
	}
}

// GetName returns empty for these provider loggers
func (b *BaseLogger) GetName() string {
	return ""
}
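
Since FLAGS is now a comma separated list (see the commit message), FlagsFromString above is what turns that setting into the bit flags used by BaseLogger. A small hypothetical test, written against the code shown here and not part of the commit, sketches the expected behaviour:

package log

import "testing"

// TestFlagsFromStringSketch is illustrative only.
func TestFlagsFromStringSketch(t *testing.T) {
	// Names are lower-cased and trimmed, then OR'ed together.
	if got := FlagsFromString("date,time, shortfile ,levelinitial"); got != Ldate|Ltime|Lshortfile|Llevelinitial {
		t.Errorf("unexpected flags: %d", got)
	}
	// Unknown names contribute nothing.
	if got := FlagsFromString("bogus"); got != 0 {
		t.Errorf("expected 0, got %d", got)
	}
}
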
modules/log/base_test.go (new file, 277 lines)
@ -0,0 +1,277 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

type CallbackWriteCloser struct {
	callback func([]byte, bool)
}

func (c CallbackWriteCloser) Write(p []byte) (int, error) {
	c.callback(p, false)
	return len(p), nil
}

func (c CallbackWriteCloser) Close() error {
	c.callback(nil, true)
	return nil
}

func TestBaseLogger(t *testing.T) {
	var written []byte
	var closed bool

	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written = p
			closed = close
		},
	}
	prefix := "TestPrefix "
	b := BaseLogger{
		out:    c,
		Level:  INFO,
		Flags:  LstdFlags | LUTC,
		Prefix: prefix,
	}
	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	assert.Equal(t, INFO, b.GetLevel())

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)
	written = written[:0]

	event.level = DEBUG
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)

	event.level = TRACE
	expected = ""
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)

	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)
	written = written[:0]

	event.level = ERROR
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)
	written = written[:0]

	event.level = CRITICAL
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	b.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)
	written = written[:0]

	b.Close()
	assert.Equal(t, true, closed)
}

func TestBaseLoggerDated(t *testing.T) {
	var written []byte
	var closed bool

	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written = p
			closed = close
		},
	}
	prefix := ""
	b := BaseLogger{
		out:    c,
		Level:  WARN,
		Flags:  Ldate | Ltime | Lmicroseconds | Lshortfile | Llevel,
		Prefix: prefix,
	}

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 115, location)

	dateString := date.Format("2006/01/02 15:04:05.000000")

	event := Event{
		level:    WARN,
		msg:      "TEST MESSAGE TEST\n",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	assert.Equal(t, WARN, b.GetLevel())

	expected := fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.level = INFO
|
||||||
|
expected = ""
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.level = ERROR
|
||||||
|
expected = fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.level = DEBUG
|
||||||
|
expected = ""
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.level = CRITICAL
|
||||||
|
expected = fmt.Sprintf("%s%s %s:%d [%s] %s", prefix, dateString, "FILENAME", event.line, strings.ToUpper(event.level.String()), event.msg)
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.level = TRACE
|
||||||
|
expected = ""
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
b.Close()
|
||||||
|
assert.Equal(t, true, closed)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBaseLoggerMultiLineNoFlagsRegexp(t *testing.T) {
|
||||||
|
var written []byte
|
||||||
|
var closed bool
|
||||||
|
|
||||||
|
c := CallbackWriteCloser{
|
||||||
|
callback: func(p []byte, close bool) {
|
||||||
|
written = p
|
||||||
|
closed = close
|
||||||
|
},
|
||||||
|
}
|
||||||
|
prefix := ""
|
||||||
|
b := BaseLogger{
|
||||||
|
Level: DEBUG,
|
||||||
|
StacktraceLevel: ERROR,
|
||||||
|
Flags: -1,
|
||||||
|
Prefix: prefix,
|
||||||
|
Expression: "FILENAME",
|
||||||
|
}
|
||||||
|
b.createLogger(c)
|
||||||
|
|
||||||
|
location, _ := time.LoadLocation("EST")
|
||||||
|
|
||||||
|
date := time.Date(2019, time.January, 13, 22, 3, 30, 115, location)
|
||||||
|
|
||||||
|
event := Event{
|
||||||
|
level: DEBUG,
|
||||||
|
msg: "TEST\nMESSAGE\nTEST",
|
||||||
|
caller: "CALLER",
|
||||||
|
filename: "FULL/FILENAME",
|
||||||
|
line: 1,
|
||||||
|
time: date,
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Equal(t, DEBUG, b.GetLevel())
|
||||||
|
|
||||||
|
expected := "TEST\n\tMESSAGE\n\tTEST\n"
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.filename = "ELSEWHERE"
|
||||||
|
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, "", string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event.caller = "FILENAME"
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
event = Event{
|
||||||
|
level: DEBUG,
|
||||||
|
msg: "TEST\nFILENAME\nTEST",
|
||||||
|
caller: "CALLER",
|
||||||
|
filename: "FULL/ELSEWHERE",
|
||||||
|
line: 1,
|
||||||
|
time: date,
|
||||||
|
}
|
||||||
|
expected = "TEST\n\tFILENAME\n\tTEST\n"
|
||||||
|
b.LogEvent(&event)
|
||||||
|
assert.Equal(t, expected, string(written))
|
||||||
|
assert.Equal(t, false, closed)
|
||||||
|
written = written[:0]
|
||||||
|
|
||||||
|
}
|
||||||
|

func TestBrokenRegexp(t *testing.T) {
	var closed bool

	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			closed = close
		},
	}

	prefix := ""
	b := BaseLogger{
		Level:           DEBUG,
		StacktraceLevel: ERROR,
		Flags:           -1,
		Prefix:          prefix,
		Expression:      "\\",
	}
	b.createLogger(c)
	assert.Empty(t, b.regexp)
	b.Close()
	assert.Equal(t, true, closed)
}
348  modules/log/colors.go  Normal file
@@ -0,0 +1,348 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"io"
	"strconv"
	"strings"
)

const escape = "\033"

// ColorAttribute defines a single SGR Code
type ColorAttribute int

// Base ColorAttributes
const (
	Reset ColorAttribute = iota
	Bold
	Faint
	Italic
	Underline
	BlinkSlow
	BlinkRapid
	ReverseVideo
	Concealed
	CrossedOut
)

// Foreground text colors
const (
	FgBlack ColorAttribute = iota + 30
	FgRed
	FgGreen
	FgYellow
	FgBlue
	FgMagenta
	FgCyan
	FgWhite
)

// Foreground Hi-Intensity text colors
const (
	FgHiBlack ColorAttribute = iota + 90
	FgHiRed
	FgHiGreen
	FgHiYellow
	FgHiBlue
	FgHiMagenta
	FgHiCyan
	FgHiWhite
)

// Background text colors
const (
	BgBlack ColorAttribute = iota + 40
	BgRed
	BgGreen
	BgYellow
	BgBlue
	BgMagenta
	BgCyan
	BgWhite
)

// Background Hi-Intensity text colors
const (
	BgHiBlack ColorAttribute = iota + 100
	BgHiRed
	BgHiGreen
	BgHiYellow
	BgHiBlue
	BgHiMagenta
	BgHiCyan
	BgHiWhite
)

var colorAttributeToString = map[ColorAttribute]string{
	Reset:        "Reset",
	Bold:         "Bold",
	Faint:        "Faint",
	Italic:       "Italic",
	Underline:    "Underline",
	BlinkSlow:    "BlinkSlow",
	BlinkRapid:   "BlinkRapid",
	ReverseVideo: "ReverseVideo",
	Concealed:    "Concealed",
	CrossedOut:   "CrossedOut",
	FgBlack:      "FgBlack",
	FgRed:        "FgRed",
	FgGreen:      "FgGreen",
	FgYellow:     "FgYellow",
	FgBlue:       "FgBlue",
	FgMagenta:    "FgMagenta",
	FgCyan:       "FgCyan",
	FgWhite:      "FgWhite",
	FgHiBlack:    "FgHiBlack",
	FgHiRed:      "FgHiRed",
	FgHiGreen:    "FgHiGreen",
	FgHiYellow:   "FgHiYellow",
	FgHiBlue:     "FgHiBlue",
	FgHiMagenta:  "FgHiMagenta",
	FgHiCyan:     "FgHiCyan",
	FgHiWhite:    "FgHiWhite",
	BgBlack:      "BgBlack",
	BgRed:        "BgRed",
	BgGreen:      "BgGreen",
	BgYellow:     "BgYellow",
	BgBlue:       "BgBlue",
	BgMagenta:    "BgMagenta",
	BgCyan:       "BgCyan",
	BgWhite:      "BgWhite",
	BgHiBlack:    "BgHiBlack",
	BgHiRed:      "BgHiRed",
	BgHiGreen:    "BgHiGreen",
	BgHiYellow:   "BgHiYellow",
	BgHiBlue:     "BgHiBlue",
	BgHiMagenta:  "BgHiMagenta",
	BgHiCyan:     "BgHiCyan",
	BgHiWhite:    "BgHiWhite",
}

func (c *ColorAttribute) String() string {
	return colorAttributeToString[*c]
}

var colorAttributeFromString = map[string]ColorAttribute{}

// ColorAttributeFromString will return a ColorAttribute given a string
func ColorAttributeFromString(from string) ColorAttribute {
	lowerFrom := strings.TrimSpace(strings.ToLower(from))
	return colorAttributeFromString[lowerFrom]
}

// ColorString converts a list of ColorAttributes to a color string
func ColorString(attrs ...ColorAttribute) string {
	return string(ColorBytes(attrs...))
}

// ColorBytes converts a list of ColorAttributes to a byte array
func ColorBytes(attrs ...ColorAttribute) []byte {
	bytes := make([]byte, 0, 20)
	bytes = append(bytes, escape[0], '[')
	if len(attrs) > 0 {
		bytes = append(bytes, strconv.Itoa(int(attrs[0]))...)
		for _, a := range attrs[1:] {
			bytes = append(bytes, ';')
			bytes = append(bytes, strconv.Itoa(int(a))...)
		}
	} else {
		bytes = append(bytes, strconv.Itoa(int(Bold))...)
	}
	bytes = append(bytes, 'm')
	return bytes
}
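As a quick illustration of what ColorBytes/ColorString produce (a sketch; the import path is the one Gitea uses for this package, and the output noted in the comment is simply the standard SGR encoding of the attributes):

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

func main() {
	// Bold + FgRed encode to "\033[1;31m"; Reset encodes to "\033[0m".
	fmt.Printf("%sbold red%s\n", log.ColorString(log.Bold, log.FgRed), log.ColorString(log.Reset))
}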

var levelToColor = map[Level]string{
	TRACE:    ColorString(Bold, FgCyan),
	DEBUG:    ColorString(Bold, FgBlue),
	INFO:     ColorString(Bold, FgGreen),
	WARN:     ColorString(Bold, FgYellow),
	ERROR:    ColorString(Bold, FgRed),
	CRITICAL: ColorString(Bold, BgMagenta),
	FATAL:    ColorString(Bold, BgRed),
	NONE:     ColorString(Reset),
}

var resetBytes = ColorBytes(Reset)
var fgCyanBytes = ColorBytes(FgCyan)
var fgGreenBytes = ColorBytes(FgGreen)
var fgBoldBytes = ColorBytes(Bold)

type protectedANSIWriterMode int

const (
	escapeAll protectedANSIWriterMode = iota
	allowColor
	removeColor
)

type protectedANSIWriter struct {
	w    io.Writer
	mode protectedANSIWriterMode
}

// Write will protect against unusual characters
func (c *protectedANSIWriter) Write(bytes []byte) (int, error) {
	end := len(bytes)
	totalWritten := 0
normalLoop:
	for i := 0; i < end; {
		lasti := i

		if c.mode == escapeAll {
			for i < end && (bytes[i] >= ' ' || bytes[i] == '\n') {
				i++
			}
		} else {
			for i < end && bytes[i] >= ' ' {
				i++
			}
		}

		if i > lasti {
			written, err := c.w.Write(bytes[lasti:i])
			totalWritten = totalWritten + written
			if err != nil {
				return totalWritten, err
			}

		}
		if i >= end {
			break
		}

		// If we're not just escaping all we should prefix all newlines with a \t
		if c.mode != escapeAll {
			if bytes[i] == '\n' {
				written, err := c.w.Write([]byte{'\n', '\t'})
				if written > 0 {
					totalWritten++
				}
				if err != nil {
					return totalWritten, err
				}
				i++
				continue normalLoop
			}

			if bytes[i] == escape[0] && i+1 < end && bytes[i+1] == '[' {
				for j := i + 2; j < end; j++ {
					if bytes[j] >= '0' && bytes[j] <= '9' {
						continue
					}
					if bytes[j] == ';' {
						continue
					}
					if bytes[j] == 'm' {
						if c.mode == allowColor {
							written, err := c.w.Write(bytes[i : j+1])
							totalWritten = totalWritten + written
							if err != nil {
								return totalWritten, err
							}
						} else {
							totalWritten = j
						}
						i = j + 1
						continue normalLoop
					}
					break
				}
			}
		}

		// Process naughty character: escape it as a three-digit octal sequence
		if _, err := fmt.Fprintf(c.w, `\%#03o`, bytes[i]); err != nil {
			return totalWritten, err
		}
		i++
		totalWritten++
	}
	return totalWritten, nil
}

// ColoredValue will Color the provided value
type ColoredValue struct {
	ColorBytes *[]byte
	ResetBytes *[]byte
	Value      *interface{}
}

// NewColoredValue is a helper function to create a ColoredValue from a Value
// If no color is provided it defaults to Bold with standard Reset
// If a ColoredValue is provided it is not changed
func NewColoredValue(value interface{}, color ...ColorAttribute) *ColoredValue {
	return NewColoredValuePointer(&value, color...)
}

// NewColoredValuePointer is a helper function to create a ColoredValue from a Value Pointer
// If no color is provided it defaults to Bold with standard Reset
// If a ColoredValue is provided it is not changed
func NewColoredValuePointer(value *interface{}, color ...ColorAttribute) *ColoredValue {
	if val, ok := (*value).(*ColoredValue); ok {
		return val
	}
	if len(color) > 0 {
		bytes := ColorBytes(color...)
		return &ColoredValue{
			ColorBytes: &bytes,
			ResetBytes: &resetBytes,
			Value:      value,
		}
	}
	return &ColoredValue{
		ColorBytes: &fgBoldBytes,
		ResetBytes: &resetBytes,
		Value:      value,
	}

}

// NewColoredValueBytes creates a value from the provided value with color bytes
// If a ColoredValue is provided it is not changed
func NewColoredValueBytes(value interface{}, colorBytes *[]byte) *ColoredValue {
	if val, ok := value.(*ColoredValue); ok {
		return val
	}
	return &ColoredValue{
		ColorBytes: colorBytes,
		ResetBytes: &resetBytes,
		Value:      &value,
	}
}

// Format will format the provided value and protect against ANSI spoofing within the value
func (cv *ColoredValue) Format(s fmt.State, c rune) {
	s.Write([]byte(*cv.ColorBytes))
	fmt.Fprintf(&protectedANSIWriter{w: s}, fmtString(s, c), *(cv.Value))
	s.Write([]byte(*cv.ResetBytes))
}

func fmtString(s fmt.State, c rune) string {
	var width, precision string
	base := make([]byte, 0, 8)
	base = append(base, '%')
	for _, c := range []byte(" +-#0") {
		if s.Flag(int(c)) {
			base = append(base, c)
		}
	}
	if w, ok := s.Width(); ok {
		width = strconv.Itoa(w)
	}
	if p, ok := s.Precision(); ok {
		precision = "." + strconv.Itoa(p)
	}
	return fmt.Sprintf("%s%s%s%c", base, width, precision, c)
}

func init() {
	for attr, from := range colorAttributeToString {
		colorAttributeFromString[strings.ToLower(from)] = attr
	}
}
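ColoredValue implements fmt.Formatter, so a wrapped value can be dropped straight into a format call; a minimal sketch (import path assumed as above, the value and colour are illustrative):

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

func main() {
	// The value is printed wrapped in FgCyan ... Reset, and any ANSI sequences inside
	// the value itself are escaped by protectedANSIWriter rather than passed through.
	repo := log.NewColoredValue("owner/repo", log.FgCyan)
	fmt.Printf("cloning %v\n", repo)
}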
@@ -1,4 +1,5 @@
 // Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
@@ -7,73 +8,60 @@ package log
 import (
 	"encoding/json"
 	"io"
-	"log"
 	"net"
 )
 
-// ConnWriter implements LoggerInterface.
-// it writes messages in keep-live tcp connection.
-type ConnWriter struct {
-	lg             *log.Logger
+type connWriter struct {
 	innerWriter    io.WriteCloser
 	ReconnectOnMsg bool   `json:"reconnectOnMsg"`
 	Reconnect      bool   `json:"reconnect"`
 	Net            string `json:"net"`
 	Addr           string `json:"addr"`
-	Level          int    `json:"level"`
 }
 
-// NewConn creates new ConnWrite returning as LoggerInterface.
-func NewConn() LoggerInterface {
-	conn := new(ConnWriter)
-	conn.Level = TRACE
-	return conn
-}
-
-// Init inits connection writer with json config.
-// json config only need key "level".
-func (cw *ConnWriter) Init(jsonconfig string) error {
-	return json.Unmarshal([]byte(jsonconfig), cw)
-}
-
-// WriteMsg writes message in connection.
-// if connection is down, try to re-connect.
-func (cw *ConnWriter) WriteMsg(msg string, skip, level int) error {
-	if cw.Level > level {
-		return nil
+// Close the inner writer
+func (i *connWriter) Close() error {
+	if i.innerWriter != nil {
+		return i.innerWriter.Close()
 	}
-	if cw.neededConnectOnMsg() {
-		if err := cw.connect(); err != nil {
-			return err
-		}
-	}
-
-	if cw.ReconnectOnMsg {
-		defer cw.innerWriter.Close()
-	}
-	cw.lg.Println(msg)
 	return nil
 }
 
-// Flush no things for this implementation
-func (cw *ConnWriter) Flush() {
-}
-
-// Destroy destroy connection writer and close tcp listener.
-func (cw *ConnWriter) Destroy() {
-	if cw.innerWriter == nil {
-		return
-	}
-	cw.innerWriter.Close()
-}
-
-func (cw *ConnWriter) connect() error {
-	if cw.innerWriter != nil {
-		cw.innerWriter.Close()
-		cw.innerWriter = nil
+// Write the data to the connection
+func (i *connWriter) Write(p []byte) (int, error) {
+	if i.neededConnectOnMsg() {
+		if err := i.connect(); err != nil {
+			return 0, err
+		}
 	}
 
-	conn, err := net.Dial(cw.Net, cw.Addr)
+	if i.ReconnectOnMsg {
+		defer i.innerWriter.Close()
+	}
+
+	return i.innerWriter.Write(p)
+}
+
+func (i *connWriter) neededConnectOnMsg() bool {
+	if i.Reconnect {
+		i.Reconnect = false
+		return true
+	}
+
+	if i.innerWriter == nil {
+		return true
+	}
+
+	return i.ReconnectOnMsg
+}
+
+func (i *connWriter) connect() error {
+	if i.innerWriter != nil {
+		i.innerWriter.Close()
+		i.innerWriter = nil
+	}
+
+	conn, err := net.Dial(i.Net, i.Addr)
 	if err != nil {
 		return err
 	}
@@ -82,22 +70,50 @@ func (cw *ConnWriter) connect() error {
 	if tcpConn, ok := conn.(*net.TCPConn); ok {
 		tcpConn.SetKeepAlive(true)
 	}
 
-	cw.innerWriter = conn
-	cw.lg = log.New(conn, "", log.Ldate|log.Ltime)
+	i.innerWriter = conn
 	return nil
 }
 
-func (cw *ConnWriter) neededConnectOnMsg() bool {
-	if cw.Reconnect {
-		cw.Reconnect = false
-		return true
-	}
+// ConnLogger implements LoggerProvider.
+// it writes messages in keep-live tcp connection.
+type ConnLogger struct {
+	BaseLogger
+	ReconnectOnMsg bool   `json:"reconnectOnMsg"`
+	Reconnect      bool   `json:"reconnect"`
+	Net            string `json:"net"`
+	Addr           string `json:"addr"`
+}
 
-	if cw.innerWriter == nil {
-		return true
-	}
+// NewConn creates new ConnLogger returning as LoggerProvider.
+func NewConn() LoggerProvider {
+	conn := new(ConnLogger)
+	conn.Level = TRACE
+	return conn
+}
 
-	return cw.ReconnectOnMsg
+// Init inits connection writer with json config.
+// json config only need key "level".
+func (log *ConnLogger) Init(jsonconfig string) error {
+	err := json.Unmarshal([]byte(jsonconfig), log)
+	if err != nil {
+		return err
+	}
+	log.createLogger(&connWriter{
+		ReconnectOnMsg: log.ReconnectOnMsg,
+		Reconnect:      log.Reconnect,
+		Net:            log.Net,
+		Addr:           log.Addr,
+	}, log.Level)
+	return nil
+}
+
+// Flush does nothing for this implementation
+func (log *ConnLogger) Flush() {
+}
+
+// GetName returns the default name for this implementation
+func (log *ConnLogger) GetName() string {
+	return "conn"
 }
 
 func init() {
240  modules/log/conn_test.go  Normal file
@@ -0,0 +1,240 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"io/ioutil"
	"net"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func listenReadAndClose(t *testing.T, l net.Listener, expected string) {
	conn, err := l.Accept()
	assert.NoError(t, err)
	defer conn.Close()
	written, err := ioutil.ReadAll(conn)

	assert.NoError(t, err)
	assert.Equal(t, expected, string(written))
	return
}

func TestConnLogger(t *testing.T) {
	var written []byte

	protocol := "tcp"
	address := ":3099"

	l, err := net.Listen(protocol, address)
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname

	logger := NewConn()
	connLogger := logger.(*ConnLogger)

	logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, true, true, protocol, address))

	assert.Equal(t, flags, connLogger.Flags)
	assert.Equal(t, level, connLogger.Level)
	assert.Equal(t, level, logger.GetLevel())

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}
	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		listenReadAndClose(t, l, expected)
	}()
	go func() {
		defer wg.Done()
		err := logger.LogEvent(&event)
		assert.NoError(t, err)
	}()
	wg.Wait()

	written = written[:0]

	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	wg.Add(2)
	go func() {
		defer wg.Done()
		listenReadAndClose(t, l, expected)
	}()
	go func() {
		defer wg.Done()
		err := logger.LogEvent(&event)
		assert.NoError(t, err)
	}()
	wg.Wait()

	logger.Close()
}

func TestConnLoggerBadConfig(t *testing.T) {
	logger := NewConn()

	err := logger.Init("{")
	assert.Equal(t, "unexpected end of JSON input", err.Error())
	logger.Close()
}

func TestConnLoggerCloseBeforeSend(t *testing.T) {
	protocol := "tcp"
	address := ":3099"

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname

	logger := NewConn()

	logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))
	logger.Close()
}

func TestConnLoggerFailConnect(t *testing.T) {
	protocol := "tcp"
	address := ":3099"

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname

	logger := NewConn()

	logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))

	assert.Equal(t, level, logger.GetLevel())

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	//dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	err := logger.LogEvent(&event)
	assert.Error(t, err)

	logger.Close()
}

func TestConnLoggerClose(t *testing.T) {
	var written []byte

	protocol := "tcp"
	address := ":3099"

	l, err := net.Listen(protocol, address)
	if err != nil {
		t.Fatal(err)
	}
	defer l.Close()

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname

	logger := NewConn()
	connLogger := logger.(*ConnLogger)

	logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, false, protocol, address))

	assert.Equal(t, flags, connLogger.Flags)
	assert.Equal(t, level, connLogger.Level)
	assert.Equal(t, level, logger.GetLevel())
	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}
	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		err := logger.LogEvent(&event)
		assert.NoError(t, err)
		logger.Close()
	}()
	go func() {
		defer wg.Done()
		listenReadAndClose(t, l, expected)
	}()
	wg.Wait()

	logger = NewConn()
	connLogger = logger.(*ConnLogger)

	logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"reconnectOnMsg\":%t,\"reconnect\":%t,\"net\":\"%s\",\"addr\":\"%s\"}", prefix, level.String(), flags, false, true, protocol, address))

	assert.Equal(t, flags, connLogger.Flags)
	assert.Equal(t, level, connLogger.Level)
	assert.Equal(t, level, logger.GetLevel())

	written = written[:0]

	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	wg.Add(2)
	go func() {
		defer wg.Done()
		listenReadAndClose(t, l, expected)
	}()
	go func() {
		defer wg.Done()
		err := logger.LogEvent(&event)
		assert.NoError(t, err)
		logger.Close()

	}()
	wg.Wait()
	logger.Flush()
	logger.Close()
}
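The tests above always build the ConnLogger configuration with fmt.Sprintf, which is hard to read; spelled out, the JSON handed to Init looks roughly like this (a sketch: the keys are the ones the tests above pass, the values are illustrative, and flags is left out because the tests pass it as a raw integer):

package main

import "code.gitea.io/gitea/modules/log"

func main() {
	conn := log.NewConn()
	// "reconnect": true makes the first Write (re)dial the address;
	// "reconnectOnMsg": true would reconnect on every message instead.
	_ = conn.Init(`{
		"prefix": "TestPrefix ",
		"level": "info",
		"reconnect": true,
		"reconnectOnMsg": false,
		"net": "tcp",
		"addr": ":3099"
	}`)
	conn.Close()
}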
@@ -1,4 +1,5 @@
 // Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
@@ -6,75 +7,72 @@ package log
 import (
 	"encoding/json"
-	"log"
+	"io"
 	"os"
-	"runtime"
 )
 
-// Brush brush type
-type Brush func(string) string
+// CanColorStdout reports if we can color the Stdout
+// Although we could do terminal sniffing and the like - in reality
+// most tools on *nix are happy to display ansi colors.
+// We will terminal sniff on Windows in console_windows.go
+var CanColorStdout = true
 
-// NewBrush create a brush according color
-func NewBrush(color string) Brush {
-	pre := "\033["
-	reset := "\033[0m"
-	return func(text string) string {
-		return pre + color + "m" + text + reset
-	}
-}
+// CanColorStderr reports if we can color the Stderr
+var CanColorStderr = true
+
+type nopWriteCloser struct {
+	w io.WriteCloser
+}
 
-var colors = []Brush{
-	NewBrush("1;36"), // Trace cyan
-	NewBrush("1;34"), // Debug blue
-	NewBrush("1;32"), // Info green
-	NewBrush("1;33"), // Warn yellow
-	NewBrush("1;31"), // Error red
-	NewBrush("1;35"), // Critical purple
-	NewBrush("1;31"), // Fatal red
-}
+func (n *nopWriteCloser) Write(p []byte) (int, error) {
+	return n.w.Write(p)
+}
 
-// ConsoleWriter implements LoggerInterface and writes messages to terminal.
-type ConsoleWriter struct {
-	lg    *log.Logger
-	Level int `json:"level"`
-}
+func (n *nopWriteCloser) Close() error {
+	return nil
+}
 
-// NewConsole create ConsoleWriter returning as LoggerInterface.
-func NewConsole() LoggerInterface {
-	return &ConsoleWriter{
-		lg:    log.New(os.Stdout, "", log.Ldate|log.Ltime),
-		Level: TRACE,
-	}
-}
+// ConsoleLogger implements LoggerProvider and writes messages to terminal.
+type ConsoleLogger struct {
+	BaseLogger
+	Stderr bool `json:"stderr"`
+}
+
+// NewConsoleLogger create ConsoleLogger returning as LoggerProvider.
+func NewConsoleLogger() LoggerProvider {
+	log := &ConsoleLogger{}
+	log.createLogger(&nopWriteCloser{
+		w: os.Stdout,
+	})
+	return log
+}
 
 // Init inits connection writer with json config.
 // json config only need key "level".
-func (cw *ConsoleWriter) Init(config string) error {
-	return json.Unmarshal([]byte(config), cw)
-}
-
-// WriteMsg writes message in console.
-// if OS is windows, ignore colors.
-func (cw *ConsoleWriter) WriteMsg(msg string, skip, level int) error {
-	if cw.Level > level {
-		return nil
+func (log *ConsoleLogger) Init(config string) error {
+	err := json.Unmarshal([]byte(config), log)
+	if err != nil {
+		return err
 	}
-	if runtime.GOOS == "windows" {
-		cw.lg.Println(msg)
+	if log.Stderr {
+		log.createLogger(&nopWriteCloser{
+			w: os.Stderr,
+		})
 	} else {
-		cw.lg.Println(colors[level](msg))
+		log.createLogger(log.out)
 	}
 	return nil
 }
 
 // Flush when log should be flushed
-func (cw *ConsoleWriter) Flush() {
+func (log *ConsoleLogger) Flush() {
 }
 
-// Destroy when writer is destroy
-func (cw *ConsoleWriter) Destroy() {
+// GetName returns the default name for this implementation
+func (log *ConsoleLogger) GetName() string {
+	return "console"
 }
 
 func init() {
-	Register("console", NewConsole)
+	Register("console", NewConsoleLogger)
 }
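For completeness, the only console-specific key the new provider adds is "stderr"; a minimal sketch of switching the console logger onto os.Stderr (import path assumed as above; the level value is illustrative):

package main

import "code.gitea.io/gitea/modules/log"

func main() {
	c := log.NewConsoleLogger()
	// "stderr": true swaps the underlying writer from os.Stdout to os.Stderr;
	// level, prefix, flags and expression behave as in the base logger.
	_ = c.Init(`{"level": "warn", "stderr": true}`)
	c.Close()
}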
137  modules/log/console_test.go  Normal file
@@ -0,0 +1,137 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestConsoleLoggerBadConfig(t *testing.T) {
	logger := NewConsoleLogger()

	err := logger.Init("{")
	assert.Equal(t, "unexpected end of JSON input", err.Error())
	logger.Close()
}

func TestConsoleLoggerMinimalConfig(t *testing.T) {
	for _, level := range Levels() {
		var written []byte
		var closed bool

		c := CallbackWriteCloser{
			callback: func(p []byte, close bool) {
				written = p
				closed = close
			},
		}
		prefix := ""
		flags := LstdFlags

		cw := NewConsoleLogger()
		realCW := cw.(*ConsoleLogger)
		cw.Init(fmt.Sprintf("{\"level\":\"%s\"}", level))
		nwc := realCW.out.(*nopWriteCloser)
		nwc.w = c

		assert.Equal(t, flags, realCW.Flags)
		assert.Equal(t, FromString(level), realCW.Level)
		assert.Equal(t, FromString(level), cw.GetLevel())
		assert.Equal(t, prefix, realCW.Prefix)
		assert.Equal(t, "", string(written))
		cw.Close()
		assert.Equal(t, false, closed)

	}
}

func TestConsoleLogger(t *testing.T) {
	var written []byte
	var closed bool

	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written = p
			closed = close
		},
	}
	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname

	cw := NewConsoleLogger()
	realCW := cw.(*ConsoleLogger)
	realCW.Colorize = false
	nwc := realCW.out.(*nopWriteCloser)
	nwc.w = c

	cw.Init(fmt.Sprintf("{\"expression\":\"FILENAME\",\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d}", prefix, level.String(), flags))

	assert.Equal(t, flags, realCW.Flags)
	assert.Equal(t, level, realCW.Level)
	assert.Equal(t, level, cw.GetLevel())

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	cw.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)
	written = written[:0]

	event.level = DEBUG
	expected = ""
	cw.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)

	event.level = TRACE
	expected = ""
	cw.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)

	nonMatchEvent := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FI_LENAME",
		line:     1,
		time:     date,
	}
	event.level = INFO
	expected = ""
	cw.LogEvent(&nonMatchEvent)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)

	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	cw.LogEvent(&event)
	assert.Equal(t, expected, string(written))
	assert.Equal(t, false, closed)
	written = written[:0]

	cw.Close()
	assert.Equal(t, false, closed)
}
43  modules/log/console_windows.go  Normal file
@@ -0,0 +1,43 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"os"

	"github.com/mattn/go-isatty"
	"golang.org/x/sys/windows"
)

func enableVTMode(console windows.Handle) bool {
	mode := uint32(0)
	err := windows.GetConsoleMode(console, &mode)
	if err != nil {
		return false
	}

	// EnableVirtualTerminalProcessing is the console mode to allow ANSI code
	// interpretation on the console. See:
	// https://docs.microsoft.com/en-us/windows/console/setconsolemode
	// It only works on windows 10. Earlier terminals will fail with an err which we will
	// handle to say don't color
	mode = mode | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
	err = windows.SetConsoleMode(console, mode)
	return err == nil
}

func init() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		CanColorStdout = enableVTMode(windows.Stdout)
	} else {
		CanColorStdout = isatty.IsCygwinTerminal(os.Stdout.Fd())
	}

	if isatty.IsTerminal(os.Stderr.Fd()) {
		CanColorStderr = enableVTMode(windows.Stderr)
	} else {
		CanColorStderr = isatty.IsCygwinTerminal(os.Stderr.Fd())
	}
}
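CanColorStdout and CanColorStderr are plain package variables that the init() above adjusts on Windows (VT mode or Cygwin/msys detection) and that default to true elsewhere; a trivial sketch of reading them (import path assumed as above):

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

func main() {
	fmt.Println("can color stdout:", log.CanColorStdout)
	fmt.Println("can color stderr:", log.CanColorStderr)
}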
62  modules/log/errors.go  Normal file
@@ -0,0 +1,62 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import "fmt"

// ErrTimeout represents a "Timeout" kind of error.
type ErrTimeout struct {
	Name     string
	Provider string
}

// IsErrTimeout checks if an error is a ErrTimeout.
func IsErrTimeout(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(ErrTimeout)
	return ok
}

func (err ErrTimeout) Error() string {
	return fmt.Sprintf("Log Timeout for %s (%s)", err.Name, err.Provider)
}

// ErrUnknownProvider represents a "Unknown Provider" kind of error.
type ErrUnknownProvider struct {
	Provider string
}

// IsErrUnknownProvider checks if an error is a ErrUnknownProvider.
func IsErrUnknownProvider(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(ErrUnknownProvider)
	return ok
}

func (err ErrUnknownProvider) Error() string {
	return fmt.Sprintf("Unknown Log Provider \"%s\" (Was it registered?)", err.Provider)
}

// ErrDuplicateName represents a Duplicate Name error
type ErrDuplicateName struct {
	Name string
}

// IsErrDuplicateName checks if an error is a ErrDuplicateName.
func IsErrDuplicateName(err error) bool {
	if err == nil {
		return false
	}
	_, ok := err.(ErrDuplicateName)
	return ok
}

func (err ErrDuplicateName) Error() string {
	return fmt.Sprintf("Duplicate named logger: %s", err.Name)
}
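The three error types are checked by plain type assertion, so they apply to errors returned directly by this package; a short sketch (import path assumed as above, the handling is illustrative):

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

func describe(err error) {
	switch {
	case log.IsErrTimeout(err):
		fmt.Println("logger queue blocked for too long:", err)
	case log.IsErrUnknownProvider(err):
		fmt.Println("no such logger provider registered:", err)
	case log.IsErrDuplicateName(err):
		fmt.Println("a sub-logger with that name already exists:", err)
	default:
		fmt.Println(err)
	}
}

func main() {
	describe(log.ErrTimeout{Name: "console", Provider: "console"})
}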
335  modules/log/event.go  Normal file
@@ -0,0 +1,335 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"sync"
	"time"
)

// Event represents a logging event
type Event struct {
	level      Level
	msg        string
	caller     string
	filename   string
	line       int
	time       time.Time
	stacktrace string
}

// EventLogger represents the behaviours of a logger
type EventLogger interface {
	LogEvent(event *Event) error
	Close()
	Flush()
	GetLevel() Level
	GetStacktraceLevel() Level
	GetName() string
}

// ChannelledLog represents a cached channel to a LoggerProvider
type ChannelledLog struct {
	name           string
	provider       string
	queue          chan *Event
	loggerProvider LoggerProvider
	flush          chan bool
	close          chan bool
	closed         chan bool
}

// NewChannelledLog a new logger instance with given logger provider and config.
func NewChannelledLog(name, provider, config string, bufferLength int64) (*ChannelledLog, error) {
	if log, ok := providers[provider]; ok {
		l := &ChannelledLog{
			queue:  make(chan *Event, bufferLength),
			flush:  make(chan bool),
			close:  make(chan bool),
			closed: make(chan bool),
		}
		l.loggerProvider = log()
		if err := l.loggerProvider.Init(config); err != nil {
			return nil, err
		}
		l.name = name
		l.provider = provider
		go l.Start()
		return l, nil
	}
	return nil, ErrUnknownProvider{provider}
}

// Start processing the ChannelledLog
func (l *ChannelledLog) Start() {
	for {
		select {
		case event, ok := <-l.queue:
			if !ok {
				l.closeLogger()
				return
			}
			l.loggerProvider.LogEvent(event)
		case _, ok := <-l.flush:
			if !ok {
				l.closeLogger()
				return
			}
			l.loggerProvider.Flush()
		case _, _ = <-l.close:
			l.closeLogger()
			return
		}
	}
}

// LogEvent logs an event to this ChannelledLog
func (l *ChannelledLog) LogEvent(event *Event) error {
	select {
	case l.queue <- event:
		return nil
	case <-time.After(60 * time.Second):
		// We're blocked!
		return ErrTimeout{
			Name:     l.name,
			Provider: l.provider,
		}
	}
}

func (l *ChannelledLog) closeLogger() {
	l.loggerProvider.Flush()
	l.loggerProvider.Close()
	l.closed <- true
	return
}

// Close this ChannelledLog
func (l *ChannelledLog) Close() {
	l.close <- true
	<-l.closed
}

// Flush this ChannelledLog
func (l *ChannelledLog) Flush() {
	l.flush <- true
}

// GetLevel gets the level of this ChannelledLog
func (l *ChannelledLog) GetLevel() Level {
	return l.loggerProvider.GetLevel()
}

// GetStacktraceLevel gets the level of this ChannelledLog
func (l *ChannelledLog) GetStacktraceLevel() Level {
	return l.loggerProvider.GetStacktraceLevel()
}

// GetName returns the name of this ChannelledLog
func (l *ChannelledLog) GetName() string {
	return l.name
}
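A ChannelledLog is created by provider name, so anything registered via Register (for example "console" above) can be wrapped; a minimal sketch (import path assumed as above; the name, config and buffer length are illustrative, and events are normally fed in by the higher-level logger rather than constructed by hand, since Event's fields are unexported):

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

func main() {
	l, err := log.NewChannelledLog("console-example", "console", `{"level": "info"}`, 1000)
	if err != nil {
		fmt.Println("could not create logger:", err)
		return
	}
	l.Flush() // forces the wrapped provider to flush
	l.Close() // flushes again and shuts the provider down
}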
|
||||||
|
// MultiChannelledLog represents a cached channel to a LoggerProvider
|
||||||
|
type MultiChannelledLog struct {
|
||||||
|
name string
|
||||||
|
bufferLength int64
|
||||||
|
queue chan *Event
|
||||||
|
mutex sync.Mutex
|
||||||
|
loggers map[string]EventLogger
|
||||||
|
flush chan bool
|
||||||
|
close chan bool
|
||||||
|
started bool
|
||||||
|
level Level
|
||||||
|
stacktraceLevel Level
|
||||||
|
closed chan bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewMultiChannelledLog a new logger instance with given logger provider and config.
|
||||||
|
func NewMultiChannelledLog(name string, bufferLength int64) *MultiChannelledLog {
|
||||||
|
m := &MultiChannelledLog{
|
||||||
|
name: name,
|
||||||
|
queue: make(chan *Event, bufferLength),
|
||||||
|
flush: make(chan bool),
|
||||||
|
bufferLength: bufferLength,
|
||||||
|
loggers: make(map[string]EventLogger),
|
||||||
|
level: NONE,
|
||||||
|
stacktraceLevel: NONE,
|
||||||
|
close: make(chan bool),
|
||||||
|
closed: make(chan bool),
|
||||||
|
}
|
||||||
|
return m
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddLogger adds a logger to this MultiChannelledLog
|
||||||
|
func (m *MultiChannelledLog) AddLogger(logger EventLogger) error {
|
||||||
|
m.mutex.Lock()
|
||||||
|
name := logger.GetName()
|
||||||
|
if _, has := m.loggers[name]; has {
|
||||||
|
m.mutex.Unlock()
|
||||||
|
return ErrDuplicateName{name}
|
||||||
|
}
|
||||||
|
m.loggers[name] = logger
|
||||||
|
if logger.GetLevel() < m.level {
|
||||||
|
m.level = logger.GetLevel()
|
||||||
|
}
|
||||||
|
if logger.GetStacktraceLevel() < m.stacktraceLevel {
|
||||||
|
m.stacktraceLevel = logger.GetStacktraceLevel()
|
||||||
|
}
|
||||||
|
m.mutex.Unlock()
|
||||||
|
go m.Start()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DelLogger removes a sub logger from this MultiChannelledLog
|
||||||
|
// NB: If you delete the last sublogger this logger will simply drop
|
||||||
|
// log events
|
||||||
|
func (m *MultiChannelledLog) DelLogger(name string) bool {
|
||||||
|
m.mutex.Lock()
|
||||||
|
logger, has := m.loggers[name]
|
||||||
|
if !has {
|
||||||
|
m.mutex.Unlock()
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
delete(m.loggers, name)
|
||||||
|
m.internalResetLevel()
|
||||||
|
m.mutex.Unlock()
|
||||||
|
logger.Flush()
|
||||||
|
logger.Close()
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEventLogger returns a sub logger from this MultiChannelledLog
|
||||||
|
func (m *MultiChannelledLog) GetEventLogger(name string) EventLogger {
|
||||||
|
m.mutex.Lock()
|
||||||
|
defer m.mutex.Unlock()
|
||||||
|
return m.loggers[name]
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetEventLoggerNames returns a list of names
func (m *MultiChannelledLog) GetEventLoggerNames() []string {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	var keys []string
	for k := range m.loggers {
		keys = append(keys, k)
	}
	return keys
}

func (m *MultiChannelledLog) closeLoggers() {
	m.mutex.Lock()
	for _, logger := range m.loggers {
		logger.Flush()
		logger.Close()
	}
	m.mutex.Unlock()
	m.closed <- true
}

// Start processing the MultiChannelledLog
func (m *MultiChannelledLog) Start() {
	m.mutex.Lock()
	if m.started {
		m.mutex.Unlock()
		return
	}
	m.started = true
	m.mutex.Unlock()
	for {
		select {
		case event, ok := <-m.queue:
			if !ok {
				m.closeLoggers()
				return
			}
			m.mutex.Lock()
			for _, logger := range m.loggers {
				err := logger.LogEvent(event)
				if err != nil {
					fmt.Println(err)
				}
			}
			m.mutex.Unlock()
		case _, ok := <-m.flush:
			if !ok {
				m.closeLoggers()
				return
			}
			m.mutex.Lock()
			for _, logger := range m.loggers {
				logger.Flush()
			}
			m.mutex.Unlock()
		case <-m.close:
			m.closeLoggers()
			return
		}
	}
}

// LogEvent logs an event to this MultiChannelledLog
func (m *MultiChannelledLog) LogEvent(event *Event) error {
	select {
	case m.queue <- event:
		return nil
	case <-time.After(60 * time.Second):
		// We're blocked!
		return ErrTimeout{
			Name:     m.name,
			Provider: "MultiChannelledLog",
		}
	}
}

// Close this MultiChannelledLog
func (m *MultiChannelledLog) Close() {
	m.close <- true
	<-m.closed
}

// Flush this MultiChannelledLog
func (m *MultiChannelledLog) Flush() {
	m.flush <- true
}

// GetLevel gets the level of this MultiChannelledLog
func (m *MultiChannelledLog) GetLevel() Level {
	return m.level
}

// GetStacktraceLevel gets the stacktrace level of this MultiChannelledLog
func (m *MultiChannelledLog) GetStacktraceLevel() Level {
	return m.stacktraceLevel
}

func (m *MultiChannelledLog) internalResetLevel() Level {
	m.level = NONE
	for _, logger := range m.loggers {
		level := logger.GetLevel()
		if level < m.level {
			m.level = level
		}
		level = logger.GetStacktraceLevel()
		if level < m.stacktraceLevel {
			m.stacktraceLevel = level
		}
	}
	return m.level
}

// ResetLevel will reset the level of this MultiChannelledLog
func (m *MultiChannelledLog) ResetLevel() Level {
	m.mutex.Lock()
	defer m.mutex.Unlock()
	return m.internalResetLevel()
}

// GetName gets the name of this MultiChannelledLog
func (m *MultiChannelledLog) GetName() string {
	return m.name
}
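LogEvent above guards the queue send with a select on time.After, so a saturated queue surfaces as an ErrTimeout rather than blocking the caller indefinitely. A minimal, self-contained sketch of that pattern in isolation (illustrative only; the channel, payload type and timeout below are not part of this commit):

```go
package main

import (
	"fmt"
	"time"
)

// trySend attempts to enqueue v, giving up after the supplied timeout.
// It mirrors the select/time.After guard used by LogEvent above, which
// returns ErrTimeout instead of blocking forever on a full queue.
func trySend(queue chan<- string, v string, timeout time.Duration) error {
	select {
	case queue <- v:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("send timed out after %v", timeout)
	}
}

func main() {
	q := make(chan string) // unbuffered and never drained, so the send must time out
	if err := trySend(q, "event", 50*time.Millisecond); err != nil {
		fmt.Println(err)
	}
}
```

The trade-off is the same as in the logger: after the timeout the message is dropped with an error instead of stalling the producer.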
@ -5,10 +5,11 @@
package log

import (
	"bufio"
	"compress/gzip"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"

@ -16,10 +17,10 @@ import (
	"time"
)

// FileLogWriter implements LoggerInterface.
// FileLogger implements LoggerProvider.
// It writes messages by lines limit, file size limit, or time frequency.
type FileLogWriter struct {
type FileLogger struct {
	*log.Logger
	BaseLogger
	mw *MuxWriter
	// The opened file
	Filename string `json:"filename"`

@ -35,47 +36,57 @@ type FileLogWriter struct {

	Rotate bool `json:"rotate"`

	startLock sync.Mutex // Only one log can write to the file
	Compress         bool `json:"compress"`
	CompressionLevel int  `json:"compressionLevel"`

	Level int `json:"level"`
	startLock sync.Mutex // Only one log can write to the file
}

// MuxWriter an *os.File writer with locker.
type MuxWriter struct {
	sync.Mutex
	mu sync.Mutex
	fd *os.File
	owner *FileLogger
}

// Write writes to os.File.
func (l *MuxWriter) Write(b []byte) (int, error) {
func (mw *MuxWriter) Write(b []byte) (int, error) {
	l.Lock()
	mw.mu.Lock()
	defer l.Unlock()
	defer mw.mu.Unlock()
	return l.fd.Write(b)
	mw.owner.docheck(len(b))
	return mw.fd.Write(b)
}

// Close the internal writer
func (mw *MuxWriter) Close() error {
	return mw.fd.Close()
}

// SetFd sets os.File in writer.
func (l *MuxWriter) SetFd(fd *os.File) {
func (mw *MuxWriter) SetFd(fd *os.File) {
	if l.fd != nil {
	if mw.fd != nil {
		l.fd.Close()
		mw.fd.Close()
	}
	l.fd = fd
	mw.fd = fd
}

// NewFileWriter create a FileLogWriter returning as LoggerInterface.
// NewFileLogger create a FileLogger returning as LoggerProvider.
func NewFileWriter() LoggerInterface {
func NewFileLogger() LoggerProvider {
	w := &FileLogWriter{
	log := &FileLogger{
		Filename: "",
		Maxsize:  1 << 28, //256 MB
		Daily:    true,
		Maxdays:  7,
		Rotate:   true,
		Level:    TRACE,
		Compress:         true,
		CompressionLevel: gzip.DefaultCompression,
	}
	log.Level = TRACE
	// use MuxWriter instead direct use os.File for lock write when rotate
	w.mw = new(MuxWriter)
	log.mw = new(MuxWriter)
	// set MuxWriter as Logger's io.Writer
	log.mw.owner = log
	w.Logger = log.New(w.mw, "", log.Ldate|log.Ltime)
	return w
	return log
}

// Init file logger with json config.

@ -87,109 +98,131 @@ func NewFileWriter() LoggerInterface {
// "maxdays":15,
// "rotate":true
// }
func (w *FileLogWriter) Init(config string) error {
func (log *FileLogger) Init(config string) error {
	if err := json.Unmarshal([]byte(config), w); err != nil {
	if err := json.Unmarshal([]byte(config), log); err != nil {
		return err
	}
	if len(w.Filename) == 0 {
	if len(log.Filename) == 0 {
		return errors.New("config must have filename")
	}
	return w.StartLogger()
	// set MuxWriter as Logger's io.Writer
	log.createLogger(log.mw)
	return log.StartLogger()
}

// StartLogger start file logger. create log file and set to locker-inside file writer.
func (w *FileLogWriter) StartLogger() error {
func (log *FileLogger) StartLogger() error {
	fd, err := w.createLogFile()
	fd, err := log.createLogFile()
	if err != nil {
		return err
	}
	w.mw.SetFd(fd)
	log.mw.SetFd(fd)
	return w.initFd()
	return log.initFd()
}

func (w *FileLogWriter) docheck(size int) {
func (log *FileLogger) docheck(size int) {
	w.startLock.Lock()
	log.startLock.Lock()
	defer w.startLock.Unlock()
	defer log.startLock.Unlock()
	if w.Rotate && ((w.Maxsize > 0 && w.maxsizeCursize >= w.Maxsize) ||
	if log.Rotate && ((log.Maxsize > 0 && log.maxsizeCursize >= log.Maxsize) ||
		(w.Daily && time.Now().Day() != w.dailyOpenDate)) {
		(log.Daily && time.Now().Day() != log.dailyOpenDate)) {
		if err := w.DoRotate(); err != nil {
		if err := log.DoRotate(); err != nil {
			fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
			fmt.Fprintf(os.Stderr, "FileLogger(%q): %s\n", log.Filename, err)
			return
		}
	}
	w.maxsizeCursize += size
	log.maxsizeCursize += size
}

// WriteMsg writes logger message into file.
func (log *FileLogger) createLogFile() (*os.File, error) {
func (w *FileLogWriter) WriteMsg(msg string, skip, level int) error {
	if level < w.Level {
		return nil
	}
	n := 24 + len(msg) // 24 stand for the length "2013/06/23 21:00:22 [T] "
	w.docheck(n)
	w.Logger.Println(msg)
	return nil
}

func (w *FileLogWriter) createLogFile() (*os.File, error) {
	// Open the log file
	return os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
	return os.OpenFile(log.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
}

func (w *FileLogWriter) initFd() error {
func (log *FileLogger) initFd() error {
	fd := w.mw.fd
	fd := log.mw.fd
	finfo, err := fd.Stat()
	if err != nil {
		return fmt.Errorf("get stat: %v", err)
	}
	w.maxsizeCursize = int(finfo.Size())
	log.maxsizeCursize = int(finfo.Size())
	w.dailyOpenDate = time.Now().Day()
	log.dailyOpenDate = time.Now().Day()
	return nil
}

// DoRotate means it need to write file in new file.
// new file name like xx.log.2013-01-01.2
func (w *FileLogWriter) DoRotate() error {
func (log *FileLogger) DoRotate() error {
	_, err := os.Lstat(w.Filename)
	_, err := os.Lstat(log.Filename)
	if err == nil { // file exists
		// Find the next available number
		num := 1
		fname := ""
		for ; err == nil && num <= 999; num++ {
			fname = w.Filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num)
			fname = log.Filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num)
			_, err = os.Lstat(fname)
			if log.Compress && err != nil {
				_, err = os.Lstat(fname + ".gz")
			}
		}
		// return error if the last file checked still existed
		if err == nil {
			return fmt.Errorf("rotate: cannot find free log number to rename %s", w.Filename)
			return fmt.Errorf("rotate: cannot find free log number to rename %s", log.Filename)
		}

		// block Logger's io.Writer
		fd := log.mw.fd
		w.mw.Lock()
		defer w.mw.Unlock()

		fd := w.mw.fd
		fd.Close()

		// close fd before rename
		// Rename the file to its newfound home
		if err = os.Rename(w.Filename, fname); err != nil {
		if err = os.Rename(log.Filename, fname); err != nil {
			return fmt.Errorf("Rotate: %v", err)
		}

		if log.Compress {
			go compressOldLogFile(fname, log.CompressionLevel)
		}

		// re-start logger
		if err = w.StartLogger(); err != nil {
		if err = log.StartLogger(); err != nil {
			return fmt.Errorf("Rotate StartLogger: %v", err)
		}

		go w.deleteOldLog()
		go log.deleteOldLog()
	}

	return nil
}

func (w *FileLogWriter) deleteOldLog() {
func compressOldLogFile(fname string, compressionLevel int) error {
	dir := filepath.Dir(w.Filename)
	reader, err := os.Open(fname)
	if err != nil {
		return err
	}
	defer reader.Close()
	buffer := bufio.NewReader(reader)
	fw, err := os.OpenFile(fname+".gz", os.O_WRONLY|os.O_CREATE, 0660)
	if err != nil {
		return err
	}
	defer fw.Close()
	zw, err := gzip.NewWriterLevel(fw, compressionLevel)
	if err != nil {
		return err
	}
	defer zw.Close()
	_, err = buffer.WriteTo(zw)
	if err != nil {
		zw.Close()
		fw.Close()
		os.Remove(fname + ".gz")
		return err
	}
	reader.Close()
	return os.Remove(fname)
}

func (log *FileLogger) deleteOldLog() {
	dir := filepath.Dir(log.Filename)
	filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
		defer func() {
			if r := recover(); r != nil {

@ -197,8 +230,8 @@ func (w *FileLogWriter) deleteOldLog() {
			}
		}()

		if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*w.Maxdays) {
		if !info.IsDir() && info.ModTime().Unix() < (time.Now().Unix()-60*60*24*log.Maxdays) {
			if strings.HasPrefix(filepath.Base(path), filepath.Base(w.Filename)) {
			if strings.HasPrefix(filepath.Base(path), filepath.Base(log.Filename)) {

				if err := os.Remove(path); err != nil {
					returnErr = fmt.Errorf("Failed to remove %s: %v", path, err)

@ -209,18 +242,18 @@ func (w *FileLogWriter) deleteOldLog() {
	})
}

// Destroy destroy file logger, close file writer.
func (w *FileLogWriter) Destroy() {
	w.mw.fd.Close()
}

// Flush flush file logger.
// there are no buffering messages in file logger in memory.
// flush file means sync file from disk.
func (w *FileLogWriter) Flush() {
func (log *FileLogger) Flush() {
	w.mw.fd.Sync()
	log.mw.fd.Sync()
}

// GetName returns the default name for this implementation
func (log *FileLogger) GetName() string {
	return "file"
}

func init() {
	Register("file", NewFileWriter)
	Register("file", NewFileLogger)
}
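For reference, the JSON that FileLogger.Init unmarshals mirrors the json struct tags shown above. A hedged sketch of configuring the provider from outside the package (the import path is assumed to be Gitea's usual module path, and the values are illustrative rather than defaults taken from this commit):

```go
package main

import (
	"compress/gzip"
	"fmt"

	"code.gitea.io/gitea/modules/log" // assumed import path for this module
)

func main() {
	// Keys mirror the json tags on FileLogger; values here are illustrative.
	config := fmt.Sprintf(`{"level":"info","filename":"gitea.log","rotate":true,"maxsize":%d,"maxdays":7,"compress":true,"compressionLevel":%d}`,
		1<<28, gzip.DefaultCompression)

	provider := log.NewFileLogger()
	if err := provider.Init(config); err != nil {
		panic(err)
	}

	// The provider registers itself under this name in init() above.
	if fileLogger, ok := provider.(*log.FileLogger); ok {
		fmt.Println(fileLogger.GetName()) // "file"
	}
}
```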
modules/log/file_test.go (new file, 247 lines)
@ -0,0 +1,247 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestFileLoggerFails(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "TestFileLogger")
	assert.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	//filename := filepath.Join(tmpDir, "test.log")

	fileLogger := NewFileLogger()
	//realFileLogger, ok := fileLogger.(*FileLogger)
	//assert.Equal(t, true, ok)

	// Fail if there is bad json
	err = fileLogger.Init("{")
	assert.Error(t, err)

	// Fail if there is no filename
	err = fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\"}", prefix, level.String(), flags, ""))
	assert.Error(t, err)

	// Fail if the file isn't a filename
	err = fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\"}", prefix, level.String(), flags, filepath.ToSlash(tmpDir)))
	assert.Error(t, err)

}

func TestFileLogger(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "TestFileLogger")
	assert.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	filename := filepath.Join(tmpDir, "test.log")

	fileLogger := NewFileLogger()
	realFileLogger, ok := fileLogger.(*FileLogger)
	assert.Equal(t, true, ok)

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)

	fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\",\"maxsize\":%d,\"compress\":false}", prefix, level.String(), flags, filepath.ToSlash(filename), len(expected)*2))

	assert.Equal(t, flags, realFileLogger.Flags)
	assert.Equal(t, level, realFileLogger.Level)
	assert.Equal(t, level, fileLogger.GetLevel())

	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err := ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	event.level = DEBUG
	expected = expected + ""
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	event.level = TRACE
	expected = expected + ""
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	event.level = WARN
	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// Should rotate
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename + fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), 1))
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	assert.Equal(t, expected, string(logData))

	for num := 2; num <= 999; num++ {
		file, err := os.OpenFile(filename+fmt.Sprintf(".%s.%03d", time.Now().Format("2006-01-02"), num), os.O_RDONLY|os.O_CREATE, 0666)
		assert.NoError(t, err)
		file.Close()
	}
	err = realFileLogger.DoRotate()
	assert.Error(t, err)

	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// Should fail to rotate
	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	fileLogger.Close()
}

func TestCompressFileLogger(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "TestFileLogger")
	assert.NoError(t, err)
	defer os.RemoveAll(tmpDir)

	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	filename := filepath.Join(tmpDir, "test.log")

	fileLogger := NewFileLogger()
	realFileLogger, ok := fileLogger.(*FileLogger)
	assert.Equal(t, true, ok)

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)

	fileLogger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"filename\":\"%s\",\"maxsize\":%d,\"compress\":true}", prefix, level.String(), flags, filepath.ToSlash(filename), len(expected)*2))

	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err := ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	event.level = WARN
	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	fileLogger.LogEvent(&event)
	fileLogger.Flush()
	logData, err = ioutil.ReadFile(filename)
	assert.NoError(t, err)
	assert.Equal(t, expected, string(logData))

	// Should rotate
	fileLogger.LogEvent(&event)
	fileLogger.Flush()

	for num := 2; num <= 999; num++ {
		file, err := os.OpenFile(filename+fmt.Sprintf(".%s.%03d.gz", time.Now().Format("2006-01-02"), num), os.O_RDONLY|os.O_CREATE, 0666)
		assert.NoError(t, err)
		file.Close()
	}
	err = realFileLogger.DoRotate()
	assert.Error(t, err)
}

func TestCompressOldFile(t *testing.T) {
	tmpDir, err := ioutil.TempDir("", "TestFileLogger")
	assert.NoError(t, err)
	defer os.RemoveAll(tmpDir)
	fname := filepath.Join(tmpDir, "test")
	nonGzip := filepath.Join(tmpDir, "test-nonGzip")

	f, err := os.OpenFile(fname, os.O_CREATE|os.O_WRONLY, 0660)
	assert.NoError(t, err)
	ng, err := os.OpenFile(nonGzip, os.O_CREATE|os.O_WRONLY, 0660)
	assert.NoError(t, err)

	for i := 0; i < 999; i++ {
		f.WriteString("This is a test file\n")
		ng.WriteString("This is a test file\n")
	}
	f.Close()
	ng.Close()

	err = compressOldLogFile(fname, -1)
	assert.NoError(t, err)

	_, err = os.Lstat(fname + ".gz")
	assert.NoError(t, err)

	f, err = os.Open(fname + ".gz")
	assert.NoError(t, err)
	zr, err := gzip.NewReader(f)
	assert.NoError(t, err)
	data, err := ioutil.ReadAll(zr)
	assert.NoError(t, err)
	original, err := ioutil.ReadFile(nonGzip)
	assert.NoError(t, err)
	assert.Equal(t, original, data)
}
modules/log/level.go (new file, 111 lines)
@ -0,0 +1,111 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// Level is the level of the logger
type Level int

const (
	// TRACE represents the lowest log level
	TRACE Level = iota
	// DEBUG is for debug logging
	DEBUG
	// INFO is for information
	INFO
	// WARN is for warning information
	WARN
	// ERROR is for error reporting
	ERROR
	// CRITICAL is for critical errors
	CRITICAL
	// FATAL is for fatal errors
	FATAL
	// NONE is for no logging
	NONE
)

var toString = map[Level]string{
	TRACE:    "trace",
	DEBUG:    "debug",
	INFO:     "info",
	WARN:     "warn",
	ERROR:    "error",
	CRITICAL: "critical",
	FATAL:    "fatal",
	NONE:     "none",
}

var toLevel = map[string]Level{
	"trace":    TRACE,
	"debug":    DEBUG,
	"info":     INFO,
	"warn":     WARN,
	"error":    ERROR,
	"critical": CRITICAL,
	"fatal":    FATAL,
	"none":     NONE,
}

// Levels returns all the possible logging levels
func Levels() []string {
	keys := make([]string, 0)
	for key := range toLevel {
		keys = append(keys, key)
	}
	return keys
}

func (l Level) String() string {
	s, ok := toString[l]
	if ok {
		return s
	}
	return "info"
}

// MarshalJSON takes a Level and turns it into text
func (l Level) MarshalJSON() ([]byte, error) {
	buffer := bytes.NewBufferString(`"`)
	buffer.WriteString(toString[l])
	buffer.WriteString(`"`)
	return buffer.Bytes(), nil
}

// FromString takes a level string and returns a Level
func FromString(level string) Level {
	temp, ok := toLevel[strings.ToLower(level)]
	if !ok {
		return INFO
	}
	return temp
}

// UnmarshalJSON takes text and turns it into a Level
func (l *Level) UnmarshalJSON(b []byte) error {
	var tmp interface{}
	err := json.Unmarshal(b, &tmp)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Err: %v", err)
		return err
	}

	switch v := tmp.(type) {
	case string:
		*l = FromString(string(v))
	case int:
		*l = FromString(Level(v).String())
	default:
		*l = INFO
	}
	return nil
}
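Because Level implements both fmt.Stringer and the JSON (un)marshalling interfaces, levels round-trip through configuration as lowercase strings, and unknown input degrades to INFO rather than failing. A small sketch of that behaviour (the import path is assumed):

```go
package main

import (
	"encoding/json"
	"fmt"

	"code.gitea.io/gitea/modules/log" // assumed import path
)

func main() {
	// String <-> Level conversions fall back to INFO on unknown input.
	fmt.Println(log.FromString("warn"))     // warn
	fmt.Println(log.FromString("nonsense")) // info

	// Levels marshal to their lowercase names, matching the toString map above.
	b, _ := json.Marshal(struct {
		Level log.Level `json:"level"`
	}{Level: log.ERROR})
	fmt.Println(string(b)) // {"level":"error"}
}
```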
modules/log/level_test.go (new file, 55 lines)
@ -0,0 +1,55 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"encoding/json"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

type testLevel struct {
	Level Level `json:"level"`
}

func TestLevelMarshalUnmarshalJSON(t *testing.T) {
	levelBytes, err := json.Marshal(testLevel{
		Level: INFO,
	})
	assert.NoError(t, err)
	assert.Equal(t, string(makeTestLevelBytes(INFO.String())), string(levelBytes))

	var testLevel testLevel
	err = json.Unmarshal(levelBytes, &testLevel)
	assert.NoError(t, err)
	assert.Equal(t, INFO, testLevel.Level)

	err = json.Unmarshal(makeTestLevelBytes(`FOFOO`), &testLevel)
	assert.NoError(t, err)
	assert.Equal(t, INFO, testLevel.Level)

	err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 2)), &testLevel)
	assert.NoError(t, err)
	assert.Equal(t, INFO, testLevel.Level)

	err = json.Unmarshal([]byte(fmt.Sprintf(`{"level":%d}`, 10012)), &testLevel)
	assert.NoError(t, err)
	assert.Equal(t, INFO, testLevel.Level)

	err = json.Unmarshal([]byte(`{"level":{}}`), &testLevel)
	assert.NoError(t, err)
	assert.Equal(t, INFO, testLevel.Level)

	assert.Equal(t, INFO.String(), Level(1001).String())

	err = json.Unmarshal([]byte(`{"level":{}`), &testLevel.Level)
	assert.Error(t, err)
}

func makeTestLevelBytes(level string) []byte {
	return []byte(fmt.Sprintf(`{"level":"%s"}`, level))
}
@ -8,48 +8,68 @@ import (
	"fmt"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"strings"

	"golang.org/x/sync/syncmap"
)

var (
	loggers []*Logger
	// DEFAULT is the name of the default logger
	DEFAULT = "default"
	// NamedLoggers map of named loggers
	NamedLoggers = make(map[string]*Logger)
	// GitLogger logger for git
	GitLogger *Logger
	prefix string
)

// NewLogger create a logger
// NewLogger create a logger for the default logger
func NewLogger(bufLen int64, mode, config string) {
func NewLogger(bufLen int64, name, provider, config string) *Logger {
	logger := newLogger(bufLen)
	err := NewNamedLogger(DEFAULT, bufLen, name, provider, config)
	if err != nil {
		CriticalWithSkip(1, "Unable to create default logger: %v", err)
		panic(err)
	}
	return NamedLoggers[DEFAULT]
}

	isExist := false
// NewNamedLogger creates a new named logger for a given configuration
	for i, l := range loggers {
func NewNamedLogger(name string, bufLen int64, subname, provider, config string) error {
		if l.adapter == mode {
	logger, ok := NamedLoggers[name]
			isExist = true
	if !ok {
			loggers[i] = logger
		logger = newLogger(name, bufLen)
		}
		NamedLoggers[name] = logger
	}
	if !isExist {
		loggers = append(loggers, logger)
	return logger.SetLogger(subname, provider, config)
	}
}
	if err := logger.SetLogger(mode, config); err != nil {
// DelNamedLogger closes and deletes the named logger
		Fatal(2, "Failed to set logger (%s): %v", mode, err)
func DelNamedLogger(name string) {
	l, ok := NamedLoggers[name]
	if ok {
		delete(NamedLoggers, name)
		l.Close()
	}
}

// DelLogger removes loggers that are for the given mode
// DelLogger removes the named sublogger from the default logger
func DelLogger(mode string) error {
func DelLogger(name string) error {
	for _, l := range loggers {
	logger := NamedLoggers[DEFAULT]
		if _, ok := l.outputs.Load(mode); ok {
	found, err := logger.DelLogger(name)
			return l.DelLogger(mode)
	if !found {
		}
		Trace("Log %s not found, no need to delete", name)
	}
	return err
}

	Trace("Log adapter %s not found, no need to delete", mode)
// GetLogger returns either a named logger or the default logger
	return nil
func GetLogger(name string) *Logger {
	logger, ok := NamedLoggers[name]
	if ok {
		return logger
	}
	return NamedLoggers[DEFAULT]
}

// NewGitLogger create a logger for git

@ -58,333 +78,168 @@ func NewGitLogger(logPath string) {
	path := path.Dir(logPath)

	if err := os.MkdirAll(path, os.ModePerm); err != nil {
		Fatal(4, "Failed to create dir %s: %v", path, err)
		Fatal("Failed to create dir %s: %v", path, err)
	}

	GitLogger = newLogger(0)
	GitLogger = newLogger("git", 0)
	GitLogger.SetLogger("file", fmt.Sprintf(`{"level":0,"filename":"%s","rotate":false}`, logPath))
	GitLogger.SetLogger("file", "file", fmt.Sprintf(`{"level":"TRACE","filename":"%s","rotate":false}`, logPath))
}

// GetLevel returns the minimum logger level
func GetLevel() Level {
	return NamedLoggers[DEFAULT].GetLevel()
}

// GetStacktraceLevel returns the minimum logger level
func GetStacktraceLevel() Level {
	return NamedLoggers[DEFAULT].GetStacktraceLevel()
}

// Trace records trace log
func Trace(format string, v ...interface{}) {
	for _, logger := range loggers {
	Log(1, TRACE, format, v...)
		logger.Trace(format, v...)
}
	}
// IsTrace returns true if at least one logger is TRACE
func IsTrace() bool {
	return GetLevel() <= TRACE
}

// Debug records debug log
func Debug(format string, v ...interface{}) {
	for _, logger := range loggers {
	Log(1, DEBUG, format, v...)
		logger.Debug(format, v...)
}
	}
// IsDebug returns true if at least one logger is DEBUG
func IsDebug() bool {
	return GetLevel() <= DEBUG
}

// Info records info log
func Info(format string, v ...interface{}) {
	for _, logger := range loggers {
	Log(1, INFO, format, v...)
		logger.Info(format, v...)
}
	}
// IsInfo returns true if at least one logger is INFO
func IsInfo() bool {
	return GetLevel() <= INFO
}

// Warn records warning log
func Warn(format string, v ...interface{}) {
	for _, logger := range loggers {
	Log(1, WARN, format, v...)
		logger.Warn(format, v...)
}
	}
// IsWarn returns true if at least one logger is WARN
func IsWarn() bool {
	return GetLevel() <= WARN
}

// Error records error log
func Error(skip int, format string, v ...interface{}) {
func Error(format string, v ...interface{}) {
	for _, logger := range loggers {
	Log(1, ERROR, format, v...)
		logger.Error(skip, format, v...)
}
	}
// ErrorWithSkip records error log from "skip" calls back from this function
func ErrorWithSkip(skip int, format string, v ...interface{}) {
	Log(skip+1, ERROR, format, v...)
}

// IsError returns true if at least one logger is ERROR
func IsError() bool {
	return GetLevel() <= ERROR
}

// Critical records critical log
func Critical(skip int, format string, v ...interface{}) {
func Critical(format string, v ...interface{}) {
	for _, logger := range loggers {
	Log(1, CRITICAL, format, v...)
		logger.Critical(skip, format, v...)
	}
}

// Fatal records error log and exit process
// CriticalWithSkip records critical log from "skip" calls back from this function
func Fatal(skip int, format string, v ...interface{}) {
func CriticalWithSkip(skip int, format string, v ...interface{}) {
	Error(skip, format, v...)
	Log(skip+1, CRITICAL, format, v...)
	for _, l := range loggers {
}
		l.Close()
	}
// IsCritical returns true if at least one logger is CRITICAL
func IsCritical() bool {
	return GetLevel() <= CRITICAL
}

// Fatal records fatal log and exit process
func Fatal(format string, v ...interface{}) {
	Log(1, FATAL, format, v...)
	Close()
	os.Exit(1)
}

// FatalWithSkip records fatal log from "skip" calls back from this function
func FatalWithSkip(skip int, format string, v ...interface{}) {
	Log(skip+1, FATAL, format, v...)
	Close()
	os.Exit(1)
}

// IsFatal returns true if at least one logger is FATAL
func IsFatal() bool {
	return GetLevel() <= FATAL
}

// Close closes all the loggers
func Close() {
	for _, l := range loggers {
	l, ok := NamedLoggers[DEFAULT]
		l.Close()
	if !ok {
		return
	}
	delete(NamedLoggers, DEFAULT)
	l.Close()
}

// Log a message with defined skip and at logging level
// A skip of 0 refers to the caller of this command
func Log(skip int, level Level, format string, v ...interface{}) {
	l, ok := NamedLoggers[DEFAULT]
	if ok {
		l.Log(skip+1, level, format, v...)
	}
}

// .___ __ _____
// | | _____/ |_ ____________/ ____\____ ____ ____
// | |/ \ __\/ __ \_ __ \ __\\__ \ _/ ___\/ __ \
// | | | \ | \ ___/| | \/| | / __ \\ \__\ ___/
// |___|___| /__| \___ >__| |__| (____ /\___ >___ >
// \/ \/ \/ \/ \/

// LogLevel level type for log
//type LogLevel int

// log levels
const (
	TRACE = iota
	DEBUG
	INFO
	WARN
	ERROR
	CRITICAL
	FATAL
)

// LoggerInterface represents behaviors of a logger provider.
type LoggerInterface interface {
	Init(config string) error
	WriteMsg(msg string, skip, level int) error
	Destroy()
	Flush()
}

type loggerType func() LoggerInterface

// LoggerAsWriter is a io.Writer shim around the gitea log
type LoggerAsWriter struct {
	level int
	ourLoggers []*Logger
	level      Level
}

// NewLoggerAsWriter creates a Writer representation of the logger with setable log level
func NewLoggerAsWriter(level string) *LoggerAsWriter {
func NewLoggerAsWriter(level string, ourLoggers ...*Logger) *LoggerAsWriter {
	l := &LoggerAsWriter{}
	if len(ourLoggers) == 0 {
	switch strings.ToUpper(level) {
		ourLoggers = []*Logger{NamedLoggers[DEFAULT]}
	case "TRACE":
	}
		l.level = TRACE
	l := &LoggerAsWriter{
	case "DEBUG":
		ourLoggers: ourLoggers,
		l.level = DEBUG
		level:      FromString(level),
	case "INFO":
		l.level = INFO
	case "WARN":
		l.level = WARN
	case "ERROR":
		l.level = ERROR
	case "CRITICAL":
		l.level = CRITICAL
	case "FATAL":
		l.level = FATAL
	default:
		l.level = INFO
	}
	return l
}

// Write implements the io.Writer interface to allow spoofing of macaron
func (l *LoggerAsWriter) Write(p []byte) (int, error) {
	l.Log(string(p))
	for _, logger := range l.ourLoggers {
		// Skip = 3 because this presumes that we have been called by log.Println()
		// If the caller has used log.Output or the like this will be wrong
		logger.Log(3, l.level, string(p))
	}
	return len(p), nil
}

// Log takes a given string and logs it at the set log-level
func (l *LoggerAsWriter) Log(msg string) {
	for _, logger := range loggers {
	for _, logger := range l.ourLoggers {
		logger.writerMsg(0, l.level, msg)
		// Set the skip to reference the call just above this
		logger.Log(1, l.level, msg)
	}
}

var adapters = make(map[string]loggerType)
func init() {
	_, filename, _, _ := runtime.Caller(0)
// Register registers given logger provider to adapters.
	prefix = strings.TrimSuffix(filename, "modules/log/log.go")
func Register(name string, log loggerType) {
	if log == nil {
		panic("log: register provider is nil")
	}
	if _, dup := adapters[name]; dup {
		panic("log: register called twice for provider \"" + name + "\"")
	}
	adapters[name] = log
}

type logMsg struct {
	skip, level int
	msg string
}

// Logger is default logger in beego application.
// it can contain several providers and log message into all providers.
type Logger struct {
	adapter string
	level int
	msg chan *logMsg
	outputs syncmap.Map
	quit chan bool
}

// newLogger initializes and returns a new logger.
func newLogger(buffer int64) *Logger {
	l := &Logger{
		msg: make(chan *logMsg, buffer),
		quit: make(chan bool),
	}
	go l.StartLogger()
	return l
}

// SetLogger sets new logger instance with given logger adapter and config.
func (l *Logger) SetLogger(adapter string, config string) error {
	if log, ok := adapters[adapter]; ok {
		lg := log()
		if err := lg.Init(config); err != nil {
			return err
		}
		l.outputs.Store(adapter, lg)
		l.adapter = adapter
	} else {
		panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
	}
	return nil
}

// DelLogger removes a logger adapter instance.
func (l *Logger) DelLogger(adapter string) error {
	if lg, ok := l.outputs.Load(adapter); ok {
		lg.(LoggerInterface).Destroy()
		l.outputs.Delete(adapter)
	} else {
		panic("log: unknown adapter \"" + adapter + "\" (forgotten register?)")
	}
	return nil
}

func (l *Logger) writerMsg(skip, level int, msg string) error {
	if l.level > level {
		return nil
	}
	lm := &logMsg{
		skip: skip,
		level: level,
	}

	// Only error information needs locate position for debugging.
	if lm.level >= ERROR {
		pc, file, line, ok := runtime.Caller(skip)
		if ok {
			// Get caller function name.
			fn := runtime.FuncForPC(pc)
			var fnName string
			if fn == nil {
				fnName = "?()"
			} else {
				fnName = strings.TrimLeft(filepath.Ext(fn.Name()), ".") + "()"
			}

			fileName := file
			if len(fileName) > 20 {
				fileName = "..." + fileName[len(fileName)-20:]
			}
			lm.msg = fmt.Sprintf("[%s:%d %s] %s", fileName, line, fnName, msg)
		} else {
			lm.msg = msg
		}
	} else {
		lm.msg = msg
	}
	l.msg <- lm
	return nil
}

// StartLogger starts logger chan reading.
func (l *Logger) StartLogger() {
	for {
		select {
		case bm := <-l.msg:
			l.outputs.Range(func(k, v interface{}) bool {
				if err := v.(LoggerInterface).WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
					fmt.Println("ERROR, unable to WriteMsg:", err)
				}
				return true
			})
		case <-l.quit:
			return
		}
	}
}

// Flush flushes all chan data.
func (l *Logger) Flush() {
	l.outputs.Range(func(k, v interface{}) bool {
		v.(LoggerInterface).Flush()
		return true
	})
}

// Close closes logger, flush all chan data and destroy all adapter instances.
func (l *Logger) Close() {
	l.quit <- true
	for {
		if len(l.msg) > 0 {
			bm := <-l.msg
			l.outputs.Range(func(k, v interface{}) bool {
				if err := v.(LoggerInterface).WriteMsg(bm.msg, bm.skip, bm.level); err != nil {
					fmt.Println("ERROR, unable to WriteMsg:", err)
				}
				return true
			})
		} else {
			break
		}
	}
	l.outputs.Range(func(k, v interface{}) bool {
		v.(LoggerInterface).Flush()
		v.(LoggerInterface).Destroy()
		return true
	})
}

// Trace records trace log
func (l *Logger) Trace(format string, v ...interface{}) {
	msg := fmt.Sprintf("[T] "+format, v...)
	l.writerMsg(0, TRACE, msg)
}

// Debug records debug log
func (l *Logger) Debug(format string, v ...interface{}) {
	msg := fmt.Sprintf("[D] "+format, v...)
	l.writerMsg(0, DEBUG, msg)
}

// Info records information log
func (l *Logger) Info(format string, v ...interface{}) {
	msg := fmt.Sprintf("[I] "+format, v...)
	l.writerMsg(0, INFO, msg)
}

// Warn records warning log
func (l *Logger) Warn(format string, v ...interface{}) {
	msg := fmt.Sprintf("[W] "+format, v...)
	l.writerMsg(0, WARN, msg)
}

// Error records error log
func (l *Logger) Error(skip int, format string, v ...interface{}) {
	msg := fmt.Sprintf("[E] "+format, v...)
	l.writerMsg(skip, ERROR, msg)
}

// Critical records critical log
func (l *Logger) Critical(skip int, format string, v ...interface{}) {
	msg := fmt.Sprintf("[C] "+format, v...)
	l.writerMsg(skip, CRITICAL, msg)
}

// Fatal records error log and exit the process
func (l *Logger) Fatal(skip int, format string, v ...interface{}) {
	msg := fmt.Sprintf("[F] "+format, v...)
	l.writerMsg(skip, FATAL, msg)
	l.Close()
	os.Exit(1)
}
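Taken together, the new package-level API builds a registry of named loggers around the DEFAULT logger instead of a flat slice of adapters. A sketch of typical wiring (the buffer size, sublogger names and JSON configs are illustrative, and the import path is assumed):

```go
package main

import (
	"code.gitea.io/gitea/modules/log" // assumed import path
)

func main() {
	// Default logger: one "console" sublogger backed by the console provider.
	log.NewLogger(1000, "console", "console", `{"level":"info"}`)

	// A separate named logger, e.g. for the router, with its own level.
	if err := log.NewNamedLogger("router", 1000, "console", "console", `{"level":"trace"}`); err != nil {
		log.Fatal("Unable to create router logger: %v", err)
	}

	log.Info("default logger says hello")
	log.GetLogger("router").Warn("router logger says hello")
}
```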
modules/log/log_test.go (new file, 154 lines)
@ -0,0 +1,154 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

func baseConsoleTest(t *testing.T, logger *Logger) (chan []byte, chan bool) {
	written := make(chan []byte)
	closed := make(chan bool)

	c := CallbackWriteCloser{
		callback: func(p []byte, close bool) {
			written <- p
			closed <- close
		},
	}
	m := logger.MultiChannelledLog

	channelledLog := m.GetEventLogger("console")
	assert.NotEmpty(t, channelledLog)
	realChanLog, ok := channelledLog.(*ChannelledLog)
	assert.Equal(t, true, ok)
	realCL, ok := realChanLog.loggerProvider.(*ConsoleLogger)
	assert.Equal(t, true, ok)
	assert.Equal(t, INFO, realCL.Level)
	realCL.out = c

	format := "test: %s"
	args := []interface{}{"A"}

	logger.Log(0, INFO, format, args...)
	line := <-written
	assert.Contains(t, string(line), fmt.Sprintf(format, args...))
	assert.Equal(t, false, <-closed)

	format = "test2: %s"
	logger.Warn(format, args...)
	line = <-written

	assert.Contains(t, string(line), fmt.Sprintf(format, args...))
	assert.Equal(t, false, <-closed)

	format = "testerror: %s"
	logger.Error(format, args...)
	line = <-written
	assert.Contains(t, string(line), fmt.Sprintf(format, args...))
	assert.Equal(t, false, <-closed)
	return written, closed
}

func TestNewLoggerUnexported(t *testing.T) {
	level := INFO
	logger := newLogger("UNEXPORTED", 0)
	err := logger.SetLogger("console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
	assert.NoError(t, err)
	out := logger.MultiChannelledLog.GetEventLogger("console")
	assert.NotEmpty(t, out)
	chanlog, ok := out.(*ChannelledLog)
	assert.Equal(t, true, ok)
	assert.Equal(t, "console", chanlog.provider)
	assert.Equal(t, INFO, logger.GetLevel())
	baseConsoleTest(t, logger)
}

func TestNewLoggger(t *testing.T) {
	level := INFO
	logger := NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))

	assert.Equal(t, INFO, GetLevel())
	assert.Equal(t, false, IsTrace())
	assert.Equal(t, false, IsDebug())
	assert.Equal(t, true, IsInfo())
	assert.Equal(t, true, IsWarn())
	assert.Equal(t, true, IsError())

	written, closed := baseConsoleTest(t, logger)

	format := "test: %s"
	args := []interface{}{"A"}

	Log(0, INFO, format, args...)
	line := <-written
	assert.Contains(t, string(line), fmt.Sprintf(format, args...))
	assert.Equal(t, false, <-closed)

	Info(format, args...)
	line = <-written
	assert.Contains(t, string(line), fmt.Sprintf(format, args...))
	assert.Equal(t, false, <-closed)

	go DelLogger("console")
	line = <-written
	assert.Equal(t, "", string(line))
	assert.Equal(t, true, <-closed)
}

func TestNewLogggerRecreate(t *testing.T) {
	level := INFO
	NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))

	assert.Equal(t, INFO, GetLevel())
	assert.Equal(t, false, IsTrace())
	assert.Equal(t, false, IsDebug())
	assert.Equal(t, true, IsInfo())
	assert.Equal(t, true, IsWarn())
	assert.Equal(t, true, IsError())

	format := "test: %s"
	args := []interface{}{"A"}

	Log(0, INFO, format, args...)

	NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))

	assert.Equal(t, INFO, GetLevel())
	assert.Equal(t, false, IsTrace())
	assert.Equal(t, false, IsDebug())
	assert.Equal(t, true, IsInfo())
	assert.Equal(t, true, IsWarn())
	assert.Equal(t, true, IsError())

	Log(0, INFO, format, args...)

	assert.Panics(t, func() {
		NewLogger(0, "console", "console", fmt.Sprintf(`{"level":"%s"`, level.String()))
	})

	go DelLogger("console")

	// We should be able to redelete without a problem
	go DelLogger("console")

}

func TestNewNamedLogger(t *testing.T) {
	level := INFO
	err := NewNamedLogger("test", 0, "console", "console", fmt.Sprintf(`{"level":"%s"}`, level.String()))
	assert.NoError(t, err)
	logger := NamedLoggers["test"]
	assert.Equal(t, level, logger.GetLevel())

	written, closed := baseConsoleTest(t, logger)
	go DelNamedLogger("test")
	line := <-written
	assert.Equal(t, "", string(line))
	assert.Equal(t, true, <-closed)
}
156
modules/log/logger.go
modules/log/logger.go (new file, 156 lines)
@@ -0,0 +1,156 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"os"
	"runtime"
	"strings"
	"time"
)

// Logger is default logger in the Gitea application.
// it can contain several providers and log message into all providers.
type Logger struct {
	*MultiChannelledLog
	bufferLength int64
}

// newLogger initializes and returns a new logger.
func newLogger(name string, buffer int64) *Logger {
	l := &Logger{
		MultiChannelledLog: NewMultiChannelledLog(name, buffer),
		bufferLength:       buffer,
	}
	return l
}

// SetLogger sets new logger instance with given logger provider and config.
func (l *Logger) SetLogger(name, provider, config string) error {
	eventLogger, err := NewChannelledLog(name, provider, config, l.bufferLength)
	if err != nil {
		return fmt.Errorf("Failed to create sublogger (%s): %v", name, err)
	}

	l.MultiChannelledLog.DelLogger(name)

	err = l.MultiChannelledLog.AddLogger(eventLogger)
	if err != nil {
		if IsErrDuplicateName(err) {
			return fmt.Errorf("Duplicate named sublogger %s %v", name, l.MultiChannelledLog.GetEventLoggerNames())
		}
		return fmt.Errorf("Failed to add sublogger (%s): %v", name, err)
	}

	return nil
}

// DelLogger deletes a sublogger from this logger.
func (l *Logger) DelLogger(name string) (bool, error) {
	return l.MultiChannelledLog.DelLogger(name), nil
}

// Log msg at the provided level with the provided caller defined by skip (0 being the function that calls this function)
func (l *Logger) Log(skip int, level Level, format string, v ...interface{}) error {
	if l.GetLevel() > level {
		return nil
	}
	caller := "?()"
	pc, filename, line, ok := runtime.Caller(skip + 1)
	if ok {
		// Get caller function name.
		fn := runtime.FuncForPC(pc)
		if fn != nil {
			caller = fn.Name() + "()"
		}
	}
	msg := format
	if len(v) > 0 {
		args := make([]interface{}, len(v))
		for i := 0; i < len(args); i++ {
			args[i] = NewColoredValuePointer(&v[i])
		}
		msg = fmt.Sprintf(format, args...)
	}
	stack := ""
	if l.GetStacktraceLevel() <= level {
		stack = Stack(skip + 1)
	}
	return l.SendLog(level, caller, strings.TrimPrefix(filename, prefix), line, msg, stack)
}

// SendLog sends a log event at the provided level with the information given
func (l *Logger) SendLog(level Level, caller, filename string, line int, msg string, stack string) error {
	if l.GetLevel() > level {
		return nil
	}
	event := &Event{
		level:      level,
		caller:     caller,
		filename:   filename,
		line:       line,
		msg:        msg,
		time:       time.Now(),
		stacktrace: stack,
	}
	l.LogEvent(event)
	return nil
}

// Trace records trace log
func (l *Logger) Trace(format string, v ...interface{}) {
	l.Log(1, TRACE, format, v...)
}

// Debug records debug log
func (l *Logger) Debug(format string, v ...interface{}) {
	l.Log(1, DEBUG, format, v...)
}

// Info records information log
func (l *Logger) Info(format string, v ...interface{}) {
	l.Log(1, INFO, format, v...)
}

// Warn records warning log
func (l *Logger) Warn(format string, v ...interface{}) {
	l.Log(1, WARN, format, v...)
}

// Error records error log
func (l *Logger) Error(format string, v ...interface{}) {
	l.Log(1, ERROR, format, v...)
}

// ErrorWithSkip records error log from "skip" calls back from this function
func (l *Logger) ErrorWithSkip(skip int, format string, v ...interface{}) {
	l.Log(skip+1, ERROR, format, v...)
}

// Critical records critical log
func (l *Logger) Critical(format string, v ...interface{}) {
	l.Log(1, CRITICAL, format, v...)
}

// CriticalWithSkip records critical log from "skip" calls back from this function
func (l *Logger) CriticalWithSkip(skip int, format string, v ...interface{}) {
	l.Log(skip+1, CRITICAL, format, v...)
}

// Fatal records fatal log and exit the process
func (l *Logger) Fatal(format string, v ...interface{}) {
	l.Log(1, FATAL, format, v...)
	l.Close()
	os.Exit(1)
}

// FatalWithSkip records fatal log from "skip" calls back from this function and exits the process
func (l *Logger) FatalWithSkip(skip int, format string, v ...interface{}) {
	l.Log(skip+1, FATAL, format, v...)
	l.Close()
	os.Exit(1)
}
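A rough usage sketch of the new logger API (illustrative only, not part of this change): the buffer length, provider name and JSON config below are assumptions that mirror what modules/setting/log.go generates, and the package-level helpers are the ones callers are switched to throughout this PR.

	package main

	import "code.gitea.io/gitea/modules/log"

	func main() {
		// Attach a "console" sublogger to the default logger; the JSON keys
		// (level, colorize, ...) follow what generateLogConfig emits.
		log.NewLogger(1000, "console", "console", `{"level":"trace","colorize":false}`)

		// The package-level helpers no longer take a leading skip/level argument.
		log.Info("application started")
		log.Error("something failed: %v", "example error")
	}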
modules/log/provider.go (new file, 26 lines)
@@ -0,0 +1,26 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

// LoggerProvider represents behaviors of a logger provider.
type LoggerProvider interface {
	Init(config string) error
	EventLogger
}

type loggerProvider func() LoggerProvider

var providers = make(map[string]loggerProvider)

// Register registers given logger provider to providers.
func Register(name string, log loggerProvider) {
	if log == nil {
		panic("log: register provider is nil")
	}
	if _, dup := providers[name]; dup {
		panic("log: register called twice for provider \"" + name + "\"")
	}
	providers[name] = log
}
modules/log/router.go (new file, 103 lines)
@@ -0,0 +1,103 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"net/http"
	"time"

	macaron "gopkg.in/macaron.v1"
)

var statusToColor = map[int][]byte{
	100: ColorBytes(Bold),
	200: ColorBytes(FgGreen),
	300: ColorBytes(FgYellow),
	304: ColorBytes(FgCyan),
	400: ColorBytes(Bold, FgRed),
	401: ColorBytes(Bold, FgMagenta),
	403: ColorBytes(Bold, FgMagenta),
	500: ColorBytes(Bold, BgRed),
}

func coloredStatus(status int, s ...string) *ColoredValue {
	color, ok := statusToColor[status]
	if !ok {
		color, ok = statusToColor[(status/100)*100]
	}
	if !ok {
		color = fgBoldBytes
	}
	if len(s) > 0 {
		return NewColoredValueBytes(s[0], &color)
	}
	return NewColoredValueBytes(status, &color)
}

var methodToColor = map[string][]byte{
	"GET":    ColorBytes(FgBlue),
	"POST":   ColorBytes(FgGreen),
	"DELETE": ColorBytes(FgRed),
	"PATCH":  ColorBytes(FgCyan),
	"PUT":    ColorBytes(FgYellow, Faint),
	"HEAD":   ColorBytes(FgBlue, Faint),
}

func coloredMethod(method string) *ColoredValue {
	color, ok := methodToColor[method]
	if !ok {
		return NewColoredValueBytes(method, &fgBoldBytes)
	}
	return NewColoredValueBytes(method, &color)
}

var durations = []time.Duration{
	10 * time.Millisecond,
	100 * time.Millisecond,
	1 * time.Second,
	5 * time.Second,
	10 * time.Second,
}

var durationColors = [][]byte{
	ColorBytes(FgGreen),
	ColorBytes(Bold),
	ColorBytes(FgYellow),
	ColorBytes(FgRed, Bold),
	ColorBytes(BgRed),
}

var wayTooLong = ColorBytes(BgMagenta)

func coloredTime(duration time.Duration) *ColoredValue {
	for i, k := range durations {
		if duration < k {
			return NewColoredValueBytes(duration, &durationColors[i])
		}
	}
	return NewColoredValueBytes(duration, &wayTooLong)
}

// SetupRouterLogger will setup macaron to routing to the main gitea log
func SetupRouterLogger(m *macaron.Macaron, level Level) {
	if GetLevel() <= level {
		m.Use(RouterHandler(level))
	}
}

// RouterHandler is a macaron handler that will log the routing to the default gitea log
func RouterHandler(level Level) func(ctx *macaron.Context) {
	return func(ctx *macaron.Context) {
		start := time.Now()

		GetLogger("router").Log(0, level, "Started %s %s for %s", coloredMethod(ctx.Req.Method), ctx.Req.RequestURI, ctx.RemoteAddr())

		rw := ctx.Resp.(macaron.ResponseWriter)
		ctx.Next()

		status := rw.Status()
		GetLogger("router").Log(0, level, "Completed %s %s %v %s in %v", coloredMethod(ctx.Req.Method), ctx.Req.RequestURI, coloredStatus(status), coloredStatus(status, http.StatusText(rw.Status())), coloredTime(time.Since(start)))
	}
}
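A hypothetical wiring example (not in the diff) for the new router logger: SetupRouterLogger only installs the handler when the default logger's level is at or below the requested level, so the macaron setup below assumes logging has already been initialised elsewhere.

	package main

	import (
		"code.gitea.io/gitea/modules/log"

		macaron "gopkg.in/macaron.v1"
	)

	func main() {
		m := macaron.New()

		// Log "Started ..." / "Completed ..." lines for every request at INFO.
		log.SetupRouterLogger(m, log.INFO)

		m.Get("/", func() string { return "Hello" })
		m.Run()
	}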
@@ -1,4 +1,5 @@
 // Copyright 2014 The Gogs Authors. All rights reserved.
+// Copyright 2019 The Gitea Authors. All rights reserved.
 // Use of this source code is governed by a MIT-style
 // license that can be found in the LICENSE file.
 
@@ -6,29 +7,45 @@ package log
 import (
 	"encoding/json"
-	"fmt"
 	"net/smtp"
 	"strings"
-	"time"
 )
 
 const (
 	subjectPhrase = "Diagnostic message from server"
 )
 
-// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
-type SMTPWriter struct {
-	Username string `json:"Username"`
-	Password string `json:"password"`
-	Host     string `json:"Host"`
-	Subject  string `json:"subject"`
-	RecipientAddresses []string `json:"sendTos"`
-	Level    int `json:"level"`
-}
-
-// NewSMTPWriter creates smtp writer.
-func NewSMTPWriter() LoggerInterface {
-	return &SMTPWriter{Level: TRACE}
+type smtpWriter struct {
+	owner *SMTPLogger
+}
+
+// Write sends the message as an email
+func (s *smtpWriter) Write(p []byte) (int, error) {
+	return s.owner.sendMail(p)
+}
+
+// Close does nothing
+func (s *smtpWriter) Close() error {
+	return nil
+}
+
+// SMTPLogger implements LoggerProvider and is used to send emails via given SMTP-server.
+type SMTPLogger struct {
+	BaseLogger
+	Username           string   `json:"Username"`
+	Password           string   `json:"password"`
+	Host               string   `json:"host"`
+	Subject            string   `json:"subject"`
+	RecipientAddresses []string `json:"sendTos"`
+	sendMailFn         func(string, smtp.Auth, string, []string, []byte) error
+}
+
+// NewSMTPLogger creates smtp writer.
+func NewSMTPLogger() LoggerProvider {
+	s := &SMTPLogger{}
+	s.Level = TRACE
+	s.sendMailFn = smtp.SendMail
+	return s
 }
 
 // Init smtp writer with json config.
@@ -41,49 +58,54 @@ func NewSMTPWriter() LoggerInterface {
 // "sendTos":["email1","email2"],
 // "level":LevelError
 // }
-func (sw *SMTPWriter) Init(jsonconfig string) error {
-	return json.Unmarshal([]byte(jsonconfig), sw)
+func (log *SMTPLogger) Init(jsonconfig string) error {
+	err := json.Unmarshal([]byte(jsonconfig), log)
+	if err != nil {
+		return err
+	}
+	log.createLogger(&smtpWriter{
+		owner: log,
+	})
+	log.sendMailFn = smtp.SendMail
+	return nil
 }
 
 // WriteMsg writes message in smtp writer.
 // it will send an email with subject and only this message.
-func (sw *SMTPWriter) WriteMsg(msg string, skip, level int) error {
-	if level < sw.Level {
-		return nil
-	}
-
-	hp := strings.Split(sw.Host, ":")
+func (log *SMTPLogger) sendMail(p []byte) (int, error) {
+	hp := strings.Split(log.Host, ":")
 
 	// Set up authentication information.
 	auth := smtp.PlainAuth(
 		"",
-		sw.Username,
-		sw.Password,
+		log.Username,
+		log.Password,
 		hp[0],
 	)
 	// Connect to the server, authenticate, set the sender and recipient,
 	// and send the email all in one step.
 	contentType := "Content-Type: text/plain" + "; charset=UTF-8"
-	mailmsg := []byte("To: " + strings.Join(sw.RecipientAddresses, ";") + "\r\nFrom: " + sw.Username + "<" + sw.Username +
-		">\r\nSubject: " + sw.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", time.Now().Format("2006-01-02 15:04:05")) + msg)
-	return smtp.SendMail(
-		sw.Host,
+	mailmsg := []byte("To: " + strings.Join(log.RecipientAddresses, ";") + "\r\nFrom: " + log.Username + "<" + log.Username +
+		">\r\nSubject: " + log.Subject + "\r\n" + contentType + "\r\n\r\n")
+	mailmsg = append(mailmsg, p...)
+	return len(p), log.sendMailFn(
+		log.Host,
 		auth,
-		sw.Username,
-		sw.RecipientAddresses,
+		log.Username,
+		log.RecipientAddresses,
 		mailmsg,
 	)
 }
 
 // Flush when log should be flushed
-func (sw *SMTPWriter) Flush() {
+func (log *SMTPLogger) Flush() {
 }
 
-// Destroy when writer is destroy
-func (sw *SMTPWriter) Destroy() {
+// GetName returns the default name for this implementation
+func (log *SMTPLogger) GetName() string {
+	return "smtp"
 }
 
 func init() {
-	Register("smtp", NewSMTPWriter)
+	Register("smtp", NewSMTPLogger)
 }
modules/log/smtp_test.go (new file, 86 lines)
@@ -0,0 +1,86 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"fmt"
	"net/smtp"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestSMTPLogger(t *testing.T) {
	prefix := "TestPrefix "
	level := INFO
	flags := LstdFlags | LUTC | Lfuncname
	username := "testuser"
	password := "testpassword"
	host := "testhost"
	subject := "testsubject"
	sendTos := []string{"testto1", "testto2"}

	logger := NewSMTPLogger()
	smtpLogger, ok := logger.(*SMTPLogger)
	assert.Equal(t, true, ok)

	err := logger.Init(fmt.Sprintf("{\"prefix\":\"%s\",\"level\":\"%s\",\"flags\":%d,\"username\":\"%s\",\"password\":\"%s\",\"host\":\"%s\",\"subject\":\"%s\",\"sendTos\":[\"%s\",\"%s\"]}", prefix, level.String(), flags, username, password, host, subject, sendTos[0], sendTos[1]))
	assert.NoError(t, err)

	assert.Equal(t, flags, smtpLogger.Flags)
	assert.Equal(t, level, smtpLogger.Level)
	assert.Equal(t, level, logger.GetLevel())

	location, _ := time.LoadLocation("EST")

	date := time.Date(2019, time.January, 13, 22, 3, 30, 15, location)

	dateString := date.UTC().Format("2006/01/02 15:04:05")

	event := Event{
		level:    INFO,
		msg:      "TEST MSG",
		caller:   "CALLER",
		filename: "FULL/FILENAME",
		line:     1,
		time:     date,
	}

	expected := fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)

	var envToHost string
	var envFrom string
	var envTo []string
	var envMsg []byte
	smtpLogger.sendMailFn = func(addr string, a smtp.Auth, from string, to []string, msg []byte) error {
		envToHost = addr
		envFrom = from
		envTo = to
		envMsg = msg
		return nil
	}

	err = logger.LogEvent(&event)
	assert.NoError(t, err)
	assert.Equal(t, host, envToHost)
	assert.Equal(t, username, envFrom)
	assert.Equal(t, sendTos, envTo)
	assert.Contains(t, string(envMsg), expected)

	logger.Flush()

	event.level = WARN
	expected = fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
	err = logger.LogEvent(&event)
	assert.NoError(t, err)
	assert.Equal(t, host, envToHost)
	assert.Equal(t, username, envFrom)
	assert.Equal(t, sendTos, envTo)
	assert.Contains(t, string(envMsg), expected)

	logger.Close()
}
modules/log/stack.go (new file, 83 lines)
@@ -0,0 +1,83 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package log

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"runtime"
)

var (
	unknown = []byte("???")
)

// Stack will skip back the provided number of frames and return a stack trace with source code.
// Although we could just use debug.Stack(), this routine will return the source code and
// skip back the provided number of frames - i.e. allowing us to ignore preceding function calls.
// A skip of 0 returns the stack trace for the calling function, not including this call.
// If the problem is a lack of memory of course all this is not going to work...
func Stack(skip int) string {
	buf := new(bytes.Buffer)

	// Store the last file we opened as its probable that the preceding stack frame
	// will be in the same file
	var lines [][]byte
	var lastFilename string
	for i := skip + 1; ; i++ { // Skip over frames
		programCounter, filename, lineNumber, ok := runtime.Caller(i)
		// If we can't retrieve the information break - basically we're into go internals at this point.
		if !ok {
			break
		}

		// Print equivalent of debug.Stack()
		fmt.Fprintf(buf, "%s:%d (0x%x)\n", filename, lineNumber, programCounter)
		// Now try to print the offending line
		if filename != lastFilename {
			data, err := ioutil.ReadFile(filename)
			if err != nil {
				// can't read this sourcefile
				// likely we don't have the sourcecode available
				continue
			}
			lines = bytes.Split(data, []byte{'\n'})
			lastFilename = filename
		}
		fmt.Fprintf(buf, "\t%s: %s\n", functionName(programCounter), source(lines, lineNumber))
	}
	return buf.String()
}

// functionName converts the provided programCounter into a function name
func functionName(programCounter uintptr) []byte {
	function := runtime.FuncForPC(programCounter)
	if function == nil {
		return unknown
	}
	name := []byte(function.Name())

	// Because we provide the filename we can drop the preceding package name.
	if lastslash := bytes.LastIndex(name, []byte("/")); lastslash >= 0 {
		name = name[lastslash+1:]
	}
	// And the current package name.
	if period := bytes.Index(name, []byte(".")); period >= 0 {
		name = name[period+1:]
	}
	// And we should just replace the interpunct with a dot
	name = bytes.Replace(name, []byte("·"), []byte("."), -1)
	return name
}

// source returns a space-trimmed slice of the n'th line.
func source(lines [][]byte, n int) []byte {
	n-- // in stack trace, lines are 1-indexed but our array is 0-indexed
	if n < 0 || n >= len(lines) {
		return unknown
	}
	return bytes.TrimSpace(lines[n])
}
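An illustrative sketch (assumption, not from the diff) of capturing a stack trace with source lines at an error site; Stack(0) starts the trace at the caller of Stack.

	package main

	import (
		"fmt"

		"code.gitea.io/gitea/modules/log"
	)

	func doWork() error {
		return fmt.Errorf("boom")
	}

	func main() {
		if err := doWork(); err != nil {
			// Print the error followed by the annotated stack trace.
			fmt.Printf("error: %v\n%s", err, log.Stack(0))
		}
	}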
@@ -12,7 +12,6 @@ import (
 
 // XORMLogBridge a logger bridge from Logger to xorm
 type XORMLogBridge struct {
-	loggers []*Logger
 	showSQL bool
 	level   core.LogLevel
 }
@@ -22,110 +21,80 @@ var (
 	XORMLogger *XORMLogBridge
 )
 
-// DiscardXORMLogger inits a blank logger for xorm
-func DiscardXORMLogger() {
+// InitXORMLogger inits a log bridge for xorm
+func InitXORMLogger(showSQL bool) {
 	XORMLogger = &XORMLogBridge{
-		showSQL: false,
+		showSQL: showSQL,
 	}
 }
 
-// NewXORMLogger generate logger for xorm FIXME: configable
-func NewXORMLogger(bufferlen int64, mode, config string) {
-	logger := newLogger(bufferlen)
-	logger.SetLogger(mode, config)
-	if XORMLogger == nil {
-		XORMLogger = &XORMLogBridge{
-			showSQL: true,
-		}
-	}
-	XORMLogger.loggers = append(XORMLogger.loggers, logger)
+// GetGiteaLevel returns the minimum Gitea logger level
+func (l *XORMLogBridge) GetGiteaLevel() Level {
+	return GetLogger("xorm").GetLevel()
 }
 
-func (l *XORMLogBridge) writerMsg(skip, level int, msg string) error {
-	for _, logger := range l.loggers {
-		if err := logger.writerMsg(skip, level, msg); err != nil {
-			return err
-		}
-	}
-	return nil
+// Log a message with defined skip and at logging level
+func (l *XORMLogBridge) Log(skip int, level Level, format string, v ...interface{}) error {
+	return GetLogger("xorm").Log(skip+1, level, format, v...)
 }
 
 // Debug show debug log
 func (l *XORMLogBridge) Debug(v ...interface{}) {
-	if l.level <= core.LOG_DEBUG {
-		msg := fmt.Sprint(v...)
-		l.writerMsg(0, DEBUG, "[D]"+msg)
-	}
+	l.Log(2, DEBUG, fmt.Sprint(v...))
 }
 
 // Debugf show debug log
 func (l *XORMLogBridge) Debugf(format string, v ...interface{}) {
-	if l.level <= core.LOG_DEBUG {
-		for _, logger := range l.loggers {
-			logger.Debug(format, v...)
-		}
-	}
+	l.Log(2, DEBUG, format, v...)
 }
 
 // Error show error log
 func (l *XORMLogBridge) Error(v ...interface{}) {
-	if l.level <= core.LOG_ERR {
-		msg := fmt.Sprint(v...)
-		l.writerMsg(0, ERROR, "[E]"+msg)
-	}
+	l.Log(2, ERROR, fmt.Sprint(v...))
 }
 
 // Errorf show error log
 func (l *XORMLogBridge) Errorf(format string, v ...interface{}) {
-	if l.level <= core.LOG_ERR {
-		for _, logger := range l.loggers {
-			logger.Error(0, format, v...)
-		}
-	}
+	l.Log(2, ERROR, format, v...)
 }
 
 // Info show information level log
 func (l *XORMLogBridge) Info(v ...interface{}) {
-	if l.level <= core.LOG_INFO {
-		msg := fmt.Sprint(v...)
-		l.writerMsg(0, INFO, "[I]"+msg)
-	}
+	l.Log(2, INFO, fmt.Sprint(v...))
 }
 
 // Infof show information level log
 func (l *XORMLogBridge) Infof(format string, v ...interface{}) {
-	if l.level <= core.LOG_INFO {
-		for _, logger := range l.loggers {
-			logger.Info(format, v...)
-		}
-	}
+	l.Log(2, INFO, format, v...)
 }
 
 // Warn show warning log
 func (l *XORMLogBridge) Warn(v ...interface{}) {
-	if l.level <= core.LOG_WARNING {
-		msg := fmt.Sprint(v...)
-		l.writerMsg(0, WARN, "[W] "+msg)
-	}
+	l.Log(2, WARN, fmt.Sprint(v...))
 }
 
 // Warnf show warnning log
 func (l *XORMLogBridge) Warnf(format string, v ...interface{}) {
-	if l.level <= core.LOG_WARNING {
-		for _, logger := range l.loggers {
-			logger.Warn(format, v...)
-		}
-	}
+	l.Log(2, WARN, format, v...)
 }
 
 // Level get logger level
 func (l *XORMLogBridge) Level() core.LogLevel {
-	return l.level
+	switch l.GetGiteaLevel() {
+	case TRACE, DEBUG:
+		return core.LOG_DEBUG
+	case INFO:
+		return core.LOG_INFO
+	case WARN:
+		return core.LOG_WARNING
+	case ERROR, CRITICAL:
+		return core.LOG_ERR
+	}
+	return core.LOG_OFF
 }
 
-// SetLevel set logger level
-func (l *XORMLogBridge) SetLevel(level core.LogLevel) {
-	l.level = level
+// SetLevel set the logger level
+func (l *XORMLogBridge) SetLevel(lvl core.LogLevel) {
 }
 
 // ShowSQL set if record SQL
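A sketch of how the bridge might be attached to an xorm engine (assumptions: the go-xorm Engine.SetLogger API and the sqlite3 driver import are not part of this diff; the bridge itself now resolves its level from the "xorm" named Gitea logger rather than a stored core.LogLevel).

	package main

	import (
		"code.gitea.io/gitea/modules/log"

		"github.com/go-xorm/xorm"
		_ "github.com/mattn/go-sqlite3"
	)

	func main() {
		engine, err := xorm.NewEngine("sqlite3", "./gitea.db")
		if err != nil {
			log.Fatal("Failed to create xorm engine: %v", err)
		}

		// Initialise the bridge with SQL logging enabled and hand it to xorm.
		log.InitXORMLogger(true)
		engine.SetLogger(log.XORMLogger)
	}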
@@ -258,7 +258,7 @@ func processMailQueue() {
 		case msg := <-mailQueue:
 			log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info)
 			if err := gomail.Send(Sender, msg.Message); err != nil {
-				log.Error(3, "Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
+				log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
 			} else {
 				log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info)
 			}
modules/markup/external/external.go (vendored, 8 lines changed)
@@ -67,7 +67,7 @@ func (p *Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]stri
 	// write to temp file
 	f, err := ioutil.TempFile("", "gitea_input")
 	if err != nil {
-		log.Error(4, "%s create temp file when rendering %s failed: %v", p.Name(), p.Command, err)
+		log.Error("%s create temp file when rendering %s failed: %v", p.Name(), p.Command, err)
 		return []byte("")
 	}
 	defer os.Remove(f.Name())
@@ -75,13 +75,13 @@ func (p *Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]stri
 	_, err = io.Copy(f, rd)
 	if err != nil {
 		f.Close()
-		log.Error(4, "%s write data to temp file when rendering %s failed: %v", p.Name(), p.Command, err)
+		log.Error("%s write data to temp file when rendering %s failed: %v", p.Name(), p.Command, err)
 		return []byte("")
 	}
 
 	err = f.Close()
 	if err != nil {
-		log.Error(4, "%s close temp file when rendering %s failed: %v", p.Name(), p.Command, err)
+		log.Error("%s close temp file when rendering %s failed: %v", p.Name(), p.Command, err)
 		return []byte("")
 	}
 	args = append(args, f.Name())
@@ -98,7 +98,7 @@ func (p *Parser) Render(rawBytes []byte, urlPrefix string, metas map[string]stri
 	}
 	cmd.Stdout = buf
 	if err := cmd.Run(); err != nil {
-		log.Error(4, "%s render run command %s %v failed: %v", p.Name(), commands[0], args, err)
+		log.Error("%s render run command %s %v failed: %v", p.Name(), commands[0], args, err)
 		return []byte("")
 	}
 	return buf.Bytes()
@@ -74,7 +74,7 @@ func render(parser Parser, rawBytes []byte, urlPrefix string, metas map[string]s
 	// TODO: one day the error should be returned.
 	result, err := PostProcess(result, urlPrefix, metas, isWiki)
 	if err != nil {
-		log.Error(3, "PostProcess: %v", err)
+		log.Error("PostProcess: %v", err)
 	}
 	return SanitizeBytes(result)
 }
@@ -35,7 +35,7 @@ func (Parser) Extensions() []string {
 func Render(rawBytes []byte, urlPrefix string, metas map[string]string, isWiki bool) (result []byte) {
 	defer func() {
 		if err := recover(); err != nil {
-			log.Error(4, "Panic in orgmode.Render: %v Just returning the rawBytes", err)
+			log.Error("Panic in orgmode.Render: %v Just returning the rawBytes", err)
 			result = rawBytes
 		}
 	}()
@@ -29,7 +29,7 @@ func (r *indexerNotifier) NotifyCreateIssueComment(doer *models.User, repo *mode
 	if comment.Type == models.CommentTypeComment {
 		if issue.Comments == nil {
 			if err := issue.LoadDiscussComments(); err != nil {
-				log.Error(4, "LoadComments failed: %v", err)
+				log.Error("LoadComments failed: %v", err)
 				return
 			}
 		} else {
@@ -63,7 +63,7 @@ func (r *indexerNotifier) NotifyUpdateComment(doer *models.User, c *models.Comme
 
 	if !found {
 		if err := c.Issue.LoadDiscussComments(); err != nil {
-			log.Error(4, "LoadComments failed: %v", err)
+			log.Error("LoadComments failed: %v", err)
 			return
 		}
 	}
@@ -87,7 +87,7 @@ func (r *indexerNotifier) NotifyDeleteComment(doer *models.User, comment *models
 
 	if !found {
 		if err := comment.Issue.LoadDiscussComments(); err != nil {
-			log.Error(4, "LoadComments failed: %v", err)
+			log.Error("LoadComments failed: %v", err)
 			return
 		}
 	}
@@ -37,25 +37,25 @@ func (m *mailNotifier) NotifyCreateIssueComment(doer *models.User, repo *models.
 	}
 
 	if err := comment.MailParticipants(act, issue); err != nil {
-		log.Error(4, "MailParticipants: %v", err)
+		log.Error("MailParticipants: %v", err)
 	}
 }
 
 func (m *mailNotifier) NotifyNewIssue(issue *models.Issue) {
 	if err := issue.MailParticipants(); err != nil {
-		log.Error(4, "MailParticipants: %v", err)
+		log.Error("MailParticipants: %v", err)
 	}
 }
 
 func (m *mailNotifier) NotifyIssueChangeStatus(doer *models.User, issue *models.Issue, isClosed bool) {
 	if err := issue.MailParticipants(); err != nil {
-		log.Error(4, "MailParticipants: %v", err)
+		log.Error("MailParticipants: %v", err)
 	}
 }
 
 func (m *mailNotifier) NotifyNewPullRequest(pr *models.PullRequest) {
 	if err := pr.Issue.MailParticipants(); err != nil {
-		log.Error(4, "MailParticipants: %v", err)
+		log.Error("MailParticipants: %v", err)
 	}
 }
 
@@ -69,6 +69,6 @@ func (m *mailNotifier) NotifyPullRequestReview(pr *models.PullRequest, r *models
 		act = models.ActionCommentIssue
 	}
 	if err := comment.MailParticipants(act, pr.Issue); err != nil {
-		log.Error(4, "MailParticipants: %v", err)
+		log.Error("MailParticipants: %v", err)
 	}
 }
@@ -39,7 +39,7 @@ func (ns *notificationService) Run() {
 		select {
 		case opts := <-ns.issueQueue:
 			if err := models.CreateOrUpdateIssueNotifications(opts.issue, opts.notificationAuthorID); err != nil {
-				log.Error(4, "Was unable to create issue notification: %v", err)
+				log.Error("Was unable to create issue notification: %v", err)
 			}
 		}
 	}
@@ -17,12 +17,12 @@ import (
 func DumpMemProfileForUsername(pprofDataPath, username string) {
 	f, err := ioutil.TempFile(pprofDataPath, fmt.Sprintf("memprofile_%s_", username))
 	if err != nil {
-		log.GitLogger.Fatal(4, "Could not create memory profile: %v", err)
+		log.GitLogger.Fatal("Could not create memory profile: %v", err)
 	}
 	defer f.Close()
 	runtime.GC() // get up-to-date statistics
 	if err := pprof.WriteHeapProfile(f); err != nil {
-		log.GitLogger.Fatal(4, "Could not write memory profile: %v", err)
+		log.GitLogger.Fatal("Could not write memory profile: %v", err)
 	}
 }
 
@@ -31,7 +31,7 @@ func DumpMemProfileForUsername(pprofDataPath, username string) {
 func DumpCPUProfileForUsername(pprofDataPath, username string) func() {
 	f, err := ioutil.TempFile(pprofDataPath, fmt.Sprintf("cpuprofile_%s_", username))
 	if err != nil {
-		log.GitLogger.Fatal(4, "Could not create cpu profile: %v", err)
+		log.GitLogger.Fatal("Could not create cpu profile: %v", err)
 	}
 
 	pprof.StartCPUProfile(f)
@@ -35,7 +35,7 @@ func newCacheService() {
 	case "redis", "memcache":
 		CacheService.Conn = strings.Trim(sec.Key("HOST").String(), "\" ")
 	default:
-		log.Fatal(4, "Unknown cache adapter: %s", CacheService.Adapter)
+		log.Fatal("Unknown cache adapter: %s", CacheService.Adapter)
 	}
 	CacheService.TTL = sec.Key("ITEM_TTL").MustDuration(16 * time.Hour)
 
@@ -119,6 +119,6 @@ var (
 
 func newCron() {
 	if err := Cfg.Section("cron").MapTo(&Cron); err != nil {
-		log.Fatal(4, "Failed to map Cron settings: %v", err)
+		log.Fatal("Failed to map Cron settings: %v", err)
 	}
 }
@@ -56,13 +56,13 @@ var (
 
 func newGit() {
 	if err := Cfg.Section("git").MapTo(&Git); err != nil {
-		log.Fatal(4, "Failed to map Git settings: %v", err)
+		log.Fatal("Failed to map Git settings: %v", err)
 	}
 	git.DefaultCommandExecutionTimeout = time.Duration(Git.Timeout.Default) * time.Second
 
 	binVersion, err := git.BinVersion()
 	if err != nil {
-		log.Fatal(4, "Error retrieving git version: %v", err)
+		log.Fatal("Error retrieving git version: %v", err)
 	}
 
 	if version.Compare(binVersion, "2.9", ">=") {
@@ -5,40 +5,238 @@
 package setting
 
 import (
-	"fmt"
+	"encoding/json"
+	golog "log"
 	"os"
 	"path"
 	"path/filepath"
+	"runtime"
 	"strings"
 
 	"code.gitea.io/gitea/modules/log"
-	"github.com/go-xorm/core"
+
+	ini "gopkg.in/ini.v1"
 )
 
-var logLevels = map[string]string{
-	"Trace":    "0",
-	"Debug":    "1",
-	"Info":     "2",
-	"Warn":     "3",
-	"Error":    "4",
-	"Critical": "5",
+type defaultLogOptions struct {
+	levelName      string // LogLevel
+	flags          string
+	filename       string //path.Join(LogRootPath, "gitea.log")
+	bufferLength   int64
+	disableConsole bool
 }
 
-func getLogLevel(section string, key string, defaultValue string) string {
-	validLevels := []string{"Trace", "Debug", "Info", "Warn", "Error", "Critical"}
-	return Cfg.Section(section).Key(key).In(defaultValue, validLevels)
+func newDefaultLogOptions() defaultLogOptions {
+	return defaultLogOptions{
+		levelName:      LogLevel,
+		flags:          "stdflags",
+		filename:       filepath.Join(LogRootPath, "gitea.log"),
+		bufferLength:   10000,
+		disableConsole: false,
+	}
+}
+
+// SubLogDescription describes a sublogger
+type SubLogDescription struct {
+	Name     string
+	Provider string
+	Config   string
+}
+
+// LogDescription describes a named logger
+type LogDescription struct {
+	Name               string
+	SubLogDescriptions []SubLogDescription
+}
+
+func getLogLevel(section *ini.Section, key string, defaultValue string) string {
+	value := section.Key(key).MustString("info")
+	return log.FromString(value).String()
+}
+
+func getStacktraceLogLevel(section *ini.Section, key string, defaultValue string) string {
+	value := section.Key(key).MustString("none")
+	return log.FromString(value).String()
+}
+
+func generateLogConfig(sec *ini.Section, name string, defaults defaultLogOptions) (mode, jsonConfig, levelName string) {
+	levelName = getLogLevel(sec, "LEVEL", LogLevel)
+	level := log.FromString(levelName)
+	stacktraceLevelName := getStacktraceLogLevel(sec, "STACKTRACE_LEVEL", StacktraceLogLevel)
+	stacktraceLevel := log.FromString(stacktraceLevelName)
+	mode = name
+	keys := sec.Keys()
+	logPath := defaults.filename
+	flags := log.FlagsFromString(defaults.flags)
+	expression := ""
+	prefix := ""
+	for _, key := range keys {
+		switch key.Name() {
+		case "MODE":
+			mode = key.MustString(name)
+		case "FILE_NAME":
+			logPath = key.MustString(defaults.filename)
+			forcePathSeparator(logPath)
+			if !filepath.IsAbs(logPath) {
+				logPath = path.Join(LogRootPath, logPath)
+			}
+		case "FLAGS":
+			flags = log.FlagsFromString(key.MustString(defaults.flags))
+		case "EXPRESSION":
+			expression = key.MustString("")
+		case "PREFIX":
+			prefix = key.MustString("")
+		}
+	}
+
+	logConfig := map[string]interface{}{
+		"level":           level.String(),
+		"expression":      expression,
+		"prefix":          prefix,
+		"flags":           flags,
+		"stacktraceLevel": stacktraceLevel.String(),
+	}
+
+	// Generate log configuration.
+	switch mode {
+	case "console":
+		useStderr := sec.Key("STDERR").MustBool(false)
+		logConfig["stderr"] = useStderr
+		if useStderr {
+			logConfig["colorize"] = sec.Key("COLORIZE").MustBool(log.CanColorStderr)
+		} else {
+			logConfig["colorize"] = sec.Key("COLORIZE").MustBool(log.CanColorStdout)
+		}
+
+	case "file":
+		if err := os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
+			panic(err.Error())
+		}
+
+		logConfig["colorize"] = sec.Key("COLORIZE").MustBool(runtime.GOOS != "windows")
+		logConfig["filename"] = logPath
+		logConfig["rotate"] = sec.Key("LOG_ROTATE").MustBool(true)
+		logConfig["maxsize"] = 1 << uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28))
+		logConfig["daily"] = sec.Key("DAILY_ROTATE").MustBool(true)
+		logConfig["maxdays"] = sec.Key("MAX_DAYS").MustInt(7)
+		logConfig["compress"] = sec.Key("COMPRESS").MustBool(true)
+		logConfig["compressionLevel"] = sec.Key("COMPRESSION_LEVEL").MustInt(-1)
+	case "conn":
+		logConfig["reconnectOnMsg"] = sec.Key("RECONNECT_ON_MSG").MustBool()
+		logConfig["reconnect"] = sec.Key("RECONNECT").MustBool()
+		logConfig["net"] = sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"})
+		logConfig["addr"] = sec.Key("ADDR").MustString(":7020")
+	case "smtp":
+		logConfig["username"] = sec.Key("USER").MustString("example@example.com")
+		logConfig["password"] = sec.Key("PASSWD").MustString("******")
+		logConfig["host"] = sec.Key("HOST").MustString("127.0.0.1:25")
+		logConfig["sendTos"] = sec.Key("RECEIVERS").MustString("[]")
+		logConfig["subject"] = sec.Key("SUBJECT").MustString("Diagnostic message from Gitea")
+	}
+
+	logConfig["colorize"] = sec.Key("COLORIZE").MustBool(false)
+
+	byteConfig, err := json.Marshal(logConfig)
+	if err != nil {
+		log.Error("Failed to marshal log configuration: %v %v", logConfig, err)
+		return
+	}
+	jsonConfig = string(byteConfig)
+	return
+}
+
+func generateNamedLogger(key string, options defaultLogOptions) *LogDescription {
+	description := LogDescription{
+		Name: key,
+	}
+
+	sections := strings.Split(Cfg.Section("log").Key(strings.ToUpper(key)).MustString(""), ",")
+
+	//description.Configs = make([]string, len(description.Sections))
+
+	for i := 0; i < len(sections); i++ {
+		sections[i] = strings.TrimSpace(sections[i])
+	}
+
+	for _, name := range sections {
+		if len(name) == 0 || (name == "console" && options.disableConsole) {
+			continue
+		}
+		sec, err := Cfg.GetSection("log." + name + "." + key)
+		if err != nil {
+			sec, _ = Cfg.NewSection("log." + name + "." + key)
+		}
+
+		provider, config, levelName := generateLogConfig(sec, name, options)
+
+		log.NewNamedLogger(key, options.bufferLength, name, provider, config)
+
+		description.SubLogDescriptions = append(description.SubLogDescriptions, SubLogDescription{
+			Name:     name,
+			Provider: provider,
+			Config:   config,
+		})
+		log.Info("%s Log: %s(%s:%s)", strings.Title(key), strings.Title(name), provider, levelName)
+	}
+
+	LogDescriptions[key] = &description
+
+	return &description
+}
+
+func newMacaronLogService() {
+	options := newDefaultLogOptions()
+	options.filename = filepath.Join(LogRootPath, "macaron.log")
+	options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+
+	Cfg.Section("log").Key("MACARON").MustString("file")
+	if RedirectMacaronLog {
+		generateNamedLogger("macaron", options)
+	}
+}
+
+func newAccessLogService() {
+	EnableAccessLog = Cfg.Section("log").Key("ENABLE_ACCESS_LOG").MustBool(false)
+	AccessLogTemplate = Cfg.Section("log").Key("ACCESS_LOG_TEMPLATE").MustString(
+		`{{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}\" \"{{.Ctx.Req.UserAgent}}"`)
+	Cfg.Section("log").Key("ACCESS").MustString("file")
+	if EnableAccessLog {
+		options := newDefaultLogOptions()
+		options.filename = filepath.Join(LogRootPath, "access.log")
+		options.flags = "" // For the router we don't want any prefixed flags
+		options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+		generateNamedLogger("access", options)
+	}
+}
+
+func newRouterLogService() {
+	Cfg.Section("log").Key("ROUTER").MustString("console")
+
+	if !DisableRouterLog && RedirectMacaronLog {
+		options := newDefaultLogOptions()
+		options.filename = filepath.Join(LogRootPath, "router.log")
+		options.flags = "date,time" // For the router we don't want any prefixed flags
+		options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+		generateNamedLogger("router", options)
+	}
 }
 
 func newLogService() {
 	log.Info("Gitea v%s%s", AppVer, AppBuiltWith)
 
-	LogModes = strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
-	LogConfigs = make([]string, len(LogModes))
+	options := newDefaultLogOptions()
+	options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+
+	description := LogDescription{
+		Name: log.DEFAULT,
+	}
+
+	sections := strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
+
 	useConsole := false
-	for i := 0; i < len(LogModes); i++ {
-		LogModes[i] = strings.TrimSpace(LogModes[i])
-		if LogModes[i] == "console" {
+	for i := 0; i < len(sections); i++ {
+		sections[i] = strings.TrimSpace(sections[i])
+		if sections[i] == "console" {
 			useConsole = true
 		}
 	}
@@ -47,140 +245,47 @@ func newLogService() {
 		log.DelLogger("console")
 	}
 
-	for i, mode := range LogModes {
-		sec, err := Cfg.GetSection("log." + mode)
+	for _, name := range sections {
+		if len(name) == 0 {
+			continue
+		}
+
+		sec, err := Cfg.GetSection("log." + name)
 		if err != nil {
-			sec, _ = Cfg.NewSection("log." + mode)
+			sec, _ = Cfg.NewSection("log." + name)
 		}
 
-		// Log level.
-		levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
-		level, ok := logLevels[levelName]
-		if !ok {
-			log.Fatal(4, "Unknown log level: %s", levelName)
-		}
-
-		// Generate log configuration.
-		switch mode {
-		case "console":
-			LogConfigs[i] = fmt.Sprintf(`{"level":%s}`, level)
-		case "file":
-			logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "gitea.log"))
-			if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
-				panic(err.Error())
-			}
-
-			LogConfigs[i] = fmt.Sprintf(
-				`{"level":%s,"filename":"%s","rotate":%v,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
-				logPath,
-				sec.Key("LOG_ROTATE").MustBool(true),
-				1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
-				sec.Key("DAILY_ROTATE").MustBool(true),
-				sec.Key("MAX_DAYS").MustInt(7))
-		case "conn":
-			LogConfigs[i] = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
-				sec.Key("RECONNECT_ON_MSG").MustBool(),
-				sec.Key("RECONNECT").MustBool(),
-				sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
-				sec.Key("ADDR").MustString(":7020"))
-		case "smtp":
-			LogConfigs[i] = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":["%s"],"subject":"%s"}`, level,
-				sec.Key("USER").MustString("example@example.com"),
-				sec.Key("PASSWD").MustString("******"),
-				sec.Key("HOST").MustString("127.0.0.1:25"),
-				strings.Replace(sec.Key("RECEIVERS").MustString("example@example.com"), ",", "\",\"", -1),
-				sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
-		case "database":
-			LogConfigs[i] = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
-				sec.Key("DRIVER").String(),
-				sec.Key("CONN").String())
-		}
-
-		log.NewLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, LogConfigs[i])
-		log.Info("Log Mode: %s(%s)", strings.Title(mode), levelName)
+		provider, config, levelName := generateLogConfig(sec, name, options)
+		log.NewLogger(options.bufferLength, name, provider, config)
+		description.SubLogDescriptions = append(description.SubLogDescriptions, SubLogDescription{
+			Name:     name,
+			Provider: provider,
+			Config:   config,
+		})
+		log.Info("Gitea Log Mode: %s(%s:%s)", strings.Title(name), strings.Title(provider), levelName)
 	}
+
+	LogDescriptions[log.DEFAULT] = &description
+
+	// Finally redirect the default golog to here
+	golog.SetFlags(0)
+	golog.SetPrefix("")
+	golog.SetOutput(log.NewLoggerAsWriter("INFO", log.GetLogger(log.DEFAULT)))
 }
 
 // NewXORMLogService initializes xorm logger service
 func NewXORMLogService(disableConsole bool) {
-	logModes := strings.Split(Cfg.Section("log").Key("MODE").MustString("console"), ",")
-	var logConfigs string
-	for _, mode := range logModes {
-		mode = strings.TrimSpace(mode)
-
-		if disableConsole && mode == "console" {
-			continue
-		}
-
-		sec, err := Cfg.GetSection("log." + mode)
-		if err != nil {
-			sec, _ = Cfg.NewSection("log." + mode)
-		}
-
-		// Log level.
-		levelName := getLogLevel("log."+mode, "LEVEL", LogLevel)
-		level, ok := logLevels[levelName]
-		if !ok {
-			log.Fatal(4, "Unknown log level: %s", levelName)
-		}
-
-		// Generate log configuration.
-		switch mode {
-		case "console":
-			logConfigs = fmt.Sprintf(`{"level":%s}`, level)
-		case "file":
-			logPath := sec.Key("FILE_NAME").MustString(path.Join(LogRootPath, "xorm.log"))
-			if err = os.MkdirAll(path.Dir(logPath), os.ModePerm); err != nil {
-				panic(err.Error())
-			}
-			logPath = path.Join(filepath.Dir(logPath), "xorm.log")
-
-			logConfigs = fmt.Sprintf(
-				`{"level":%s,"filename":"%s","rotate":%v,"maxsize":%d,"daily":%v,"maxdays":%d}`, level,
-				logPath,
-				sec.Key("LOG_ROTATE").MustBool(true),
-				1<<uint(sec.Key("MAX_SIZE_SHIFT").MustInt(28)),
-				sec.Key("DAILY_ROTATE").MustBool(true),
-				sec.Key("MAX_DAYS").MustInt(7))
-		case "conn":
-			logConfigs = fmt.Sprintf(`{"level":%s,"reconnectOnMsg":%v,"reconnect":%v,"net":"%s","addr":"%s"}`, level,
-				sec.Key("RECONNECT_ON_MSG").MustBool(),
-				sec.Key("RECONNECT").MustBool(),
-				sec.Key("PROTOCOL").In("tcp", []string{"tcp", "unix", "udp"}),
-				sec.Key("ADDR").MustString(":7020"))
-		case "smtp":
-			logConfigs = fmt.Sprintf(`{"level":%s,"username":"%s","password":"%s","host":"%s","sendTos":"%s","subject":"%s"}`, level,
-				sec.Key("USER").MustString("example@example.com"),
-				sec.Key("PASSWD").MustString("******"),
-				sec.Key("HOST").MustString("127.0.0.1:25"),
-				sec.Key("RECEIVERS").MustString("[]"),
-				sec.Key("SUBJECT").MustString("Diagnostic message from serve"))
-		case "database":
-			logConfigs = fmt.Sprintf(`{"level":%s,"driver":"%s","conn":"%s"}`, level,
-				sec.Key("DRIVER").String(),
-				sec.Key("CONN").String())
-		}
-
-		log.NewXORMLogger(Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000), mode, logConfigs)
-		if !disableConsole {
-			log.Info("XORM Log Mode: %s(%s)", strings.Title(mode), levelName)
-		}
-
-		var lvl core.LogLevel
-		switch levelName {
-		case "Trace", "Debug":
-			lvl = core.LOG_DEBUG
-		case "Info":
-			lvl = core.LOG_INFO
-		case "Warn":
-			lvl = core.LOG_WARNING
-		case "Error", "Critical":
-			lvl = core.LOG_ERR
-		}
-		log.XORMLogger.SetLevel(lvl)
-	}
-
-	if len(logConfigs) == 0 {
-		log.DiscardXORMLogger()
+	EnableXORMLog = Cfg.Section("log").Key("ENABLE_XORM_LOG").MustBool(true)
+	if EnableXORMLog {
+		options := newDefaultLogOptions()
+		options.filename = filepath.Join(LogRootPath, "xorm.log")
+		options.bufferLength = Cfg.Section("log").Key("BUFFER_LEN").MustInt64(10000)
+		options.disableConsole = disableConsole
+
+		Cfg.Section("log").Key("XORM").MustString(",")
+		generateNamedLogger("xorm", options)
+		log.InitXORMLogger(LogSQL)
+	} else {
+		log.InitXORMLogger(false)
 	}
 }
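Once generateNamedLogger has registered a named logger such as "access" or "router", other code can write to it through GetLogger, in the same way RouterHandler does above. A minimal sketch, assuming the "access" logger has already been set up by newAccessLogService:

	package main

	import "code.gitea.io/gitea/modules/log"

	// logAccessLine writes a pre-formatted line to the named "access" logger.
	func logAccessLine(line string) {
		log.GetLogger("access").Log(0, log.INFO, "%s", line)
	}

	func main() {
		logAccessLine(`127.0.0.1 - - [02/Jan/2006:15:04:05 -0700] "GET / HTTP/1.1" 200 0`)
	}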
@@ -84,7 +84,7 @@ func newMailService() {
 
 	parsed, err := mail.ParseAddress(MailService.From)
 	if err != nil {
-		log.Fatal(4, "Invalid mailer.FROM (%s): %v", MailService.From, err)
+		log.Fatal("Invalid mailer.FROM (%s): %v", MailService.From, err)
 	}
 	MailService.FromName = parsed.Name
 	MailService.FromEmail = parsed.Address
@@ -96,7 +96,7 @@ func newMailService() {
 	if MailService.MailerType == "sendmail" {
 		MailService.SendmailArgs, err = shellquote.Split(sec.Key("SENDMAIL_ARGS").String())
 		if err != nil {
-			log.Error(4, "Failed to parse Sendmail args: %v", CustomConf, err)
+			log.Error("Failed to parse Sendmail args: %s with error %v", CustomConf, err)
 		}
 	}
 
@ -132,7 +132,7 @@ var (
|
||||||
func newRepository() {
|
func newRepository() {
|
||||||
homeDir, err := com.HomeDir()
|
homeDir, err := com.HomeDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatal(4, "Failed to get home directory: %v", err)
|
log.Fatal("Failed to get home directory: %v", err)
|
||||||
}
|
}
|
||||||
homeDir = strings.Replace(homeDir, "\\", "/", -1)
|
homeDir = strings.Replace(homeDir, "\\", "/", -1)
|
||||||
|
|
||||||
|
@ -151,15 +151,15 @@ func newRepository() {
|
||||||
ScriptType = sec.Key("SCRIPT_TYPE").MustString("bash")
|
ScriptType = sec.Key("SCRIPT_TYPE").MustString("bash")
|
||||||
|
|
||||||
if err = Cfg.Section("repository").MapTo(&Repository); err != nil {
|
if err = Cfg.Section("repository").MapTo(&Repository); err != nil {
|
||||||
log.Fatal(4, "Failed to map Repository settings: %v", err)
|
log.Fatal("Failed to map Repository settings: %v", err)
|
||||||
} else if err = Cfg.Section("repository.editor").MapTo(&Repository.Editor); err != nil {
|
} else if err = Cfg.Section("repository.editor").MapTo(&Repository.Editor); err != nil {
|
||||||
log.Fatal(4, "Failed to map Repository.Editor settings: %v", err)
|
log.Fatal("Failed to map Repository.Editor settings: %v", err)
|
||||||
} else if err = Cfg.Section("repository.upload").MapTo(&Repository.Upload); err != nil {
|
} else if err = Cfg.Section("repository.upload").MapTo(&Repository.Upload); err != nil {
|
||||||
log.Fatal(4, "Failed to map Repository.Upload settings: %v", err)
|
log.Fatal("Failed to map Repository.Upload settings: %v", err)
|
||||||
} else if err = Cfg.Section("repository.local").MapTo(&Repository.Local); err != nil {
|
} else if err = Cfg.Section("repository.local").MapTo(&Repository.Local); err != nil {
|
||||||
log.Fatal(4, "Failed to map Repository.Local settings: %v", err)
|
log.Fatal("Failed to map Repository.Local settings: %v", err)
|
||||||
} else if err = Cfg.Section("repository.pull-request").MapTo(&Repository.PullRequest); err != nil {
|
} else if err = Cfg.Section("repository.pull-request").MapTo(&Repository.PullRequest); err != nil {
|
||||||
log.Fatal(4, "Failed to map Repository.PullRequest settings: %v", err)
|
log.Fatal("Failed to map Repository.PullRequest settings: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !filepath.IsAbs(Repository.Upload.TempPath) {
|
if !filepath.IsAbs(Repository.Upload.TempPath) {
|
||||||
|
|
|
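The change that repeats throughout these hunks is the logging API itself: `log.Fatal` and `log.Error` no longer take a leading integer skip/level argument. A minimal before/after illustration, using calls taken from the hunks above (shown side by side for comparison only, so it is not meant to compile as one file):

// Before: the first argument was an integer skip/level value.
log.Fatal(4, "Failed to map Repository settings: %v", err)
log.Error(4, "Failed to parse Sendmail args: %v", CustomConf, err)

// After: the integer is gone; the severity is implied by the function name.
log.Fatal("Failed to map Repository settings: %v", err)
log.Error("Failed to parse Sendmail args: %s with error %v", CustomConf, err)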
@@ -7,6 +7,7 @@ package setting

import (
"encoding/base64"
+"fmt"
"io"
"io/ioutil"
"net"
@@ -15,7 +16,6 @@ import (
"os/exec"
"path"
"path/filepath"
-"runtime"
"strconv"
"strings"
"time"
@@ -90,7 +90,6 @@ var (
RedirectOtherPort bool
PortToRedirect string
OfflineMode bool
-DisableRouterLog bool
CertFile string
KeyFile string
StaticRootPath string
@@ -259,10 +258,16 @@ var (

// Log settings
LogLevel string
+StacktraceLogLevel string
LogRootPath string
-LogModes []string
-LogConfigs []string
+LogDescriptions = make(map[string]*LogDescription)
RedirectMacaronLog bool
+DisableRouterLog bool
+RouterLogLevel log.Level
+RouterLogMode string
+EnableAccessLog bool
+AccessLogTemplate string
+EnableXORMLog bool

// Attachment settings
AttachmentPath string
@@ -398,19 +403,19 @@ func getWorkPath(appPath string) string {
}

func init() {
-IsWindows = runtime.GOOS == "windows"
-log.NewLogger(0, "console", `{"level": 0}`)
+// We can rely on log.CanColorStdout being set properly because modules/log/console_windows.go comes before modules/setting/setting.go lexicographically
+log.NewLogger(0, "console", "console", fmt.Sprintf(`{"level": "trace", "colorize": %t, "stacktraceLevel": "none"}`, log.CanColorStdout))

var err error
if AppPath, err = getAppPath(); err != nil {
-log.Fatal(4, "Failed to get app path: %v", err)
+log.Fatal("Failed to get app path: %v", err)
}
AppWorkPath = getWorkPath(AppPath)
}

func forcePathSeparator(path string) {
if strings.Contains(path, "\\") {
-log.Fatal(4, "Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
+log.Fatal("Do not use '\\' or '\\\\' in paths, instead, please use '/' in all places")
}
}

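The `init()` change above also shows the new logger bootstrap shape: `log.NewLogger` now takes a logger name plus a JSON option string instead of a bare level. A minimal sketch of the same call with a different level — the lowercase level string and the three option keys are the ones visible in this hunk, and any further options would be an assumption:

package main

import (
	"fmt"

	"code.gitea.io/gitea/modules/log"
)

func main() {
	// Sketch only: mirrors the NewLogger call in the hunk above, with an assumed "info" level.
	log.NewLogger(0, "console", "console",
		fmt.Sprintf(`{"level": "info", "colorize": %t, "stacktraceLevel": "none"}`, log.CanColorStdout))
}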
@@ -430,16 +435,16 @@ func IsRunUserMatchCurrentUser(runUser string) (string, bool) {
func createPIDFile(pidPath string) {
currentPid := os.Getpid()
if err := os.MkdirAll(filepath.Dir(pidPath), os.ModePerm); err != nil {
-log.Fatal(4, "Failed to create PID folder: %v", err)
+log.Fatal("Failed to create PID folder: %v", err)
}

file, err := os.Create(pidPath)
if err != nil {
-log.Fatal(4, "Failed to create PID file: %v", err)
+log.Fatal("Failed to create PID file: %v", err)
}
defer file.Close()
if _, err := file.WriteString(strconv.FormatInt(int64(currentPid), 10)); err != nil {
-log.Fatal(4, "Failed to write PID information: %v", err)
+log.Fatal("Failed to write PID information: %v", err)
}
}

@@ -451,12 +456,12 @@ func CheckLFSVersion() {

binVersion, err := git.BinVersion()
if err != nil {
-log.Fatal(4, "Error retrieving git version: %v", err)
+log.Fatal("Error retrieving git version: %v", err)
}

if !version.Compare(binVersion, "2.1.2", ">=") {
LFS.StartServer = false
-log.Error(4, "LFS server support needs at least Git v2.1.2")
+log.Error("LFS server support needs at least Git v2.1.2")
} else {
git.GlobalCommandArgs = append(git.GlobalCommandArgs, "-c", "filter.lfs.required=",
"-c", "filter.lfs.smudge=", "-c", "filter.lfs.clean=")
@@ -488,7 +493,7 @@ func NewContext() {

if com.IsFile(CustomConf) {
if err := Cfg.Append(CustomConf); err != nil {
-log.Fatal(4, "Failed to load custom conf '%s': %v", CustomConf, err)
+log.Fatal("Failed to load custom conf '%s': %v", CustomConf, err)
}
} else {
log.Warn("Custom config '%s' not found, ignore this if you're running first time", CustomConf)
@@ -497,14 +502,16 @@ func NewContext() {

homeDir, err := com.HomeDir()
if err != nil {
-log.Fatal(4, "Failed to get home directory: %v", err)
+log.Fatal("Failed to get home directory: %v", err)
}
homeDir = strings.Replace(homeDir, "\\", "/", -1)

-LogLevel = getLogLevel("log", "LEVEL", "Info")
+LogLevel = getLogLevel(Cfg.Section("log"), "LEVEL", "Info")
+StacktraceLogLevel = getStacktraceLogLevel(Cfg.Section("log"), "STACKTRACE_LEVEL", "None")
LogRootPath = Cfg.Section("log").Key("ROOT_PATH").MustString(path.Join(AppWorkPath, "log"))
forcePathSeparator(LogRootPath)
RedirectMacaronLog = Cfg.Section("log").Key("REDIRECT_MACARON_LOG").MustBool(false)
+RouterLogLevel = log.FromString(Cfg.Section("log").Key("ROUTER_LOG_LEVEL").MustString("Info"))

sec := Cfg.Section("server")
AppName = Cfg.Section("").Key("APP_NAME").MustString("Gitea: Git with a cup of tea")
@@ -521,7 +528,7 @@ func NewContext() {
UnixSocketPermissionRaw := sec.Key("UNIX_SOCKET_PERMISSION").MustString("666")
UnixSocketPermissionParsed, err := strconv.ParseUint(UnixSocketPermissionRaw, 8, 32)
if err != nil || UnixSocketPermissionParsed > 0777 {
-log.Fatal(4, "Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
+log.Fatal("Failed to parse unixSocketPermission: %s", UnixSocketPermissionRaw)
}
UnixSocketPermission = uint32(UnixSocketPermissionParsed)
}
@@ -547,7 +554,7 @@ func NewContext() {
// Check if has app suburl.
url, err := url.Parse(AppURL)
if err != nil {
-log.Fatal(4, "Invalid ROOT_URL '%s': %s", AppURL, err)
+log.Fatal("Invalid ROOT_URL '%s': %s", AppURL, err)
}
// Suburl should start with '/' and end without '/', such as '/{subpath}'.
// This value is empty if site does not have sub-url.
@@ -616,7 +623,7 @@ func NewContext() {
}
SSH.KeyTestPath = os.TempDir()
if err = Cfg.Section("server").MapTo(&SSH); err != nil {
-log.Fatal(4, "Failed to map SSH settings: %v", err)
+log.Fatal("Failed to map SSH settings: %v", err)
}

SSH.KeygenPath = sec.Key("SSH_KEYGEN_PATH").MustString("ssh-keygen")
@@ -630,9 +637,9 @@ func NewContext() {

if !SSH.Disabled && !SSH.StartBuiltinServer {
if err := os.MkdirAll(SSH.RootPath, 0700); err != nil {
-log.Fatal(4, "Failed to create '%s': %v", SSH.RootPath, err)
+log.Fatal("Failed to create '%s': %v", SSH.RootPath, err)
} else if err = os.MkdirAll(SSH.KeyTestPath, 0644); err != nil {
-log.Fatal(4, "Failed to create '%s': %v", SSH.KeyTestPath, err)
+log.Fatal("Failed to create '%s': %v", SSH.KeyTestPath, err)
}
}

@@ -650,7 +657,7 @@ func NewContext() {

sec = Cfg.Section("server")
if err = sec.MapTo(&LFS); err != nil {
-log.Fatal(4, "Failed to map LFS settings: %v", err)
+log.Fatal("Failed to map LFS settings: %v", err)
}
LFS.ContentPath = sec.Key("LFS_CONTENT_PATH").MustString(filepath.Join(AppDataPath, "lfs"))
if !filepath.IsAbs(LFS.ContentPath) {
@@ -661,7 +668,7 @@ func NewContext() {

if LFS.StartServer {
if err := os.MkdirAll(LFS.ContentPath, 0700); err != nil {
-log.Fatal(4, "Failed to create '%s': %v", LFS.ContentPath, err)
+log.Fatal("Failed to create '%s': %v", LFS.ContentPath, err)
}

LFS.JWTSecretBytes = make([]byte, 32)
@@ -670,7 +677,7 @@ func NewContext() {
if err != nil || n != 32 {
LFS.JWTSecretBase64, err = generate.NewJwtSecret()
if err != nil {
-log.Fatal(4, "Error generating JWT Secret for custom config: %v", err)
+log.Fatal("Error generating JWT Secret for custom config: %v", err)
return
}

@@ -679,24 +686,24 @@ func NewContext() {
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfg.Append(CustomConf); err != nil {
-log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
+log.Error("Failed to load custom conf '%s': %v", CustomConf, err)
}
}

cfg.Section("server").Key("LFS_JWT_SECRET").SetValue(LFS.JWTSecretBase64)

if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
-log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
+log.Fatal("Failed to create '%s': %v", CustomConf, err)
}
if err := cfg.SaveTo(CustomConf); err != nil {
-log.Fatal(4, "Error saving generated JWT Secret to custom config: %v", err)
+log.Fatal("Error saving generated JWT Secret to custom config: %v", err)
return
}
}
}

if err = Cfg.Section("oauth2").MapTo(&OAuth2); err != nil {
-log.Fatal(4, "Failed to OAuth2 settings: %v", err)
+log.Fatal("Failed to OAuth2 settings: %v", err)
return
}

@@ -707,24 +714,24 @@ func NewContext() {
if err != nil || n != 32 {
OAuth2.JWTSecretBase64, err = generate.NewJwtSecret()
if err != nil {
-log.Fatal(4, "error generating JWT secret: %v", err)
+log.Fatal("error generating JWT secret: %v", err)
return
}
cfg := ini.Empty()
if com.IsFile(CustomConf) {
if err := cfg.Append(CustomConf); err != nil {
-log.Error(4, "failed to load custom conf %s: %v", CustomConf, err)
+log.Error("failed to load custom conf %s: %v", CustomConf, err)
return
}
}
cfg.Section("oauth2").Key("JWT_SECRET").SetValue(OAuth2.JWTSecretBase64)

if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
-log.Fatal(4, "failed to create '%s': %v", CustomConf, err)
+log.Fatal("failed to create '%s': %v", CustomConf, err)
return
}
if err := cfg.SaveTo(CustomConf); err != nil {
-log.Fatal(4, "error saving generating JWT secret to custom config: %v", err)
+log.Fatal("error saving generating JWT secret to custom config: %v", err)
return
}
}
@@ -780,7 +787,7 @@ func NewContext() {
TimeFormat = TimeFormatKey
TestTimeFormat, _ := time.Parse(TimeFormat, TimeFormat)
if TestTimeFormat.Format(time.RFC3339) != "2006-01-02T15:04:05Z" {
-log.Fatal(4, "Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
+log.Fatal("Can't create time properly, please check your time format has 2006, 01, 02, 15, 04 and 05")
}
log.Trace("Custom TimeFormat: %s", TimeFormat)
}
@@ -790,7 +797,7 @@ func NewContext() {
if InstallLock {
currentUser, match := IsRunUserMatchCurrentUser(RunUser)
if !match {
-log.Fatal(4, "Expect user '%s' but current user is: %s", RunUser, currentUser)
+log.Fatal("Expect user '%s' but current user is: %s", RunUser, currentUser)
}
}

@@ -828,7 +835,7 @@ func NewContext() {
if EnableFederatedAvatar || !DisableGravatar {
GravatarSourceURL, err = url.Parse(GravatarSource)
if err != nil {
-log.Fatal(4, "Failed to parse Gravatar URL(%s): %v",
+log.Fatal("Failed to parse Gravatar URL(%s): %v",
GravatarSource, err)
}
}
@@ -845,15 +852,15 @@ func NewContext() {
}

if err = Cfg.Section("ui").MapTo(&UI); err != nil {
-log.Fatal(4, "Failed to map UI settings: %v", err)
+log.Fatal("Failed to map UI settings: %v", err)
} else if err = Cfg.Section("markdown").MapTo(&Markdown); err != nil {
-log.Fatal(4, "Failed to map Markdown settings: %v", err)
+log.Fatal("Failed to map Markdown settings: %v", err)
} else if err = Cfg.Section("admin").MapTo(&Admin); err != nil {
-log.Fatal(4, "Fail to map Admin settings: %v", err)
+log.Fatal("Fail to map Admin settings: %v", err)
} else if err = Cfg.Section("api").MapTo(&API); err != nil {
-log.Fatal(4, "Failed to map API settings: %v", err)
+log.Fatal("Failed to map API settings: %v", err)
} else if err = Cfg.Section("metrics").MapTo(&Metrics); err != nil {
-log.Fatal(4, "Failed to map Metrics settings: %v", err)
+log.Fatal("Failed to map Metrics settings: %v", err)
}

newCron()
@@ -909,35 +916,35 @@ func loadInternalToken(sec *ini.Section) string {
}
tempURI, err := url.Parse(uri)
if err != nil {
-log.Fatal(4, "Failed to parse INTERNAL_TOKEN_URI (%s): %v", uri, err)
+log.Fatal("Failed to parse INTERNAL_TOKEN_URI (%s): %v", uri, err)
}
switch tempURI.Scheme {
case "file":
fp, err := os.OpenFile(tempURI.RequestURI(), os.O_RDWR, 0600)
if err != nil {
-log.Fatal(4, "Failed to open InternalTokenURI (%s): %v", uri, err)
+log.Fatal("Failed to open InternalTokenURI (%s): %v", uri, err)
}
defer fp.Close()

buf, err := ioutil.ReadAll(fp)
if err != nil {
-log.Fatal(4, "Failed to read InternalTokenURI (%s): %v", uri, err)
+log.Fatal("Failed to read InternalTokenURI (%s): %v", uri, err)
}
// No token in the file, generate one and store it.
if len(buf) == 0 {
token, err := generate.NewInternalToken()
if err != nil {
-log.Fatal(4, "Error generate internal token: %v", err)
+log.Fatal("Error generate internal token: %v", err)
}
if _, err := io.WriteString(fp, token); err != nil {
-log.Fatal(4, "Error writing to InternalTokenURI (%s): %v", uri, err)
+log.Fatal("Error writing to InternalTokenURI (%s): %v", uri, err)
}
return token
}

return string(buf)
default:
-log.Fatal(4, "Unsupported URI-Scheme %q (INTERNAL_TOKEN_URI = %q)", tempURI.Scheme, uri)
+log.Fatal("Unsupported URI-Scheme %q (INTERNAL_TOKEN_URI = %q)", tempURI.Scheme, uri)
}
return ""
}
@@ -948,7 +955,7 @@ func loadOrGenerateInternalToken(sec *ini.Section) string {
if len(token) == 0 {
token, err = generate.NewInternalToken()
if err != nil {
-log.Fatal(4, "Error generate internal token: %v", err)
+log.Fatal("Error generate internal token: %v", err)
}

// Save secret
@@ -956,17 +963,17 @@ func loadOrGenerateInternalToken(sec *ini.Section) string {
if com.IsFile(CustomConf) {
// Keeps custom settings if there is already something.
if err := cfgSave.Append(CustomConf); err != nil {
-log.Error(4, "Failed to load custom conf '%s': %v", CustomConf, err)
+log.Error("Failed to load custom conf '%s': %v", CustomConf, err)
}
}

cfgSave.Section("security").Key("INTERNAL_TOKEN").SetValue(token)

if err := os.MkdirAll(filepath.Dir(CustomConf), os.ModePerm); err != nil {
-log.Fatal(4, "Failed to create '%s': %v", CustomConf, err)
+log.Fatal("Failed to create '%s': %v", CustomConf, err)
}
if err := cfgSave.SaveTo(CustomConf); err != nil {
-log.Fatal(4, "Error saving generated INTERNAL_TOKEN to custom config: %v", err)
+log.Fatal("Error saving generated INTERNAL_TOKEN to custom config: %v", err)
}
}
return token
@@ -976,6 +983,9 @@ func loadOrGenerateInternalToken(sec *ini.Section) string {
func NewServices() {
newService()
newLogService()
+newMacaronLogService()
+newAccessLogService()
+newRouterLogService()
NewXORMLogService(false)
newCacheService()
newSessionService()
@@ -43,7 +43,7 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {

ch, reqs, err := newChan.Accept()
if err != nil {
-log.Error(3, "Error accepting channel: %v", err)
+log.Error("Error accepting channel: %v", err)
continue
}

@@ -61,7 +61,7 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
args[0] = strings.TrimLeft(args[0], "\x04")
_, _, err := com.ExecCmdBytes("env", args[0]+"="+args[1])
if err != nil {
-log.Error(3, "env: %v", err)
+log.Error("env: %v", err)
return
}
case "exec":
@@ -79,23 +79,23 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {

stdout, err := cmd.StdoutPipe()
if err != nil {
-log.Error(3, "SSH: StdoutPipe: %v", err)
+log.Error("SSH: StdoutPipe: %v", err)
return
}
stderr, err := cmd.StderrPipe()
if err != nil {
-log.Error(3, "SSH: StderrPipe: %v", err)
+log.Error("SSH: StderrPipe: %v", err)
return
}
input, err := cmd.StdinPipe()
if err != nil {
-log.Error(3, "SSH: StdinPipe: %v", err)
+log.Error("SSH: StdinPipe: %v", err)
return
}

// FIXME: check timeout
if err = cmd.Start(); err != nil {
-log.Error(3, "SSH: Start: %v", err)
+log.Error("SSH: Start: %v", err)
return
}

@@ -105,7 +105,7 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
io.Copy(ch.Stderr(), stderr)

if err = cmd.Wait(); err != nil {
-log.Error(3, "SSH: Wait: %v", err)
+log.Error("SSH: Wait: %v", err)
return
}

@@ -121,13 +121,13 @@ func handleServerConn(keyID string, chans <-chan ssh.NewChannel) {
func listen(config *ssh.ServerConfig, host string, port int) {
listener, err := net.Listen("tcp", host+":"+com.ToStr(port))
if err != nil {
-log.Fatal(4, "Failed to start SSH server: %v", err)
+log.Fatal("Failed to start SSH server: %v", err)
}
for {
// Once a ServerConfig has been configured, connections can be accepted.
conn, err := listener.Accept()
if err != nil {
-log.Error(3, "SSH: Error accepting incoming connection: %v", err)
+log.Error("SSH: Error accepting incoming connection: %v", err)
continue
}

@@ -142,7 +142,7 @@ func listen(config *ssh.ServerConfig, host string, port int) {
if err == io.EOF {
log.Warn("SSH: Handshaking with %s was terminated: %v", conn.RemoteAddr(), err)
} else {
-log.Error(3, "SSH: Error on handshaking with %s: %v", conn.RemoteAddr(), err)
+log.Error("SSH: Error on handshaking with %s: %v", conn.RemoteAddr(), err)
}
return
}
@@ -166,7 +166,7 @@ func Listen(host string, port int, ciphers []string, keyExchanges []string, macs
PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
pkey, err := models.SearchPublicKeyByContent(strings.TrimSpace(string(ssh.MarshalAuthorizedKey(key))))
if err != nil {
-log.Error(3, "SearchPublicKeyByContent: %v", err)
+log.Error("SearchPublicKeyByContent: %v", err)
return nil, err
}
return &ssh.Permissions{Extensions: map[string]string{"key-id": com.ToStr(pkey.ID)}}, nil
@@ -178,23 +178,23 @@ func Listen(host string, port int, ciphers []string, keyExchanges []string, macs
filePath := filepath.Dir(keyPath)

if err := os.MkdirAll(filePath, os.ModePerm); err != nil {
-log.Error(4, "Failed to create dir %s: %v", filePath, err)
+log.Error("Failed to create dir %s: %v", filePath, err)
}

err := GenKeyPair(keyPath)
if err != nil {
-log.Fatal(4, "Failed to generate private key: %v", err)
+log.Fatal("Failed to generate private key: %v", err)
}
log.Trace("SSH: New private key is generateed: %s", keyPath)
}

privateBytes, err := ioutil.ReadFile(keyPath)
if err != nil {
-log.Fatal(4, "SSH: Failed to load private key")
+log.Fatal("SSH: Failed to load private key")
}
private, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
-log.Fatal(4, "SSH: Failed to parse private key")
+log.Fatal("SSH: Failed to parse private key")
}
config.AddHostKey(private)

@@ -351,7 +351,7 @@ func RenderCommitMessageLink(msg, urlPrefix, urlDefault string, metas map[string
// shouldn't be any special HTML.
fullMessage, err := markup.RenderCommitMessage([]byte(cleanMsg), urlPrefix, urlDefault, metas)
if err != nil {
-log.Error(3, "RenderCommitMessage: %v", err)
+log.Error("RenderCommitMessage: %v", err)
return ""
}
msgLines := strings.Split(strings.TrimSpace(string(fullMessage)), "\n")
@@ -366,7 +366,7 @@ func RenderCommitBody(msg, urlPrefix string, metas map[string]string) template.H
cleanMsg := template.HTMLEscapeString(msg)
fullMessage, err := markup.RenderCommitMessage([]byte(cleanMsg), urlPrefix, "", metas)
if err != nil {
-log.Error(3, "RenderCommitMessage: %v", err)
+log.Error("RenderCommitMessage: %v", err)
return ""
}
body := strings.Split(strings.TrimSpace(string(fullMessage)), "\n")
@@ -425,7 +425,7 @@ func ActionIcon(opType models.ActionType) string {
func ActionContent2Commits(act Actioner) *models.PushCommits {
push := models.NewPushCommits()
if err := json.Unmarshal([]byte(act.GetContent()), push); err != nil {
-log.Error(4, "json.Unmarshal:\n%s\nERROR: %v", act.GetContent(), err)
+log.Error("json.Unmarshal:\n%s\nERROR: %v", act.GetContent(), err)
}
return push
}
@@ -30,13 +30,13 @@ func URLJoin(base string, elems ...string) string {
}
baseURL, err := url.Parse(base)
if err != nil {
-log.Error(4, "URLJoin: Invalid base URL %s", base)
+log.Error("URLJoin: Invalid base URL %s", base)
return ""
}
joinedPath := path.Join(elems...)
argURL, err := url.Parse(joinedPath)
if err != nil {
-log.Error(4, "URLJoin: Invalid arg %s", joinedPath)
+log.Error("URLJoin: Invalid arg %s", joinedPath)
return ""
}
joinedURL := baseURL.ResolveReference(argURL).String()
@@ -1742,6 +1742,16 @@ config.git_gc_timeout = GC Operation Timeout

config.log_config = Log Configuration
config.log_mode = Log Mode
+config.macaron_log_mode = Macaron Log Mode
+config.own_named_logger = Named Logger
+config.routes_to_default_logger = Routes To Default Logger
+config.go_log = Uses Go Log (redirected to default)
+config.router_log_mode = Router Log Mode
+config.disabled_logger = Disabled
+config.access_log_mode = Access Log Mode
+config.access_log_template = Template
+config.xorm_log_mode = XORM Log Mode
+config.xorm_log_sql = Log SQL

monitor.cron = Cron Tasks
monitor.name = Name
@@ -259,11 +259,13 @@ func Config(ctx *context.Context) {
type logger struct {
Mode, Config string
}
-loggers := make([]*logger, len(setting.LogModes))
-for i := range setting.LogModes {
-loggers[i] = &logger{setting.LogModes[i], setting.LogConfigs[i]}
-}
-ctx.Data["Loggers"] = loggers
+ctx.Data["Loggers"] = setting.LogDescriptions
+ctx.Data["RedirectMacaronLog"] = setting.RedirectMacaronLog
+ctx.Data["EnableAccessLog"] = setting.EnableAccessLog
+ctx.Data["AccessLogTemplate"] = setting.AccessLogTemplate
+ctx.Data["DisableRouterLog"] = setting.DisableRouterLog
+ctx.Data["EnableXORMLog"] = setting.EnableXORMLog
+ctx.Data["LogSQL"] = setting.LogSQL

ctx.HTML(200, tplConfig)
}
@@ -55,14 +55,14 @@ func ToCommit(repo *models.Repository, c *git.Commit) *api.PayloadCommit {
if author, err := models.GetUserByEmail(c.Author.Email); err == nil {
authorUsername = author.Name
} else if !models.IsErrUserNotExist(err) {
-log.Error(4, "GetUserByEmail: %v", err)
+log.Error("GetUserByEmail: %v", err)
}

committerUsername := ""
if committer, err := models.GetUserByEmail(c.Committer.Email); err == nil {
committerUsername = committer.Name
} else if !models.IsErrUserNotExist(err) {
-log.Error(4, "GetUserByEmail: %v", err)
+log.Error("GetUserByEmail: %v", err)
}

verif := models.ParseCommitWithSignature(c)
@@ -234,7 +234,7 @@ func CreateUserRepo(ctx *context.APIContext, owner *models.User, opt api.CreateR
} else {
if repo != nil {
if err = models.DeleteRepository(ctx.User, ctx.User.ID, repo.ID); err != nil {
-log.Error(4, "DeleteRepository: %v", err)
+log.Error("DeleteRepository: %v", err)
}
}
ctx.Error(500, "CreateRepository", err)
@@ -417,7 +417,7 @@ func Migrate(ctx *context.APIContext, form auth.MigrateRepoForm) {
err = util.URLSanitizedError(err, remoteAddr)
if repo != nil {
if errDelete := models.DeleteRepository(ctx.User, ctxUser.ID, repo.ID); errDelete != nil {
-log.Error(4, "DeleteRepository: %v", errDelete)
+log.Error("DeleteRepository: %v", errDelete)
}
}
ctx.Error(500, "MigrateRepository", err)
@@ -597,7 +597,7 @@ func TopicSearch(ctx *context.Context) {
Limit: 10,
})
if err != nil {
-log.Error(2, "SearchTopics failed: %v", err)
+log.Error("SearchTopics failed: %v", err)
ctx.JSON(500, map[string]interface{}{
"message": "Search topics failed.",
})
@@ -79,11 +79,11 @@ func GlobalInit() {
if err := initDBEngine(); err == nil {
log.Info("ORM engine initialization successful!")
} else {
-log.Fatal(4, "ORM engine initialization failed: %v", err)
+log.Fatal("ORM engine initialization failed: %v", err)
}

if err := models.InitOAuth2(); err != nil {
-log.Fatal(4, "Failed to initialize OAuth2 support: %v", err)
+log.Fatal("Failed to initialize OAuth2 support: %v", err)
}

models.LoadRepoConfig()
@@ -92,7 +92,7 @@ func GlobalInit() {
// Booting long running goroutines.
cron.NewContext()
if err := issue_indexer.InitIssueIndexer(false); err != nil {
-log.Fatal(4, "Failed to initialize issue indexer: %v", err)
+log.Fatal("Failed to initialize issue indexer: %v", err)
}
models.InitRepoIndexer()
models.InitSyncMirrors()
@@ -236,7 +236,7 @@ func InstallPost(ctx *context.Context, form auth.InstallForm) {
if com.IsFile(setting.CustomConf) {
// Keeps custom settings if there is already something.
if err = cfg.Append(setting.CustomConf); err != nil {
-log.Error(4, "Failed to load custom conf '%s': %v", setting.CustomConf, err)
+log.Error("Failed to load custom conf '%s': %v", setting.CustomConf, err)
}
}
cfg.Section("database").Key("DB_TYPE").SetValue(models.DbCfg.Type)
@@ -78,7 +78,7 @@ func MembersAction(ctx *context.Context) {
}

if err != nil {
-log.Error(4, "Action(%s): %v", ctx.Params(":action"), err)
+log.Error("Action(%s): %v", ctx.Params(":action"), err)
ctx.JSON(200, map[string]interface{}{
"ok": false,
"err": err.Error(),
@@ -109,7 +109,7 @@ func TeamsAction(ctx *context.Context) {
if models.IsErrLastOrgOwner(err) {
ctx.Flash.Error(ctx.Tr("form.last_org_owner"))
} else {
-log.Error(3, "Action(%s): %v", ctx.Params(":action"), err)
+log.Error("Action(%s): %v", ctx.Params(":action"), err)
ctx.JSON(200, map[string]interface{}{
"ok": false,
"err": err.Error(),
@@ -156,7 +156,7 @@ func TeamsRepoAction(ctx *context.Context) {
}

if err != nil {
-log.Error(3, "Action(%s): '%s' %v", ctx.Params(":action"), ctx.Org.Team.Name, err)
+log.Error("Action(%s): '%s' %v", ctx.Params(":action"), ctx.Org.Team.Name, err)
ctx.ServerError("TeamsRepoAction", err)
return
}