Better logging (#6038) (#6095)

* Panic, don't fatal, on creating a new logger

Fixes #5854

Signed-off-by: Andrew Thornton <art27@cantab.net>

* partial broken

* Update the logging infrastructure

Signed-off-by: Andrew Thornton <art27@cantab.net>

* Reset the skip levels for Fatal and Error

Signed-off-by: Andrew Thornton <art27@cantab.net>

* broken ncsa

* More log.Error fixes

Signed-off-by: Andrew Thornton <art27@cantab.net>

* Remove nal

* set log-levels to lowercase

* Make console_test test all levels

* switch to lowercased levels

* OK now working

* Fix vetting issues

* Fix lint

* Fix tests

* change default logging to match current gitea

* Improve log testing

Signed-off-by: Andrew Thornton <art27@cantab.net>

* reset error skip levels to 0

* Update documentation and access logger configuration

* Redirect the router log back to Gitea if the macaron log is redirected, but also allow setting the log level - e.g. TRACE

* Fix broken level caching

* Refactor the router log

* Add Router logger

* Add colorizing options

* Adjust router colors

* Only create loggers if they will be used

* update app.ini.sample

* rename Attribute to ColorAttribute

* Change from white to green for function

* Set fatal/error levels

* Restore initial trace logger

* Fix Trace arguments in modules/auth/auth.go

* Properly handle XORMLogger

* Improve admin/config page

* fix fmt

* Add auto-compression of old logs

* Update error log levels

* Remove the unnecessary skip argument from Error, Fatal and Critical

* Add stacktrace support

* Fix tests

* Remove x/sync from vendors?

* Add stderr option to console logger

* Use filepath.ToSlash to protect against Windows in tests

* Remove prefixed underscores from names in colors.go

* Remove not implemented database logger

This was removed from Gogs on 4 Mar 2016 but left in the configuration
since then.

* Ensure that log paths are relative to ROOT_PATH

* use path.Join

* rename jsonConfig to logConfig

* Rename "config" to "jsonConfig" to make it clearer

* Requested changes

* Requested changes: XormLogger

* Try to color the Windows terminal

If successful, default to colorizing the console logs

* fixup

* Colorize initially too

* update vendor

* Colorize logs by default and remove colors if this is not a colorizing logger

* Fix documentation

* fix test

* Use go-isatty to detect whether we are on msys or cygwin on Windows

* Fix spelling mistake

* Add missing vendors

* More changes

* Rationalise the ANSI writer protection

* Adjust colors on advice from @0x5c

* Make Flags a comma separated list

* Move to use the windows constant for ENABLE_VIRTUAL_TERMINAL_PROCESSING

* Ensure matching is done on the non-colored message - to simplify EXPRESSION
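The change that touches most of the files below is the removal of the leading skip/level integer from `log.Error`, `log.Fatal` and `log.Critical`. A minimal, hypothetical sketch of the new call shape (the stand-in functions here are illustrative only, not the real `modules/log` API):

```go
package main

import "fmt"

// Hypothetical stand-ins for code.gitea.io/gitea/modules/log: after this PR,
// Error/Fatal/Critical take only a format string and arguments - the old
// leading "skip" integer is gone and caller depth is handled internally.
func Error(format string, v ...interface{}) { fmt.Printf("[E] "+format+"\n", v...) }
func Fatal(format string, v ...interface{}) { fmt.Printf("[F] "+format+"\n", v...) }

func main() {
	err := fmt.Errorf("boom")
	// Old call sites looked like: log.Error(4, "GetUserByID(%d): %v", 1, err)
	// New call sites simply drop the first argument:
	Error("GetUserByID(%d): %v", 1, err)
	Fatal("Failed to initialize ORM engine: %v", err)
}
```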
zeripath 2019-04-02 08:48:31 +01:00 committed by GitHub
parent ef2a343e27
commit 704da08fdc
301 changed files with 36993 additions and 8244 deletions


@ -204,14 +204,14 @@ func runHookPostReceive(c *cli.Context) error {
RepoUserName: repoUser,
RepoName: repoName,
}); err != nil {
log.GitLogger.Error(2, "Update: %v", err)
log.GitLogger.Error("Update: %v", err)
}
if newCommitID != git.EmptySHA && strings.HasPrefix(refFullName, git.BranchPrefix) {
branch := strings.TrimPrefix(refFullName, git.BranchPrefix)
repo, pullRequestAllowed, err := private.GetRepository(repoID)
if err != nil {
log.GitLogger.Error(2, "get repo: %v", err)
log.GitLogger.Error("get repo: %v", err)
break
}
if !pullRequestAllowed {
@ -229,7 +229,7 @@ func runHookPostReceive(c *cli.Context) error {
pr, err := private.ActivePullRequest(baseRepo.ID, repo.ID, baseRepo.DefaultBranch, branch)
if err != nil {
log.GitLogger.Error(2, "get active pr: %v", err)
log.GitLogger.Error("get active pr: %v", err)
break
}


@ -44,7 +44,7 @@ func runMigrate(ctx *cli.Context) error {
models.LoadConfigs()
if err := models.NewEngine(migrations.Migrate); err != nil {
log.Fatal(4, "Failed to initialize ORM engine: %v", err)
log.Fatal("Failed to initialize ORM engine: %v", err)
return err
}


@ -100,7 +100,7 @@ func fail(userMessage, logMessage string, args ...interface{}) {
if !setting.ProdMode {
fmt.Fprintf(os.Stderr, logMessage+"\n", args...)
}
log.GitLogger.Fatal(3, logMessage, args...)
log.GitLogger.Fatal(logMessage, args...)
return
}


@ -69,7 +69,7 @@ func runHTTPRedirector() {
var err = runHTTP(source, context2.ClearHandler(handler))
if err != nil {
log.Fatal(4, "Failed to start port redirection: %v", err)
log.Fatal("Failed to start port redirection: %v", err)
}
}
@ -84,7 +84,7 @@ func runLetsEncrypt(listenAddr, domain, directory, email string, m http.Handler)
log.Info("Running Let's Encrypt handler on %s", setting.HTTPAddr+":"+setting.PortToRedirect)
var err = http.ListenAndServe(setting.HTTPAddr+":"+setting.PortToRedirect, certManager.HTTPHandler(http.HandlerFunc(runLetsEncryptFallbackHandler))) // all traffic coming into HTTP will be redirect to HTTPS automatically (LE HTTP-01 validation happens here)
if err != nil {
log.Fatal(4, "Failed to start the Let's Encrypt handler on port %s: %v", setting.PortToRedirect, err)
log.Fatal("Failed to start the Let's Encrypt handler on port %s: %v", setting.PortToRedirect, err)
}
}()
server := &http.Server{
@ -192,13 +192,13 @@ func runWeb(ctx *cli.Context) error {
case setting.FCGI:
listener, err := net.Listen("tcp", listenAddr)
if err != nil {
log.Fatal(4, "Failed to bind %s", listenAddr, err)
log.Fatal("Failed to bind %s: %v", listenAddr, err)
}
defer listener.Close()
err = fcgi.Serve(listener, context2.ClearHandler(m))
case setting.UnixSocket:
if err := os.Remove(listenAddr); err != nil && !os.IsNotExist(err) {
log.Fatal(4, "Failed to remove unix socket directory %s: %v", listenAddr, err)
log.Fatal("Failed to remove unix socket directory %s: %v", listenAddr, err)
}
var listener *net.UnixListener
listener, err = net.ListenUnix("unix", &net.UnixAddr{Name: listenAddr, Net: "unix"})
@ -209,15 +209,15 @@ func runWeb(ctx *cli.Context) error {
// FIXME: add proper implementation of signal capture on all protocols
// execute this on SIGTERM or SIGINT: listener.Close()
if err = os.Chmod(listenAddr, os.FileMode(setting.UnixSocketPermission)); err != nil {
log.Fatal(4, "Failed to set permission of unix socket: %v", err)
log.Fatal("Failed to set permission of unix socket: %v", err)
}
err = http.Serve(listener, context2.ClearHandler(m))
default:
log.Fatal(4, "Invalid protocol: %s", setting.Protocol)
log.Fatal("Invalid protocol: %s", setting.Protocol)
}
if err != nil {
log.Fatal(4, "Failed to start server: %v", err)
log.Fatal("Failed to start server: %v", err)
}
return nil


@ -34,7 +34,7 @@ func runHTTPS(listenAddr, certFile, keyFile string, m http.Handler) error {
var err error
config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile)
if err != nil {
log.Fatal(4, "Failed to load https cert file %s: %v", listenAddr, err)
log.Fatal("Failed to load https cert file %s: %v", listenAddr, err)
}
return gracehttp.Serve(&http.Server{


@ -1,3 +1,4 @@
; This file lists the default values used by Gitea
; Copy required sections to your own app.ini (default is custom/conf/app.ini)
; and modify as needed.
@ -33,7 +34,7 @@ PREFERRED_LICENSES = Apache License 2.0,MIT License
DISABLE_HTTP_GIT = false
; Value for Access-Control-Allow-Origin header, default is not to present
; WARNING: This maybe harmful to you website if you do not give it a right value.
ACCESS_CONTROL_ALLOW_ORIGIN =
ACCESS_CONTROL_ALLOW_ORIGIN =
; Force ssh:// clone url instead of scp-style uri when default SSH port is used
USE_COMPAT_SSH_URI = false
; Close issues as long as a commit on any branch marks it as fixed
@ -260,7 +261,7 @@ ISSUE_INDEXER_TYPE = bleve
ISSUE_INDEXER_PATH = indexers/issues.bleve
; Issue indexer queue, currently support: channel or levelqueue, default is levelqueue
ISSUE_INDEXER_QUEUE_TYPE = levelqueue
; When ISSUE_INDEXER_QUEUE_TYPE is levelqueue, this will be the queue will be saved path,
; When ISSUE_INDEXER_QUEUE_TYPE is levelqueue, this will be the queue will be saved path,
; default is indexers/issues.queue
ISSUE_INDEXER_QUEUE_DIR = indexers/issues.queue
; Batch queue number, default is 20
@ -390,8 +391,8 @@ NO_REPLY_ADDRESS = noreply.example.org
; Show Registration button
SHOW_REGISTRATION_BUTTON = true
; Default value for AutoWatchNewRepos
; When adding a repo to a team or creating a new repo all team members will watch the
; repo automatically if enabled
; When adding a repo to a team or creating a new repo all team members will watch the
; repo automatically if enabled
AUTO_WATCH_NEW_REPOS = true
[webhook]
@ -516,17 +517,37 @@ ROOT_PATH =
MODE = console
; Buffer length of the channel, keep it as it is if you don't know what it is.
BUFFER_LEN = 10000
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
LEVEL = Trace
REDIRECT_MACARON_LOG = false
MACARON = file
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Info"
ROUTER_LOG_LEVEL = Info
ROUTER = console
ENABLE_ACCESS_LOG = false
ACCESS_LOG_TEMPLATE = {{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}\" \"{{.Ctx.Req.UserAgent}}"
ACCESS = file
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace"
LEVEL = Info
; Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "None"
STACKTRACE_LEVEL = None
; Generic log modes
[log.x]
FLAGS = stdflags
EXPRESSION =
PREFIX =
COLORIZE = false
; For "console" mode only
[log.console]
LEVEL =
STDERR = false
; For "file" mode only
[log.file]
LEVEL =
; Set the file_name for the logger. If this is a relative path this
; will be relative to ROOT_PATH
FILE_NAME =
; This enables automated log rotate(switch of following options), default is true
LOG_ROTATE = true
; Max number of lines in a single file, default is 1000000
@ -537,6 +558,10 @@ MAX_SIZE_SHIFT = 28
DAILY_ROTATE = true
; delete the log file after n days, default is 7
MAX_DAYS = 7
; compress logs with gzip
COMPRESS = true
; compression level see godoc for compress/gzip
COMPRESSION_LEVEL = -1
; For "conn" mode only
[log.conn]
@ -564,14 +589,6 @@ PASSWD =
; Receivers, can be one or more, e.g. 1@example.com,2@example.com
RECEIVERS =
; For "database" mode only
[log.database]
LEVEL =
; Either "mysql" or "postgres"
DRIVER =
; Based on xorm, e.g.: root:root@localhost/gitea?charset=utf8
CONN =
[cron]
; Enable running cron tasks periodically.
ENABLED = true


@ -68,10 +68,12 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
- `DEFAULT_CLOSE_ISSUES_VIA_COMMITS_IN_ANY_BRANCH`: **false**: Close an issue if a commit on a non default branch marks it as closed.
### Repository - Pull Request (`repository.pull-request`)
- `WORK_IN_PROGRESS_PREFIXES`: **WIP:,\[WIP\]**: List of prefixes used in Pull Request
title to mark them as Work In Progress
### Repository - Issue (`repository.issue`)
- `LOCK_REASONS`: **Too heated,Off-topic,Resolved,Spam**: A list of reasons why a Pull Request or Issue can be locked
## UI (`ui`)
@ -287,9 +289,65 @@ Values containing `#` or `;` must be quoted using `` ` `` or `"""`.
## Log (`log`)
- `ROOT_PATH`: **\<empty\>**: Root path for log files.
- `MODE`: **console**: Logging mode. For multiple modes, use a comma to separate values.
- `LEVEL`: **Trace**: General log level. \[Trace, Debug, Info, Warn, Error, Critical\]
- `REDIRECT_MACARON_LOG`: **false**: Redirects the Macaron log to the Gitea logger.
- `MODE`: **console**: Logging mode. For multiple modes, use a comma to separate values. You can configure each mode in per mode log subsections `\[log.modename\]`. By default the file mode will log to `$ROOT_PATH/gitea.log`.
- `LEVEL`: **Info**: General log level. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\]
- `STACKTRACE_LEVEL`: **None**: Default log level at which stack traces are created. \[Trace, Debug, Info, Warn, Error, Critical, Fatal, None\]
- `REDIRECT_MACARON_LOG`: **false**: Redirects the Macaron log to its own logger or the default logger.
- `MACARON`: **file**: Logging mode for the macaron logger, use a comma to separate values. Configure each mode in per mode log subsections `\[log.modename.macaron\]`. By default the file mode will log to `$ROOT_PATH/macaron.log`. (If you set this to `,` it will log to default gitea logger.)
- `ROUTER_LOG_LEVEL`: **Info**: The log level that the router should log at. (If you are setting the access log, it's recommended to place this at Debug.)
- `ROUTER`: **console**: The mode or name of the log the router should log to. (If you set this to `,` it will log to default gitea logger.)
NB: You must set `REDIRECT_MACARON_LOG` to `true` and have `DISABLE_ROUTER_LOG` set to `false` for this option to take effect. Configure each mode in per mode log subsections `\[log.modename.router\]`.
- `ENABLE_ACCESS_LOG`: **false**: Creates an access.log in NCSA common log format, or as per the following template
- `ACCESS`: **file**: Logging mode for the access logger, use a comma to separate values. Configure each mode in per mode log subsections `\[log.modename.access\]`. By default the file mode will log to `$ROOT_PATH/access.log`. (If you set this to `,` it will log to the default gitea logger.)
- `ACCESS_LOG_TEMPLATE`: **`{{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]" }} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}} "{{.Ctx.Req.Referer}}\" \"{{.Ctx.Req.UserAgent}}"`**: Sets the template used to create the access log.
- The following variables are available:
- `Ctx`: the `macaron.Context` of the request.
- `Identity`: the SignedUserName or `"-"` if not logged in.
- `Start`: the start time of the request.
- `ResponseWriter`: the responseWriter from the request.
- You must be very careful to ensure that this template does not throw errors or panics, as it runs outside of the panic/recovery script; a small rendering sketch follows this list.
- `ENABLE_XORM_LOG`: **true**: Set whether to perform XORM logging. Please note SQL statement logging can be disabled by setting `LOG_SQL` to false in the `[database]` section.
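
To make `ACCESS_LOG_TEMPLATE` concrete, here is a small, self-contained sketch that renders a trimmed-down version of the default template with Go's `text/template`. The structs are simplified stand-ins for macaron's context and response writer, not the real types:

```go
package main

import (
	"os"
	"text/template"
	"time"
)

// Simplified, hypothetical stand-ins for the values exposed to the access log
// template (the real ones are macaron's Context and ResponseWriter).
type fakeReq struct{ Method, RequestURI, Proto string }
type fakeCtx struct {
	RemoteAddr string
	Req        fakeReq
}
type accessEntry struct {
	Ctx            fakeCtx
	Identity       string
	Start          time.Time
	ResponseWriter struct{ Status, Size int }
}

func main() {
	// A trimmed-down version of the default template shown above.
	tmpl := template.Must(template.New("access").Parse(
		`{{.Ctx.RemoteAddr}} - {{.Identity}} {{.Start.Format "[02/Jan/2006:15:04:05 -0700]"}} "{{.Ctx.Req.Method}} {{.Ctx.Req.RequestURI}} {{.Ctx.Req.Proto}}" {{.ResponseWriter.Status}} {{.ResponseWriter.Size}}` + "\n"))

	e := accessEntry{
		Ctx:      fakeCtx{RemoteAddr: "127.0.0.1", Req: fakeReq{"GET", "/user/repo", "HTTP/1.1"}},
		Identity: "-",
		Start:    time.Now(),
	}
	e.ResponseWriter.Status, e.ResponseWriter.Size = 200, 1234

	// If the template errors or panics the log line is lost, which is why the
	// warning above says to keep it error-free.
	_ = tmpl.Execute(os.Stdout, e)
}
```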
### Log subsections (`log.name`, `log.name.*`)
- `LEVEL`: **log.LEVEL**: Sets the log-level of this sublogger. Defaults to the `LEVEL` set in the global `[log]` section.
- `STACKTRACE_LEVEL`: **log.STACKTRACE_LEVEL**: Sets the log level at which to log stack traces.
- `MODE`: **name**: Sets the mode of this sublogger - Defaults to the provided subsection name. This allows you to have two different file loggers at different levels.
- `EXPRESSION`: **""**: A regular expression to match either the function name, file or message. Defaults to empty. Only log messages that match the expression will be saved in the logger.
- `FLAGS`: **stdflags**: A comma separated string representing the log flags. Defaults to `stdflags` which represents the prefix: `2009/01/23 01:23:23 ...a/b/c/d.go:23:runtime.Caller() [I]: message`. `none` means don't prefix log lines. See `modules/log/base.go` for more information.
- `PREFIX`: **""**: An additional prefix for every log line in this logger. Defaults to empty.
- `COLORIZE`: **false**: Colorize the log lines by default
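
As a purely illustrative sketch of how `EXPRESSION` filtering behaves - the real `modules/log` implementation may match against the combined formatted line rather than each part separately - the idea is roughly:

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical event shape; only events whose function name, file name or
// message match the sublogger's EXPRESSION are kept.
type event struct {
	Function, Filename, Msg string
}

func shouldLog(expr *regexp.Regexp, e event) bool {
	if expr == nil {
		return true // an empty EXPRESSION keeps everything
	}
	return expr.MatchString(e.Function) ||
		expr.MatchString(e.Filename) ||
		expr.MatchString(e.Msg)
}

func main() {
	// Keep only events coming from the models package.
	expr := regexp.MustCompile(`models/`)
	fmt.Println(shouldLog(expr, event{Function: "models.NewIssue", Filename: "models/issue.go", Msg: "created"})) // true
	fmt.Println(shouldLog(expr, event{Function: "routers.Home", Filename: "routers/home.go", Msg: "served"}))     // false
}
```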
### Console log mode (`log.console`, `log.console.*`, or `MODE=console`)
- For the console logger `COLORIZE` will default to `true` if not on Windows.
- `STDERR`: **false**: Use Stderr instead of Stdout.
### File log mode (`log.file`, `log.file.*` or `MODE=file`)
- `FILE_NAME`: Set the file name for this logger. Defaults as described above. If relative, it will be relative to `ROOT_PATH`.
- `LOG_ROTATE`: **true**: Rotate the log files.
- `MAX_SIZE_SHIFT`: **28**: Maximum size shift of a single file, 28 represents 256Mb.
- `DAILY_ROTATE`: **true**: Rotate logs daily.
- `MAX_DAYS`: **7**: Delete the log file after n days
- NB: `COLORIZE` will default to `true` if not on Windows.
- `COMPRESS`: **true**: Compress old log files with gzip by default (see the sketch after this list).
- `COMPRESSION_LEVEL`: **-1**: Compression level to pass to gzip.
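
`COMPRESS` and `COMPRESSION_LEVEL` gzip old rotated files. A minimal sketch of the idea, assuming a rotated file name - the helper below is illustrative and not the actual rotation code in `modules/log`:

```go
package main

import (
	"compress/gzip"
	"io"
	"os"
)

// compressOldLog rewrites a rotated log file as <name>.gz and removes the
// original. The path and level are illustrative.
func compressOldLog(path string, level int) error {
	src, err := os.Open(path)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create(path + ".gz")
	if err != nil {
		return err
	}
	defer dst.Close()

	// level -1 is gzip.DefaultCompression, matching COMPRESSION_LEVEL's default.
	zw, err := gzip.NewWriterLevel(dst, level)
	if err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	return os.Remove(path)
}

func main() {
	_ = compressOldLog("gitea.log.2019-04-01", gzip.DefaultCompression)
}
```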
### Conn log mode (`log.conn`, `log.conn.*` or `MODE=conn`)
- `RECONNECT_ON_MSG`: **false**: Reconnect host for every single message.
- `RECONNECT`: **false**: Try to reconnect when connection is lost.
- `PROTOCOL`: **tcp**: Set the protocol, either "tcp", "unix" or "udp".
- `ADDR`: **:7020**: Sets the address to connect to.
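
A rough, illustrative sketch of what the conn mode settings correspond to - the address is a placeholder, and the real logger re-dials according to `RECONNECT`/`RECONNECT_ON_MSG`:

```go
package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// PROTOCOL and ADDR pick the net.Dial target.
	conn, err := net.Dial("tcp", "127.0.0.1:7020")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	line := fmt.Sprintf("%s [I] hello from the conn logger\n", time.Now().Format("2006/01/02 15:04:05"))
	if _, err := conn.Write([]byte(line)); err != nil {
		// With RECONNECT=true the real logger would re-dial here and retry.
		fmt.Println("write failed:", err)
	}
}
```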
### SMTP log mode (`log.smtp`, `log.smtp.*` or `MODE=smtp`)
- `USER`: User email address to send from.
- `PASSWD`: Password for the SMTP server.
- `HOST`: **127.0.0.1:25**: The SMTP host to connect to.
- `RECEIVERS`: Email addresses to send to.
- `SUBJECT`: **Diagnostic message from Gitea**
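
And a hedged sketch of how the smtp mode settings map onto a plain `net/smtp` send - every address and credential below is a placeholder:

```go
package main

import (
	"fmt"
	"net/smtp"
	"strings"
)

func main() {
	host := "127.0.0.1:25"                        // HOST
	user, passwd := "gitea@example.com", "secret" // USER / PASSWD
	receivers := []string{"ops@example.com"}      // RECEIVERS
	subject := "Diagnostic message from Gitea"    // SUBJECT
	body := "2019/04/02 08:48:31 [E] something went wrong"

	msg := fmt.Sprintf("To: %s\r\nSubject: %s\r\n\r\n%s",
		strings.Join(receivers, ","), subject, body)

	auth := smtp.PlainAuth("", user, passwd, strings.Split(host, ":")[0])
	if err := smtp.SendMail(host, auth, user, receivers, []byte(msg)); err != nil {
		fmt.Println("smtp send failed:", err)
	}
}
```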
## Cron (`cron`)

go.mod

@ -79,6 +79,7 @@ require (
github.com/lunny/log v0.0.0-20160921050905-7887c61bf0de // indirect
github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af // indirect
github.com/markbates/goth v1.49.0
github.com/mattn/go-isatty v0.0.7
github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d // indirect
github.com/mattn/go-sqlite3 v1.10.0
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
@ -114,8 +115,7 @@ require (
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519
golang.org/x/oauth2 v0.0.0-20181101160152-c453e0c75759 // indirect
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f
golang.org/x/sys v0.0.0-20181026144532-2772b66316d2
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223
golang.org/x/text v0.3.0
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 // indirect

go.sum

@ -207,6 +207,8 @@ github.com/lunny/nodb v0.0.0-20160621015157-fc1ef06ad4af/go.mod h1:Cqz6pqow14VOb
github.com/markbates/going v1.0.0/go.mod h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA=
github.com/markbates/goth v1.49.0 h1:qQ4Ti4WaqAxNAggOC+4s5M85sMVfMJwQn/Xkp73wfgI=
github.com/markbates/goth v1.49.0/go.mod h1:zZmAw0Es0Dpm7TT/4AdN14QrkiWLMrrU9Xei1o+/mdA=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d h1:m+dSK37rFf2fqppZhg15yI2IwC9BtucBiRwSDm9VL8g=
github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d/go.mod h1:/M9VLO+lUPmxvoOK2PfWRZ8mTtB4q1Hy9lEGijv9Nr8=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
@ -311,8 +313,8 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6Zh
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026144532-2772b66316d2 h1:W7CqTdBJ1CmxLKe7LptKDnBYV6PHrVLiGnoyBjaG/JQ=
golang.org/x/sys v0.0.0-20181026144532-2772b66316d2/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=


@ -56,7 +56,7 @@ arguments - which can alternatively be run by running the subcommand web.`
app.Action = cmd.CmdWeb.Action
err := app.Run(os.Args)
if err != nil {
log.Fatal(4, "Failed to run app with %s: %v", os.Args, err)
log.Fatal("Failed to run app with %s: %v", os.Args, err)
}
}


@ -110,7 +110,7 @@ func (a *Action) loadActUser() {
} else if IsErrUserNotExist(err) {
a.ActUser = NewGhostUser()
} else {
log.Error(4, "GetUserByID(%d): %v", a.ActUserID, err)
log.Error("GetUserByID(%d): %v", a.ActUserID, err)
}
}
@ -121,7 +121,7 @@ func (a *Action) loadRepo() {
var err error
a.Repo, err = GetRepositoryByID(a.RepoID)
if err != nil {
log.Error(4, "GetRepositoryByID(%d): %v", a.RepoID, err)
log.Error("GetRepositoryByID(%d): %v", a.RepoID, err)
}
}
@ -256,7 +256,7 @@ func (a *Action) GetIssueTitle() string {
index := com.StrTo(a.GetIssueInfos()[0]).MustInt64()
issue, err := GetIssueByIndex(a.RepoID, index)
if err != nil {
log.Error(4, "GetIssueByIndex: %v", err)
log.Error("GetIssueByIndex: %v", err)
return "500 when get issue"
}
return issue.Title
@ -268,7 +268,7 @@ func (a *Action) GetIssueContent() string {
index := com.StrTo(a.GetIssueInfos()[0]).MustInt64()
issue, err := GetIssueByIndex(a.RepoID, index)
if err != nil {
log.Error(4, "GetIssueByIndex: %v", err)
log.Error("GetIssueByIndex: %v", err)
return "500 when get issue"
}
return issue.Content
@ -419,7 +419,7 @@ func (pc *PushCommits) AvatarLink(email string) string {
if err != nil {
pc.avatars[email] = base.AvatarLink(email)
if !IsErrUserNotExist(err) {
log.Error(4, "GetUserByEmail: %v", err)
log.Error("GetUserByEmail: %v", err)
return ""
}
} else {
@ -619,7 +619,7 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {
}
if err = UpdateIssuesCommit(pusher, repo, opts.Commits.Commits, refName); err != nil {
log.Error(4, "updateIssuesCommit: %v", err)
log.Error("updateIssuesCommit: %v", err)
}
}
@ -661,12 +661,12 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {
if isNewBranch {
gitRepo, err := git.OpenRepository(repo.RepoPath())
if err != nil {
log.Error(4, "OpenRepository[%s]: %v", repo.RepoPath(), err)
log.Error("OpenRepository[%s]: %v", repo.RepoPath(), err)
}
shaSum, err = gitRepo.GetBranchCommitID(refName)
if err != nil {
log.Error(4, "GetBranchCommitID[%s]: %v", opts.RefFullName, err)
log.Error("GetBranchCommitID[%s]: %v", opts.RefFullName, err)
}
if err = PrepareWebhooks(repo, HookEventCreate, &api.CreatePayload{
Ref: refName,
@ -697,11 +697,11 @@ func CommitRepoAction(opts CommitRepoActionOptions) error {
gitRepo, err := git.OpenRepository(repo.RepoPath())
if err != nil {
log.Error(4, "OpenRepository[%s]: %v", repo.RepoPath(), err)
log.Error("OpenRepository[%s]: %v", repo.RepoPath(), err)
}
shaSum, err = gitRepo.GetTagCommitID(refName)
if err != nil {
log.Error(4, "GetTagCommitID[%s]: %v", opts.RefFullName, err)
log.Error("GetTagCommitID[%s]: %v", opts.RefFullName, err)
}
if err = PrepareWebhooks(repo, HookEventCreate, &api.CreatePayload{
Ref: refName,


@ -65,7 +65,7 @@ func removeAllWithNotice(e Engine, title, path string) {
desc := fmt.Sprintf("%s [%s]: %v", title, path, err)
log.Warn(desc)
if err = createNotice(e, NoticeRepository, desc); err != nil {
log.Error(4, "CreateRepositoryNotice: %v", err)
log.Error("CreateRepositoryNotice: %v", err)
}
}
}


@ -61,7 +61,7 @@ func (protectBranch *ProtectedBranch) CanUserPush(userID int64) bool {
in, err := IsUserInTeams(userID, protectBranch.WhitelistTeamIDs)
if err != nil {
log.Error(1, "IsUserInTeams:", err)
log.Error("IsUserInTeams: %v", err)
return false
}
return in
@ -83,7 +83,7 @@ func (protectBranch *ProtectedBranch) CanUserMerge(userID int64) bool {
in, err := IsUserInTeams(userID, protectBranch.MergeWhitelistTeamIDs)
if err != nil {
log.Error(1, "IsUserInTeams:", err)
log.Error("IsUserInTeams: %v", err)
return false
}
return in
@ -101,7 +101,7 @@ func (protectBranch *ProtectedBranch) HasEnoughApprovals(pr *PullRequest) bool {
func (protectBranch *ProtectedBranch) GetGrantedApprovalsCount(pr *PullRequest) int64 {
reviews, err := GetReviewersByPullID(pr.Issue.ID)
if err != nil {
log.Error(1, "GetReviewersByPullID:", err)
log.Error("GetReviewersByPullID: %v", err)
return 0
}
@ -119,7 +119,7 @@ func (protectBranch *ProtectedBranch) GetGrantedApprovalsCount(pr *PullRequest)
}
approvalTeamCount, err := UsersInTeamsCount(userIDs, protectBranch.ApprovalsWhitelistTeamIDs)
if err != nil {
log.Error(1, "UsersInTeamsCount:", err)
log.Error("UsersInTeamsCount: %v", err)
return 0
}
return approvalTeamCount + approvals
@ -466,6 +466,6 @@ func RemoveOldDeletedBranches() {
deleteBefore := time.Now().Add(-setting.Cron.DeletedBranchesCleanup.OlderThan)
_, err := x.Where("deleted_unix < ?", deleteBefore.Unix()).Delete(new(DeletedBranch))
if err != nil {
log.Error(4, "DeletedBranchesCleanup: %v", err)
log.Error("DeletedBranchesCleanup: %v", err)
}
}


@ -52,7 +52,7 @@ func (key *GPGKey) BeforeInsert() {
func (key *GPGKey) AfterLoad(session *xorm.Session) {
err := session.Where("primary_key_id=?", key.KeyID).Find(&key.SubsKey)
if err != nil {
log.Error(3, "Find Sub GPGkeys[%d]: %v", key.KeyID, err)
log.Error("Find Sub GPGkeys[%s]: %v", key.KeyID, err)
}
}
@ -364,7 +364,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
//Parsing signature
sig, err := extractSignature(c.Signature.Signature)
if err != nil { //Skipping failed to extract sign
log.Error(3, "SignatureRead err: %v", err)
log.Error("SignatureRead err: %v", err)
return &CommitVerification{
Verified: false,
Reason: "gpg.error.extract_sign",
@ -377,7 +377,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
// We can expect this to often be an ErrUserNotExist. in the case
// it is not, however, it is important to log it.
if !IsErrUserNotExist(err) {
log.Error(3, "GetUserByEmail: %v", err)
log.Error("GetUserByEmail: %v", err)
}
return &CommitVerification{
Verified: false,
@ -387,7 +387,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
keys, err := ListGPGKeys(committer.ID)
if err != nil { //Skipping failed to get gpg keys of user
log.Error(3, "ListGPGKeys: %v", err)
log.Error("ListGPGKeys: %v", err)
return &CommitVerification{
Verified: false,
Reason: "gpg.error.failed_retrieval_gpg_keys",
@ -411,7 +411,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
//Generating hash of commit
hash, err := populateHash(sig.Hash, []byte(c.Signature.Payload))
if err != nil { //Skipping ailed to generate hash
log.Error(3, "PopulateHash: %v", err)
log.Error("PopulateHash: %v", err)
return &CommitVerification{
Verified: false,
Reason: "gpg.error.generate_hash",
@ -432,7 +432,7 @@ func ParseCommitWithSignature(c *git.Commit) *CommitVerification {
//Generating hash of commit
hash, err := populateHash(sig.Hash, []byte(c.Signature.Payload))
if err != nil { //Skipping ailed to generate hash
log.Error(3, "PopulateHash: %v", err)
log.Error("PopulateHash: %v", err)
return &CommitVerification{
Verified: false,
Reason: "gpg.error.generate_hash",


@ -112,7 +112,7 @@ func (issue *Issue) IsTimetrackerEnabled() bool {
func (issue *Issue) isTimetrackerEnabled(e Engine) bool {
if err := issue.loadRepo(e); err != nil {
log.Error(4, fmt.Sprintf("loadRepo: %v", err))
log.Error(fmt.Sprintf("loadRepo: %v", err))
return false
}
return issue.Repo.IsTimetrackerEnabled()
@ -423,23 +423,23 @@ func (issue *Issue) sendLabelUpdatedWebhook(doer *User) {
var err error
if err = issue.loadRepo(x); err != nil {
log.Error(4, "loadRepo: %v", err)
log.Error("loadRepo: %v", err)
return
}
if err = issue.loadPoster(x); err != nil {
log.Error(4, "loadPoster: %v", err)
log.Error("loadPoster: %v", err)
return
}
mode, _ := AccessLevel(issue.Poster, issue.Repo)
if issue.IsPull {
if err = issue.loadPullRequest(x); err != nil {
log.Error(4, "loadPullRequest: %v", err)
log.Error("loadPullRequest: %v", err)
return
}
if err = issue.PullRequest.LoadIssue(); err != nil {
log.Error(4, "LoadIssue: %v", err)
log.Error("LoadIssue: %v", err)
return
}
err = PrepareWebhooks(issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@ -459,7 +459,7 @@ func (issue *Issue) sendLabelUpdatedWebhook(doer *User) {
})
}
if err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
} else {
go HookQueue.Add(issue.RepoID)
}
@ -584,7 +584,7 @@ func (issue *Issue) ClearLabels(doer *User) (err error) {
if issue.IsPull {
err = issue.PullRequest.LoadIssue()
if err != nil {
log.Error(4, "LoadIssue: %v", err)
log.Error("LoadIssue: %v", err)
return
}
err = PrepareWebhooks(issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@ -604,7 +604,7 @@ func (issue *Issue) ClearLabels(doer *User) (err error) {
})
}
if err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
} else {
go HookQueue.Add(issue.RepoID)
}
@ -819,7 +819,7 @@ func (issue *Issue) ChangeStatus(doer *User, isClosed bool) (err error) {
err = PrepareWebhooks(issue.Repo, HookEventIssues, apiIssue)
}
if err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v, is_closed: %v]: %v", issue.IsPull, isClosed, err)
log.Error("PrepareWebhooks [is_pull: %v, is_closed: %v]: %v", issue.IsPull, isClosed, err)
} else {
go HookQueue.Add(issue.Repo.ID)
}
@ -888,7 +888,7 @@ func (issue *Issue) ChangeTitle(doer *User, title string) (err error) {
}
if err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
} else {
go HookQueue.Add(issue.RepoID)
}
@ -953,7 +953,7 @@ func (issue *Issue) ChangeContent(doer *User, content string) (err error) {
})
}
if err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
} else {
go HookQueue.Add(issue.RepoID)
}
@ -1169,7 +1169,7 @@ func NewIssue(repo *Repository, issue *Issue, labelIDs []int64, assigneeIDs []in
Repo: repo,
IsPrivate: repo.IsPrivate,
}); err != nil {
log.Error(4, "NotifyWatchers: %v", err)
log.Error("NotifyWatchers: %v", err)
}
mode, _ := AccessLevel(issue.Poster, issue.Repo)
@ -1180,7 +1180,7 @@ func NewIssue(repo *Repository, issue *Issue, labelIDs []int64, assigneeIDs []in
Repository: repo.APIFormat(mode),
Sender: issue.Poster.APIFormat(),
}); err != nil {
log.Error(4, "PrepareWebhooks: %v", err)
log.Error("PrepareWebhooks: %v", err)
} else {
go HookQueue.Add(issue.RepoID)
}


@ -187,7 +187,7 @@ func (issue *Issue) changeAssignee(sess *xorm.Session, doer *User, assigneeID in
apiPullRequest.Action = api.HookIssueAssigned
}
if err := prepareWebhooks(sess, issue.Repo, HookEventPullRequest, apiPullRequest); err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
log.Error("PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
return nil
}
} else {
@ -205,7 +205,7 @@ func (issue *Issue) changeAssignee(sess *xorm.Session, doer *User, assigneeID in
apiIssue.Action = api.HookIssueAssigned
}
if err := prepareWebhooks(sess, issue.Repo, HookEventIssues, apiIssue); err != nil {
log.Error(4, "PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
log.Error("PrepareWebhooks [is_pull: %v, remove_assignee: %v]: %v", issue.IsPull, removed, err)
return nil
}
}


@ -171,12 +171,12 @@ func (c *Comment) AfterDelete() {
func (c *Comment) HTMLURL() string {
err := c.LoadIssue()
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
log.Error("LoadIssue(%d): %v", c.IssueID, err)
return ""
}
err = c.Issue.loadRepo(x)
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
return ""
}
if c.Type == CommentTypeCode {
@ -200,7 +200,7 @@ func (c *Comment) HTMLURL() string {
func (c *Comment) IssueURL() string {
err := c.LoadIssue()
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
log.Error("LoadIssue(%d): %v", c.IssueID, err)
return ""
}
@ -210,7 +210,7 @@ func (c *Comment) IssueURL() string {
err = c.Issue.loadRepo(x)
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
return ""
}
return c.Issue.HTMLURL()
@ -220,13 +220,13 @@ func (c *Comment) IssueURL() string {
func (c *Comment) PRURL() string {
err := c.LoadIssue()
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
log.Error("LoadIssue(%d): %v", c.IssueID, err)
return ""
}
err = c.Issue.loadRepo(x)
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
return ""
}
@ -318,7 +318,7 @@ func (c *Comment) LoadPoster() error {
c.PosterID = -1
c.Poster = NewGhostUser()
} else {
log.Error(3, "getUserByID[%d]: %v", c.ID, err)
log.Error("getUserByID[%d]: %v", c.ID, err)
}
}
return nil
@ -333,7 +333,7 @@ func (c *Comment) LoadAttachments() error {
var err error
c.Attachments, err = getAttachmentsByCommentID(x, c.ID)
if err != nil {
log.Error(3, "getAttachmentsByCommentID[%d]: %v", c.ID, err)
log.Error("getAttachmentsByCommentID[%d]: %v", c.ID, err)
}
return nil
}
@ -384,7 +384,7 @@ func (c *Comment) mailParticipants(e Engine, opType ActionType, issue *Issue) (e
content = fmt.Sprintf("Reopened #%d", issue.Index)
}
if err = mailIssueCommentToParticipants(e, issue, c.Poster, content, c, mentions); err != nil {
log.Error(4, "mailIssueCommentToParticipants: %v", err)
log.Error("mailIssueCommentToParticipants: %v", err)
}
return nil
@ -492,12 +492,12 @@ func (c *Comment) MustAsDiff() *Diff {
func (c *Comment) CodeCommentURL() string {
err := c.LoadIssue()
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "LoadIssue(%d): %v", c.IssueID, err)
log.Error("LoadIssue(%d): %v", c.IssueID, err)
return ""
}
err = c.Issue.loadRepo(x)
if err != nil { // Silently dropping errors :unamused:
log.Error(4, "loadRepo(%d): %v", c.Issue.RepoID, err)
log.Error("loadRepo(%d): %v", c.Issue.RepoID, err)
return ""
}
return fmt.Sprintf("%s/files#%s", c.Issue.HTMLURL(), c.HashTag())
@ -638,7 +638,7 @@ func sendCreateCommentAction(e *xorm.Session, opts *CreateCommentOptions, commen
// Notify watchers for whatever action comes in, ignore if no action type.
if act.OpType > 0 {
if err = notifyWatchers(e, act); err != nil {
log.Error(4, "notifyWatchers: %v", err)
log.Error("notifyWatchers: %v", err)
}
}
return nil
@ -850,7 +850,7 @@ func CreateIssueComment(doer *User, repo *Repository, issue *Issue, content stri
Repository: repo.APIFormat(mode),
Sender: doer.APIFormat(),
}); err != nil {
log.Error(2, "PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
log.Error("PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
} else {
go HookQueue.Add(repo.ID)
}
@ -1053,7 +1053,7 @@ func UpdateComment(doer *User, c *Comment, oldContent string) error {
Repository: c.Issue.Repo.APIFormat(mode),
Sender: doer.APIFormat(),
}); err != nil {
log.Error(2, "PrepareWebhooks [comment_id: %d]: %v", c.ID, err)
log.Error("PrepareWebhooks [comment_id: %d]: %v", c.ID, err)
} else {
go HookQueue.Add(c.Issue.Repo.ID)
}
@ -1108,7 +1108,7 @@ func DeleteComment(doer *User, comment *Comment) error {
Repository: comment.Issue.Repo.APIFormat(mode),
Sender: doer.APIFormat(),
}); err != nil {
log.Error(2, "PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
log.Error("PrepareWebhooks [comment_id: %d]: %v", comment.ID, err)
} else {
go HookQueue.Add(comment.Issue.Repo.ID)
}


@ -129,7 +129,7 @@ func (issue *Issue) mailParticipants(e Engine) (err error) {
}
if err = mailIssueCommentToParticipants(e, issue, issue.Poster, issue.Content, nil, mentions); err != nil {
log.Error(4, "mailIssueCommentToParticipants: %v", err)
log.Error("mailIssueCommentToParticipants: %v", err)
}
return nil


@ -393,7 +393,7 @@ func ChangeMilestoneAssign(issue *Issue, doer *User, oldMilestoneID int64) (err
if issue.IsPull {
err = issue.PullRequest.LoadIssue()
if err != nil {
log.Error(2, "LoadIssue: %v", err)
log.Error("LoadIssue: %v", err)
return
}
err = PrepareWebhooks(issue.Repo, HookEventPullRequest, &api.PullRequestPayload{
@ -413,7 +413,7 @@ func ChangeMilestoneAssign(issue *Issue, doer *User, oldMilestoneID int64) (err
})
}
if err != nil {
log.Error(2, "PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
log.Error("PrepareWebhooks [is_pull: %v]: %v", issue.IsPull, err)
} else {
go HookQueue.Add(issue.RepoID)
}


@ -39,11 +39,11 @@ func (l *LFSLock) AfterLoad(session *xorm.Session) {
var err error
l.Owner, err = getUserByID(session, l.OwnerID)
if err != nil {
log.Error(2, "LFS lock AfterLoad failed OwnerId[%d] not found: %v", l.OwnerID, err)
log.Error("LFS lock AfterLoad failed OwnerId[%d] not found: %v", l.OwnerID, err)
}
l.Repo, err = getRepositoryByID(session, l.RepoID)
if err != nil {
log.Error(2, "LFS lock AfterLoad failed RepoId[%d] not found: %v", l.RepoID, err)
log.Error("LFS lock AfterLoad failed RepoId[%d] not found: %v", l.RepoID, err)
}
}


@ -56,7 +56,7 @@ func SendUserMail(c *macaron.Context, u *User, tpl base.TplName, code, subject,
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(tpl), data); err != nil {
log.Error(3, "Template: %v", err)
log.Error("Template: %v", err)
return
}
@ -88,7 +88,7 @@ func SendActivateEmailMail(c *macaron.Context, u *User, email *EmailAddress) {
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(mailAuthActivateEmail), data); err != nil {
log.Error(3, "Template: %v", err)
log.Error("Template: %v", err)
return
}
@ -107,7 +107,7 @@ func SendRegisterNotifyMail(c *macaron.Context, u *User) {
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(mailAuthRegisterNotify), data); err != nil {
log.Error(3, "Template: %v", err)
log.Error("Template: %v", err)
return
}
@ -131,7 +131,7 @@ func SendCollaboratorMail(u, doer *User, repo *Repository) {
var content bytes.Buffer
if err := templates.ExecuteTemplate(&content, string(mailNotifyCollaborator), data); err != nil {
log.Error(3, "Template: %v", err)
log.Error("Template: %v", err)
return
}
@ -165,7 +165,7 @@ func composeIssueCommentMessage(issue *Issue, doer *User, content string, commen
var mailBody bytes.Buffer
if err := templates.ExecuteTemplate(&mailBody, string(tplName), data); err != nil {
log.Error(3, "Template: %v", err)
log.Error("Template: %v", err)
}
msg := mailer.NewMessageFrom(tos, doer.DisplayName(), setting.MailService.FromEmail, subject, mailBody.String())


@ -244,7 +244,7 @@ func Migrate(x *xorm.Engine) error {
v := currentVersion.Version
if minDBVersion > v {
log.Fatal(4, `Gitea no longer supports auto-migration from your previously installed version.
log.Fatal(`Gitea no longer supports auto-migration from your previously installed version.
Please try to upgrade to a lower version (>= v0.6.0) first, then upgrade to current version.`)
return nil
}
@ -315,7 +315,7 @@ func dropTableColumns(sess *xorm.Session, tableName string, columnNames ...strin
return sess.Commit()
default:
log.Fatal(4, "Unrecognized DB")
log.Fatal("Unrecognized DB")
}
return nil


@ -26,7 +26,7 @@ func removeActionColumns(x *xorm.Engine) error {
return fmt.Errorf("DROP COLUMN repo_name: %v", err)
}
default:
log.Fatal(4, "Unrecognized DB")
log.Fatal("Unrecognized DB")
}
return nil
}


@ -21,7 +21,7 @@ func removeIndexColumnFromRepoUnitTable(x *xorm.Engine) (err error) {
log.Warn("DROP COLUMN index: %v", err)
}
default:
log.Fatal(4, "Unrecognized DB")
log.Fatal("Unrecognized DB")
}
return nil


@ -48,7 +48,7 @@ func migrateProtectedBranchStruct(x *xorm.Engine) error {
log.Warn("DROP COLUMN can_push (skipping): %v", err)
}
default:
log.Fatal(4, "Unrecognized DB")
log.Fatal("Unrecognized DB")
}
return nil


@ -33,7 +33,7 @@ func addSizeToAttachment(x *xorm.Engine) error {
localPath := path.Join(setting.AttachmentPath, attach.UUID[0:1], attach.UUID[1:2], attach.UUID)
fi, err := os.Stat(localPath)
if err != nil {
log.Error(4, "calculate file size of attachment[UUID: %s]: %v", attach.UUID, err)
log.Error("calculate file size of attachment[UUID: %s]: %v", attach.UUID, err)
continue
}
attach.Size = fi.Size()


@ -303,7 +303,7 @@ func isOrganizationOwner(e Engine, orgID, uid int64) (bool, error) {
if has, err := e.Get(ownerTeam); err != nil {
return false, err
} else if !has {
log.Error(4, "Organization does not have owner team: %d", orgID)
log.Error("Organization does not have owner team: %d", orgID)
return false, nil
}
return isTeamMember(e, orgID, ownerTeam.ID, uid)


@ -69,7 +69,7 @@ func (t *Team) IsOwnerTeam() bool {
func (t *Team) IsMember(userID int64) bool {
isMember, err := IsTeamMember(t.OrgID, t.ID, userID)
if err != nil {
log.Error(4, "IsMember: %v", err)
log.Error("IsMember: %v", err)
return false
}
return isMember


@ -135,7 +135,7 @@ func (pr *PullRequest) GetDefaultMergeMessage() string {
var err error
pr.HeadRepo, err = GetRepositoryByID(pr.HeadRepoID)
if err != nil {
log.Error(4, "GetRepositoryById[%d]: %v", pr.HeadRepoID, err)
log.Error("GetRepositoryById[%d]: %v", pr.HeadRepoID, err)
return ""
}
}
@ -145,7 +145,7 @@ func (pr *PullRequest) GetDefaultMergeMessage() string {
// GetDefaultSquashMessage returns default message used when squash and merging pull request
func (pr *PullRequest) GetDefaultSquashMessage() string {
if err := pr.LoadIssue(); err != nil {
log.Error(4, "LoadIssue: %v", err)
log.Error("LoadIssue: %v", err)
return ""
}
return fmt.Sprintf("%s (#%d)", pr.Issue.Title, pr.Issue.Index)
@ -172,21 +172,21 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest {
err error
)
if err = pr.Issue.loadRepo(e); err != nil {
log.Error(log.ERROR, "loadRepo[%d]: %v", pr.ID, err)
log.Error("loadRepo[%d]: %v", pr.ID, err)
return nil
}
apiIssue := pr.Issue.apiFormat(e)
if pr.BaseRepo == nil {
pr.BaseRepo, err = getRepositoryByID(e, pr.BaseRepoID)
if err != nil {
log.Error(log.ERROR, "GetRepositoryById[%d]: %v", pr.ID, err)
log.Error("GetRepositoryById[%d]: %v", pr.ID, err)
return nil
}
}
if pr.HeadRepo == nil {
pr.HeadRepo, err = getRepositoryByID(e, pr.HeadRepoID)
if err != nil {
log.Error(log.ERROR, "GetRepositoryById[%d]: %v", pr.ID, err)
log.Error("GetRepositoryById[%d]: %v", pr.ID, err)
return nil
}
}
@ -581,11 +581,11 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
pr.MergerID = doer.ID
if err = pr.setMerged(); err != nil {
log.Error(4, "setMerged [%d]: %v", pr.ID, err)
log.Error("setMerged [%d]: %v", pr.ID, err)
}
if err = MergePullRequestAction(doer, pr.Issue.Repo, pr.Issue); err != nil {
log.Error(4, "MergePullRequestAction [%d]: %v", pr.ID, err)
log.Error("MergePullRequestAction [%d]: %v", pr.ID, err)
}
// Reset cached commit count
@ -593,7 +593,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
// Reload pull request information.
if err = pr.LoadAttributes(); err != nil {
log.Error(4, "LoadAttributes: %v", err)
log.Error("LoadAttributes: %v", err)
return nil
}
@ -605,14 +605,14 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
Repository: pr.Issue.Repo.APIFormat(mode),
Sender: doer.APIFormat(),
}); err != nil {
log.Error(4, "PrepareWebhooks: %v", err)
log.Error("PrepareWebhooks: %v", err)
} else {
go HookQueue.Add(pr.Issue.Repo.ID)
}
l, err := baseGitRepo.CommitsBetweenIDs(pr.MergedCommitID, pr.MergeBase)
if err != nil {
log.Error(4, "CommitsBetweenIDs: %v", err)
log.Error("CommitsBetweenIDs: %v", err)
return nil
}
@ -621,7 +621,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
// to avoid strange diff commits produced.
mergeCommit, err := baseGitRepo.GetBranchCommit(pr.BaseBranch)
if err != nil {
log.Error(4, "GetBranchCommit: %v", err)
log.Error("GetBranchCommit: %v", err)
return nil
}
if mergeStyle == MergeStyleMerge {
@ -639,7 +639,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
Sender: doer.APIFormat(),
}
if err = PrepareWebhooks(pr.BaseRepo, HookEventPush, p); err != nil {
log.Error(4, "PrepareWebhooks: %v", err)
log.Error("PrepareWebhooks: %v", err)
} else {
go HookQueue.Add(pr.BaseRepo.ID)
}
@ -692,7 +692,7 @@ func (pr *PullRequest) setMerged() (err error) {
func (pr *PullRequest) manuallyMerged() bool {
commit, err := pr.getMergeCommit()
if err != nil {
log.Error(4, "PullRequest[%d].getMergeCommit: %v", pr.ID, err)
log.Error("PullRequest[%d].getMergeCommit: %v", pr.ID, err)
return false
}
if commit != nil {
@ -705,7 +705,7 @@ func (pr *PullRequest) manuallyMerged() bool {
if merger == nil {
if pr.BaseRepo.Owner == nil {
if err = pr.BaseRepo.getOwner(x); err != nil {
log.Error(4, "BaseRepo.getOwner[%d]: %v", pr.ID, err)
log.Error("BaseRepo.getOwner[%d]: %v", pr.ID, err)
return false
}
}
@ -715,7 +715,7 @@ func (pr *PullRequest) manuallyMerged() bool {
pr.MergerID = merger.ID
if err = pr.setMerged(); err != nil {
log.Error(4, "PullRequest[%d].setMerged : %v", pr.ID, err)
log.Error("PullRequest[%d].setMerged : %v", pr.ID, err)
return false
}
log.Info("manuallyMerged[%d]: Marked as manually merged into %s/%s by commit id: %s", pr.ID, pr.BaseRepo.Name, pr.BaseBranch, commit.ID.String())
@ -936,7 +936,7 @@ func NewPullRequest(repo *Repository, pull *Issue, labelIDs []int64, uuids []str
Repo: repo,
IsPrivate: repo.IsPrivate,
}); err != nil {
log.Error(4, "NotifyWatchers: %v", err)
log.Error("NotifyWatchers: %v", err)
}
pr.Issue = pull
@ -949,7 +949,7 @@ func NewPullRequest(repo *Repository, pull *Issue, labelIDs []int64, uuids []str
Repository: repo.APIFormat(mode),
Sender: pull.Poster.APIFormat(),
}); err != nil {
log.Error(4, "PrepareWebhooks: %v", err)
log.Error("PrepareWebhooks: %v&