Dump: add output format tar and output to stdout (#10376)

* Dump: Use mholt/archiver/v3 to support tar output, including several compression formats (see the illustrative sketch below)

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* Dump: Allow dump output to stdout

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* Dump: Fixed bug present since #6677 where SessionConfig.Provider is never "file"

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* Dump: never pack RepoRootPath, LFS.ContentPath and LogRootPath when they are below AppDataPath

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* Dump: also dump LFS (fixes #10058)

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* Dump: never dump CustomPath if CustomPath is a subdir of or equal to AppDataPath (fixes #10365)

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* Use log.Info instead of fmt.Fprintf

Signed-off-by: Philipp Homann <homann.philipp@googlemail.com>

* import ordering

* make fmt

Co-authored-by: zeripath <art27@cantab.net>
Co-authored-by: techknowlogick <techknowlogick@gitea.io>
Co-authored-by: Matti R <matti@mdranta.net>
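
Illustrative sketch (not part of this commit): the pattern the new dump code builds on is mholt/archiver/v3's Writer interface, selected by file extension and streamed to any io.Writer, which is what makes both the extra tar formats and output to stdout possible. The file names below are hypothetical.

package main

import (
	"log"
	"os"

	archiver "github.com/mholt/archiver/v3"
)

func main() {
	// Pick a format from the target extension ("zip", "tar", "tar.gz", "tar.xz", "tar.bz2").
	iface, err := archiver.ByExtension("gitea-dump.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	w, ok := iface.(archiver.Writer)
	if !ok {
		log.Fatal("selected format does not support streaming writes")
	}
	// Any io.Writer works; using os.Stdout mirrors the new `--file -` behaviour.
	if err := w.Create(os.Stdout); err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Add a single entry to the archive (hypothetical file).
	f, err := os.Open("app.ini")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	fi, err := f.Stat()
	if err != nil {
		log.Fatal(err)
	}
	if err := w.Write(archiver.File{
		FileInfo:   archiver.FileInfo{FileInfo: fi, CustomName: "app.ini"},
		ReadCloser: f,
	}); err != nil {
		log.Fatal(err)
	}
}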
PhilippHomann committed 3 years ago via GitHub (branch: pull/11780/head)
parent 209b17c4e2
commit 684b7a999f
269  cmd/dump.go
2  go.mod
22  go.sum
3  modules/setting/setting.go
19  vendor/github.com/andybalholm/brotli/LICENSE
5  vendor/github.com/andybalholm/brotli/README.md
177  vendor/github.com/andybalholm/brotli/backward_references.go
795  vendor/github.com/andybalholm/brotli/backward_references_hq.go
436  vendor/github.com/andybalholm/brotli/bit_cost.go
266  vendor/github.com/andybalholm/brotli/bit_reader.go
153  vendor/github.com/andybalholm/brotli/block_splitter.go
433  vendor/github.com/andybalholm/brotli/block_splitter_command.go
433  vendor/github.com/andybalholm/brotli/block_splitter_distance.go
433  vendor/github.com/andybalholm/brotli/block_splitter_literal.go
1265  vendor/github.com/andybalholm/brotli/brotli_bit_stream.go
30  vendor/github.com/andybalholm/brotli/cluster.go
326  vendor/github.com/andybalholm/brotli/cluster_command.go
326  vendor/github.com/andybalholm/brotli/cluster_distance.go
326  vendor/github.com/andybalholm/brotli/cluster_literal.go
252  vendor/github.com/andybalholm/brotli/command.go
840  vendor/github.com/andybalholm/brotli/compress_fragment.go
749  vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go
77  vendor/github.com/andybalholm/brotli/constants.go
2176  vendor/github.com/andybalholm/brotli/context.go
2632  vendor/github.com/andybalholm/brotli/decode.go
122890  vendor/github.com/andybalholm/brotli/dictionary.go
32779  vendor/github.com/andybalholm/brotli/dictionary_hash.go
1737  vendor/github.com/andybalholm/brotli/encode.go
22  vendor/github.com/andybalholm/brotli/encoder_dict.go
593  vendor/github.com/andybalholm/brotli/entropy_encode.go
4394  vendor/github.com/andybalholm/brotli/entropy_encode_static.go
296  vendor/github.com/andybalholm/brotli/fast_log.go
16  vendor/github.com/andybalholm/brotli/find_match_length.go
5  vendor/github.com/andybalholm/brotli/go.mod
2  vendor/github.com/andybalholm/brotli/go.sum
287  vendor/github.com/andybalholm/brotli/h10.go
214  vendor/github.com/andybalholm/brotli/h5.go
216  vendor/github.com/andybalholm/brotli/h6.go
344  vendor/github.com/andybalholm/brotli/hash.go
93  vendor/github.com/andybalholm/brotli/hash_composite.go
253  vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go
214  vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go
169  vendor/github.com/andybalholm/brotli/hash_rolling.go
227  vendor/github.com/andybalholm/brotli/histogram.go
653  vendor/github.com/andybalholm/brotli/huffman.go
182  vendor/github.com/andybalholm/brotli/literal_cost.go
56  vendor/github.com/andybalholm/brotli/memory.go
555  vendor/github.com/andybalholm/brotli/metablock.go
162  vendor/github.com/andybalholm/brotli/metablock_command.go
162  vendor/github.com/andybalholm/brotli/metablock_distance.go
162  vendor/github.com/andybalholm/brotli/metablock_literal.go
37  vendor/github.com/andybalholm/brotli/params.go
103  vendor/github.com/andybalholm/brotli/platform.go
30  vendor/github.com/andybalholm/brotli/prefix.go
723  vendor/github.com/andybalholm/brotli/prefix_dec.go
196  vendor/github.com/andybalholm/brotli/quality.go
100  vendor/github.com/andybalholm/brotli/reader.go
132  vendor/github.com/andybalholm/brotli/ringbuffer.go
295  vendor/github.com/andybalholm/brotli/state.go
666  vendor/github.com/andybalholm/brotli/static_dict.go
75094  vendor/github.com/andybalholm/brotli/static_dict_lut.go
22  vendor/github.com/andybalholm/brotli/symbol_list.go
641  vendor/github.com/andybalholm/brotli/transform.go
71  vendor/github.com/andybalholm/brotli/utf8_util.go
7  vendor/github.com/andybalholm/brotli/util.go
56  vendor/github.com/andybalholm/brotli/write_bits.go
155  vendor/github.com/andybalholm/brotli/writer.go
36  vendor/github.com/dsnet/compress/.travis.yml
24  vendor/github.com/dsnet/compress/LICENSE.md
75  vendor/github.com/dsnet/compress/README.md
74  vendor/github.com/dsnet/compress/api.go
110  vendor/github.com/dsnet/compress/bzip2/bwt.go
110  vendor/github.com/dsnet/compress/bzip2/common.go
13  vendor/github.com/dsnet/compress/bzip2/fuzz_off.go
77  vendor/github.com/dsnet/compress/bzip2/fuzz_on.go
28  vendor/github.com/dsnet/compress/bzip2/internal/sais/common.go
661  vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_byte.go
661  vendor/github.com/dsnet/compress/bzip2/internal/sais/sais_int.go
131  vendor/github.com/dsnet/compress/bzip2/mtf_rle2.go
374  vendor/github.com/dsnet/compress/bzip2/prefix.go
274  vendor/github.com/dsnet/compress/bzip2/reader.go
101  vendor/github.com/dsnet/compress/bzip2/rle1.go
307  vendor/github.com/dsnet/compress/bzip2/writer.go
10  vendor/github.com/dsnet/compress/go.mod
8  vendor/github.com/dsnet/compress/go.sum
107  vendor/github.com/dsnet/compress/internal/common.go
12  vendor/github.com/dsnet/compress/internal/debug.go
120  vendor/github.com/dsnet/compress/internal/errors/errors.go
12  vendor/github.com/dsnet/compress/internal/gofuzz.go
159  vendor/github.com/dsnet/compress/internal/prefix/debug.go
136  vendor/github.com/dsnet/compress/internal/prefix/decoder.go
66  vendor/github.com/dsnet/compress/internal/prefix/encoder.go
400  vendor/github.com/dsnet/compress/internal/prefix/prefix.go
93  vendor/github.com/dsnet/compress/internal/prefix/range.go
335  vendor/github.com/dsnet/compress/internal/prefix/reader.go
146  vendor/github.com/dsnet/compress/internal/prefix/wrap.go
166  vendor/github.com/dsnet/compress/internal/prefix/writer.go
21  vendor/github.com/dsnet/compress/internal/release.go
12  vendor/github.com/dsnet/compress/zbench.sh
10  vendor/github.com/dsnet/compress/zfuzz.sh
Some files were not shown because too many files have changed in this diff.

@@ -6,22 +6,120 @@
package cmd
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"time"
"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"github.com/unknwon/cae/zip"
"gitea.com/macaron/session"
archiver "github.com/mholt/archiver/v3"
"github.com/unknwon/com"
"github.com/urfave/cli"
)
func addFile(w archiver.Writer, filePath string, absPath string, verbose bool) error {
if verbose {
log.Info("Adding file %s\n", filePath)
}
file, err := os.Open(absPath)
if err != nil {
return err
}
defer file.Close()
fileInfo, err := file.Stat()
if err != nil {
return err
}
return w.Write(archiver.File{
FileInfo: archiver.FileInfo{
FileInfo: fileInfo,
CustomName: filePath,
},
ReadCloser: file,
})
}
func addRecursive(w archiver.Writer, dirPath string, absPath string, verbose bool) error {
if verbose {
log.Info("Adding dir %s\n", dirPath)
}
dir, err := os.Open(absPath)
if err != nil {
return fmt.Errorf("Could not open directory %s: %s", absPath, err)
}
files, err := dir.Readdir(0)
if err != nil {
return fmt.Errorf("Unable to list files in %s: %s", absPath, err)
}
if err := addFile(w, dirPath, absPath, false); err != nil {
return err
}
for _, fileInfo := range files {
if fileInfo.IsDir() {
err = addRecursive(w, filepath.Join(dirPath, fileInfo.Name()), filepath.Join(absPath, fileInfo.Name()), verbose)
} else {
err = addFile(w, filepath.Join(dirPath, fileInfo.Name()), filepath.Join(absPath, fileInfo.Name()), verbose)
}
if err != nil {
return err
}
}
return nil
}
func isSubdir(upper string, lower string) (bool, error) {
if relPath, err := filepath.Rel(upper, lower); err != nil {
return false, err
} else if relPath == "." || !strings.HasPrefix(relPath, ".") {
return true, nil
}
return false, nil
}
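Aside, for illustration only (not part of this commit): isSubdir above treats lower as inside upper when filepath.Rel(upper, lower) returns "." or a path that does not start with a dot; paths outside upper come back prefixed with "..". A standalone sketch of the same test, with made-up paths:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	// Same check isSubdir performs, run against hypothetical paths.
	for _, lower := range []string{"/data/lfs/objects", "/data", "/custom"} {
		rel, err := filepath.Rel("/data", lower)
		if err != nil {
			panic(err)
		}
		inside := rel == "." || !strings.HasPrefix(rel, ".")
		fmt.Printf("%-18s rel=%-14q inside=%v\n", lower, rel, inside)
	}
}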
type outputType struct {
Enum []string
Default string
selected string
}
func (o outputType) Join() string {
return strings.Join(o.Enum, ", ")
}
func (o *outputType) Set(value string) error {
for _, enum := range o.Enum {
if enum == value {
o.selected = value
return nil
}
}
return fmt.Errorf("allowed values are %s", o.Join())
}
func (o outputType) String() string {
if o.selected == "" {
return o.Default
}
return o.selected
}
var outputTypeEnum = &outputType{
Enum: []string{"zip", "tar", "tar.gz", "tar.xz", "tar.bz2"},
Default: "zip",
}
// CmdDump represents the available dump sub-command.
var CmdDump = cli.Command{
Name: "dump",
@@ -33,7 +131,7 @@ It can be used for backup and capture Gitea server image to send to maintainer`,
cli.StringFlag{
Name: "file, f",
Value: fmt.Sprintf("gitea-dump-%d.zip", time.Now().Unix()),
Usage: "Name of the dump file which will be created.",
Usage: "Name of the dump file which will be created. Supply '-' for stdout. See type for available types.",
},
cli.BoolFlag{
Name: "verbose, V",
@@ -56,6 +154,11 @@ It can be used for backup and capture Gitea server image to send to maintainer`,
Name: "skip-log, L",
Usage: "Skip the log dumping",
},
cli.GenericFlag{
Name: "type",
Value: outputTypeEnum,
Usage: fmt.Sprintf("Dump output format: %s", outputTypeEnum.Join()),
},
},
}
@@ -65,7 +168,23 @@ func fatal(format string, args ...interface{}) {
}
func runDump(ctx *cli.Context) error {
var file *os.File
fileName := ctx.String("file")
if fileName == "-" {
file = os.Stdout
err := log.DelLogger("console")
if err != nil {
fatal("Deleting default logger failed. Can not write to stdout: %v", err)
}
}
setting.NewContext()
// make sure we are logging to the console no matter what the configuration tells us to do
if _, err := setting.Cfg.Section("log").NewKey("MODE", "console"); err != nil {
fatal("Setting logging mode to console failed: %v", err)
}
if _, err := setting.Cfg.Section("log.console").NewKey("STDERR", "true"); err != nil {
fatal("Setting console logger to stderr failed: %v", err)
}
setting.NewServices() // cannot access session settings otherwise
err := models.SetEngine()
@@ -73,45 +192,59 @@ func runDump(ctx *cli.Context) error {
return err
}
tmpDir := ctx.String("tempdir")
if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
fatal("Path does not exist: %s", tmpDir)
}
tmpWorkDir, err := ioutil.TempDir(tmpDir, "gitea-dump-")
if err != nil {
fatal("Failed to create tmp work directory: %v", err)
if file == nil {
file, err = os.Create(fileName)
if err != nil {
fatal("Unable to open %s: %v", fileName, err)
}
}
log.Info("Creating tmp work dir: %s", tmpWorkDir)
defer file.Close()
// work-around #1103
if os.Getenv("TMPDIR") == "" {
os.Setenv("TMPDIR", tmpWorkDir)
verbose := ctx.Bool("verbose")
outType := ctx.String("type")
var iface interface{}
if fileName == "-" {
iface, err = archiver.ByExtension(fmt.Sprintf(".%s", outType))
} else {
iface, err = archiver.ByExtension(fileName)
}
dbDump := path.Join(tmpWorkDir, "gitea-db.sql")
fileName := ctx.String("file")
log.Info("Packing dump files...")
z, err := zip.Create(fileName)
if err != nil {
fatal("Failed to create %s: %v", fileName, err)
fatal("Unable to get archiver for extension: %v", err)
}
zip.Verbose = ctx.Bool("verbose")
w, _ := iface.(archiver.Writer)
if err := w.Create(file); err != nil {
fatal("Creating archiver.Writer failed: %v", err)
}
defer w.Close()
if ctx.IsSet("skip-repository") && ctx.Bool("skip-repository") {
log.Info("Skip dumping local repositories")
} else {
log.Info("Dumping local repositories...%s", setting.RepoRootPath)
reposDump := path.Join(tmpWorkDir, "gitea-repo.zip")
if err := zip.PackTo(setting.RepoRootPath, reposDump, true); err != nil {
fatal("Failed to dump local repositories: %v", err)
log.Info("Dumping local repositories... %s", setting.RepoRootPath)
if err := addRecursive(w, "repos", setting.RepoRootPath, verbose); err != nil {
fatal("Failed to include repositories: %v", err)
}
if err := z.AddFile("gitea-repo.zip", reposDump); err != nil {
fatal("Failed to include gitea-repo.zip: %v", err)
if _, err := os.Stat(setting.LFS.ContentPath); !os.IsNotExist(err) {
log.Info("Dumping lfs... %s", setting.LFS.ContentPath)
if err := addRecursive(w, "lfs", setting.LFS.ContentPath, verbose); err != nil {
fatal("Failed to include lfs: %v", err)
}
}
}
tmpDir := ctx.String("tempdir")
if _, err := os.Stat(tmpDir); os.IsNotExist(err) {
fatal("Path does not exist: %s", tmpDir)
}
dbDump, err := ioutil.TempFile(tmpDir, "gitea-db.sql")
if err != nil {
fatal("Failed to create tmp file: %v", err)
}
defer os.Remove(dbDump.Name())
targetDBType := ctx.String("database")
if len(targetDBType) > 0 && targetDBType != setting.Database.Type {
log.Info("Dumping database %s => %s...", setting.Database.Type, targetDBType)
@@ -119,25 +252,29 @@ func runDump(ctx *cli.Context) error {
log.Info("Dumping database...")
}
if err := models.DumpDatabase(dbDump, targetDBType); err != nil {
if err := models.DumpDatabase(dbDump.Name(), targetDBType); err != nil {
fatal("Failed to dump database: %v", err)
}
if err := z.AddFile("gitea-db.sql", dbDump); err != nil {
if err := addFile(w, "gitea-db.sql", dbDump.Name(), verbose); err != nil {
fatal("Failed to include gitea-db.sql: %v", err)
}
if len(setting.CustomConf) > 0 {
log.Info("Adding custom configuration file from %s", setting.CustomConf)
if err := z.AddFile("app.ini", setting.CustomConf); err != nil {
if err := addFile(w, "app.ini", setting.CustomConf, verbose); err != nil {
fatal("Failed to include specified app.ini: %v", err)
}
}
customDir, err := os.Stat(setting.CustomPath)
if err == nil && customDir.IsDir() {
if err := z.AddDir("custom", setting.CustomPath); err != nil {
fatal("Failed to include custom: %v", err)
if is, _ := isSubdir(setting.AppDataPath, setting.CustomPath); !is {
if err := addRecursive(w, "custom", setting.CustomPath, verbose); err != nil {
fatal("Failed to include custom: %v", err)
}
} else {
log.Info("Custom dir %s is inside data dir %s, skipped", setting.CustomPath, setting.AppDataPath)
}
} else {
log.Info("Custom dir %s doesn't exist, skipped", setting.CustomPath)
@@ -146,11 +283,19 @@ func runDump(ctx *cli.Context) error {
if com.IsExist(setting.AppDataPath) {
log.Info("Packing data directory...%s", setting.AppDataPath)
var sessionAbsPath string
if setting.SessionConfig.Provider == "file" {
sessionAbsPath = setting.SessionConfig.ProviderConfig
var excludes []string
if setting.Cfg.Section("session").Key("PROVIDER").Value() == "file" {
var opts session.Options
if err = json.Unmarshal([]byte(setting.SessionConfig.ProviderConfig), &opts); err != nil {
return err
}
excludes = append(excludes, opts.ProviderConfig)
}
if err := zipAddDirectoryExclude(z, "data", setting.AppDataPath, sessionAbsPath); err != nil {
excludes = append(excludes, setting.RepoRootPath)
excludes = append(excludes, setting.LFS.ContentPath)
excludes = append(excludes, setting.LogRootPath)
if err := addRecursiveExclude(w, "data", setting.AppDataPath, excludes, verbose); err != nil {
fatal("Failed to include data directory: %v", err)
}
}
@@ -161,32 +306,42 @@ func runDump(ctx *cli.Context) error {
if ctx.IsSet("skip-log") && ctx.Bool("skip-log") {
log.Info("Skip dumping log files")
} else if com.IsExist(setting.LogRootPath) {
if err := z.AddDir("log", setting.LogRootPath); err != nil {
if err := addRecursive(w, "log", setting.LogRootPath, verbose); err != nil {
fatal("Failed to include log: %v", err)
}
}
if err = z.Close(); err != nil {
_ = os.Remove(fileName)
fatal("Failed to save %s: %v", fileName, err)
}
if fileName != "-" {
if err = w.Close(); err != nil {
_ = os.Remove(fileName)
fatal("Failed to save %s: %v", fileName, err)
}
if err := os.Chmod(fileName, 0600); err != nil {
log.Info("Can't change file access permissions mask to 0600: %v", err)
if err := os.Chmod(fileName, 0600); err != nil {
log.Info("Can't change file access permissions mask to 0600: %v", err)
}
}
log.Info("Removing tmp work dir: %s", tmpWorkDir)
if err := os.RemoveAll(tmpWorkDir); err != nil {
fatal("Failed to remove %s: %v", tmpWorkDir, err)
if fileName != "-" {
log.Info("Finish dumping in file %s", fileName)
} else {
log.Info("Finish dumping to stdout")
}
log.Info("Finish dumping in file %s", fileName)
return nil
}
// zipAddDirectoryExclude zips absPath to specified zipPath inside z excluding excludeAbsPath
func zipAddDirectoryExclude(zip *zip.ZipArchive, zipPath, absPath string, excludeAbsPath string) error {
func contains(slice []string, s string) bool {
for _, v := range slice {
if v == s {
return true
}
}
return false
}
// addRecursiveExclude zips absPath to specified insidePath inside writer excluding excludeAbsPath
func addRecursiveExclude(w archiver.Writer, insidePath, absPath string, excludeAbsPath []string, verbose bool) error {
absPath, err := filepath.Abs(absPath)
if err != nil {
return err
@@ -197,24 +352,24 @@ func zipAddDirectoryExclude(zip *zip.ZipArchive, zipPath, absPath string, exclud
}
defer dir.Close()
zip.AddEmptyDir(zipPath)
files, err := dir.Readdir(0)
if err != nil {
return err
}
for _, file := range files {
currentAbsPath := path.Join(absPath, file.Name())
currentZipPath := path.Join(zipPath, file.Name())
currentInsidePath := path.Join(insidePath, file.Name())
if file.IsDir() {
if currentAbsPath != excludeAbsPath {
if err = zipAddDirectoryExclude(zip, currentZipPath, currentAbsPath, excludeAbsPath); err != nil {
if !contains(excludeAbsPath, currentAbsPath) {
if err := addFile(w, currentInsidePath, currentAbsPath, false); err != nil {
return err
}
if err = addRecursiveExclude(w, currentInsidePath, currentAbsPath, excludeAbsPath, verbose); err != nil {
return err
}
}
} else {
if err = zip.AddFile(currentZipPath, currentAbsPath); err != nil {
if err = addFile(w, currentInsidePath, currentAbsPath, verbose); err != nil {
return err
}
}

@@ -71,6 +71,7 @@ require (
github.com/mcuadros/go-version v0.0.0-20190308113854-92cdf37c5b75
github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81
github.com/mgechev/revive v1.0.2
github.com/mholt/archiver/v3 v3.3.0
github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912
github.com/mitchellh/go-homedir v1.1.0
github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc
@@ -93,7 +94,6 @@ require (
github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect
github.com/tinylib/msgp v1.1.2 // indirect
github.com/tstranex/u2f v1.0.0
github.com/unknwon/cae v1.0.0
github.com/unknwon/com v1.0.1
github.com/unknwon/i18n v0.0.0-20190805065654-5c6446a380b6
github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141

@@ -65,6 +65,8 @@ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBb
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6 h1:bZ28Hqta7TFAK3Q08CMvv8y3/8ATaEqv2nGoc6yff6c=
github.com/andybalholm/brotli v0.0.0-20190621154722-5f990b63d2d6/go.mod h1:+lx6/Aqd1kLJ1GQfkvOnaZ1WGmLpMpbprPuIOOZX30U=
github.com/andybalholm/cascadia v1.0.0 h1:hOCXnnZ5A+3eVDX8pvgl4kofXv2ELss0bKcqRySc45o=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
@@ -154,6 +156,9 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
@@ -286,6 +291,8 @@ github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14 h1:yXtpJr/LV6PFu4nTLgfjQ
github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14/go.mod h1:jPoNZLWDAqA5N3G5amEoiNbhVrmM+ZQEcnQvNQ2KaZk=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721 h1:KRMr9A3qfbVM7iV/WcLY/rL5LICqwMHLhwRXKu99fXw=
github.com/golang/gddo v0.0.0-20190419222130-af0f2af80721/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -405,10 +412,14 @@ github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4 h1:cTxwSmnaqLoo+
github.com/keybase/go-crypto v0.0.0-20200123153347-de78d2cb44f4/go.mod h1:ghbZscTyKdM07+Fw3KSi0hcJm+AlEUWj8QLlPtijN/M=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -464,6 +475,8 @@ github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc8
github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
github.com/mgechev/revive v1.0.2 h1:v0NxxQ7fSFz/u1NQydPo6EGdq7va0J1BtsZmae6kzUg=
github.com/mgechev/revive v1.0.2/go.mod h1:rb0dQy1LVAxW9SWy5R3LPUjevzUbUS316U5MFySA2lo=
github.com/mholt/archiver/v3 v3.3.0 h1:vWjhY8SQp5yzM9P6OJ/eZEkmi3UAbRrxCq48MxjAzig=
github.com/mholt/archiver/v3 v3.3.0/go.mod h1:YnQtqsp+94Rwd0D/rk5cnLrxusUBUXg+08Ebtr1Mqao=
github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912 h1:hJde9rA24hlTcAYSwJoXpDUyGtfKQ/jsofw+WaDqGrI=
github.com/microcosm-cc/bluemonday v1.0.3-0.20191119130333-0a75d7616912/go.mod h1:8iwZnFn2CDDNZ0r6UXhF4xawGvzaqzCRa1n3/lO3W2w=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -489,6 +502,8 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWb
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/niklasfasching/go-org v0.1.9 h1:Toz8WMIt+qJb52uYEk1YD/muLuOOmRt1CfkV+bKVMkI=
github.com/niklasfasching/go-org v0.1.9/go.mod h1:AsLD6X7djzRIz4/RFZu8vwRL0VGjUvGZCCH1Nz0VdrU=
github.com/nwaples/rardecode v1.0.0 h1:r7vGuS5akxOnR4JQSkko62RJ1ReCMXxQRPtxsiFMBOs=
github.com/nwaples/rardecode v1.0.0/go.mod h1:5DzqNKiOdpKKBH87u8VlvAnPZMXcGRhxWkRpHbbfGS0=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn+Ejf/w8=
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
@@ -511,6 +526,7 @@ github.com/pelletier/go-toml v1.4.0 h1:u3Z1r+oOXJIkxqw34zVhyPgjBsm6X2wn21NWs/HfS
github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ=
github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
@@ -622,8 +638,8 @@ github.com/tstranex/u2f v1.0.0 h1:HhJkSzDDlVSVIVt7pDJwCHQj67k7A5EeBgPmeD+pVsQ=
github.com/tstranex/u2f v1.0.0/go.mod h1:eahSLaqAS0zsIEv80+vXT7WanXs7MQQDg3j3wGBSayo=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/unknwon/cae v1.0.0 h1:i39lOFaBXZxhGjQOy/RNbi8uzettCs6OQxpR0xXohGU=
github.com/unknwon/cae v1.0.0/go.mod h1:QaSeRctcea9fK6piJpAMCCPKxzJ01+xFcr2k1m3WRPU=
github.com/ulikunitz/xz v0.5.6 h1:jGHAfXawEGZQ3blwU5wnWKQJvAraT7Ftq9EXjnXYgt8=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
github.com/unknwon/com v1.0.1 h1:3d1LTxD+Lnf3soQiD4Cp/0BRB+Rsa/+RTvz8GMMzIXs=
@@ -640,6 +656,8 @@ github.com/xanzy/go-gitlab v0.31.0 h1:+nHztQuCXGSMluKe5Q9IRaPdz6tO8O0gMkQ0vqGpiB
github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yohcop/openid-go v1.0.0 h1:EciJ7ZLETHR3wOtxBvKXx9RV6eyHZpCaSZ1inbBaUXE=

@@ -29,7 +29,6 @@ import (
shellquote "github.com/kballard/go-shellquote"
version "github.com/mcuadros/go-version"
"github.com/unknwon/cae/zip"
"github.com/unknwon/com"
ini "gopkg.in/ini.v1"
"strk.kbt.io/projects/go/libravatar"
@@ -1032,8 +1031,6 @@ func NewContext() {
U2F.TrustedFacets, _ = shellquote.Split(sec.Key("TRUSTED_FACETS").MustString(strings.TrimRight(AppURL, "/")))
U2F.AppID = sec.Key("APP_ID").MustString(strings.TrimRight(AppURL, "/"))
zip.Verbose = false
UI.ReactionsMap = make(map[string]bool)
for _, reaction := range UI.Reactions {
UI.ReactionsMap[reaction] = true

@@ -0,0 +1,19 @@
Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@@ -0,0 +1,5 @@
This package is a brotli compressor and decompressor implemented in Go.
It was translated from the reference implementation (https://github.com/google/brotli)
with the `c2go` tool at https://github.com/andybalholm/c2go.
I am using it in production with https://github.com/andybalholm/redwood.

@@ -0,0 +1,177 @@
package brotli
/* Copyright 2013 Google Inc. All Rights Reserved.
Distributed under MIT license.
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
/* Function to find backward reference copies. */
func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint {
if distance <= max_distance {
var distance_plus_3 uint = distance + 3
var offset0 uint = distance_plus_3 - uint(dist_cache[0])
var offset1 uint = distance_plus_3 - uint(dist_cache[1])
if distance == uint(dist_cache[0]) {
return 0
} else if distance == uint(dist_cache[1]) {
return 1
} else if offset0 < 7 {
return (0x9750468 >> (4 * offset0)) & 0xF
} else if offset1 < 7 {
return (0xFDB1ACE >> (4 * offset1)) & 0xF
} else if distance == uint(dist_cache[2]) {
return 2
} else if distance == uint(dist_cache[3]) {
return 3
}
}
return distance + numDistanceShortCodes - 1
}
/* "commands" points to the next output command to write to, "*num_commands" is
initially the total amount of commands output by previous
CreateBackwardReferences calls, and must be incremented by the amount written
by this call. */
func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands []command, num_commands *uint, num_literals *uint) {
var max_backward_limit uint = maxBackwardLimit(params.lgwin)
var orig_commands []command = commands
var insert_length uint = *last_insert_len
var pos_end uint = position + num_bytes
var store_end uint
if num_bytes >= hasher.StoreLookahead() {
store_end = position + num_bytes - hasher.StoreLookahead() + 1
} else {
store_end = position
}
var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params)
var apply_random_heuristics uint = position + random_heuristics_window_size
var gap uint = 0
/* Set maximum distance, see section 9.1. of the spec. */
const kMinScore uint = scoreBase + 100
/* For speed up heuristics for random data. */
/* Minimum score to accept a backward reference. */
hasher.PrepareDistanceCache(dist_cache)
var sr2 hasherSearchResult
var sr hasherSearchResult
for position+hasher.HashTypeLength() < pos_end {
var max_length uint = pos_end - position
var max_distance uint = brotli_min_size_t(position, max_backward_limit)
sr.len = 0
sr.len_code_delta = 0
sr.distance = 0
sr.score = kMinScore
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, &sr)
if sr.score > kMinScore {
/* Found a match. Let's look for something even better ahead. */
var delayed_backward_references_in_row int = 0
max_length--
for ; ; max_length-- {
var cost_diff_lazy uint = 175
if params.quality < minQualityForExtensiveReferenceSearch {
sr2.len = brotli_min_size_t(sr.len-1, max_length)
} else {
sr2.len = 0
}
sr2.len_code_delta = 0
sr2.distance = 0
sr2.score = kMinScore
max_distance = brotli_min_size_t(position+1, max_backward_limit)
hasher.FindLongestMatch(&params.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, &sr2)
if sr2.score >= sr.score+cost_diff_lazy {
/* Ok, let's just write one byte for now and start a match from the
next byte. */
position++
insert_length++
sr = sr2
delayed_backward_references_in_row++
if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end {
continue
}
}
break
}
apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size
max_distance = brotli_min_size_t(position, max_backward_limit)
{
/* The first 16 codes are special short-codes,
and the minimum offset is 1. */
var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache)
if (sr.distance <= (max_distance + gap)) && distance_code > 0 {
dist_cache[3] = dist_cache[2]
dist_cache[2] = dist_cache[1]
dist_cache[1] = dist_cache[0]
dist_cache[0] = int(sr.distance)
hasher.PrepareDistanceCache(dist_cache)
}
initCommand(&commands[0], &params.dist, insert_length, sr.len, sr.len_code_delta, distance_code)
commands = commands[1:]
}
*num_literals += insert_length
insert_length = 0
/* Put the hash keys into the table, if there are enough bytes left.
Depending on the hasher implementation, it can push all positions
in the given range or only a subset of them.
Avoid hash poisoning with RLE data. */
{
var range_start uint = position + 2
var range_end uint = brotli_min_size_t(position+sr.len, store_end)
if sr.distance < sr.len>>2 {
range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2)))
}
hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end)
}
position += sr.len
} else {
insert_length++
position++
/* If we have not seen matches for a long time, we can skip some
match lookups. Unsuccessful match lookups are very very expensive
and this kind of a heuristic speeds up compression quite
a lot. */
if position > apply_random_heuristics {
/* Going through uncompressible data, jump. */
if position > apply_random_heuristics+4*random_heuristics_window_size {
var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4)
/* It is quite a long time since we saw a copy, so we assume
that this data is not compressible, and store hashes less
often. Hashes of non compressible data are less likely to
turn out to be useful in the future, too, so we store less of
them to not to flood out the hash table of good compressible
data. */
var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin)
for ; position < pos_jump; position += 4 {
hasher.Store(ringbuffer, ringbuffer_mask, position)
insert_length += 4
}
} else {
var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2)
var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin)
for ; position < pos_jump; position += 2 {
hasher.Store(ringbuffer, ringbuffer_mask, position)
insert_length += 2
}
}
}
}
}
insert_length += pos_end - position
*last_insert_len = insert_length
*num_commands += uint(-cap(commands) + cap(orig_commands))
}

@@ -0,0 +1,795 @@
package brotli
import "math"
type zopfliNode struct {
length uint32
distance uint32
dcode_insert_length uint32
u struct {
cost float32
next uint32
shortcut uint32
}
}
const maxEffectiveDistanceAlphabetSize = 544
const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */
var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}
var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3}
func initZopfliNodes(array []zopfliNode, length uint) {
var stub zopfliNode
var i uint
stub.length = 1
stub.distance = 0
stub.dcode_insert_length = 0
stub.u.cost = kInfinity
for i = 0; i < length; i++ {
array[i] = stub
}
}
func zopfliNodeCopyLength(self *zopfliNode) uint32 {
return self.length & 0x1FFFFFF
}
func zopfliNodeLengthCode(self *zopfliNode) uint32 {
var modifier uint32 = self.length >> 25
return zopfliNodeCopyLength(self) + 9 - modifier
}
func zopfliNodeCopyDistance(self *zopfliNode) uint32 {
return self.distance
}
func zopfliNodeDistanceCode(self *zopfliNode) uint32 {
var short_code uint32 = self.dcode_insert_length >> 27
if short_code == 0 {
return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1
} else {
return short_code - 1
}
}
func zopfliNodeCommandLength(self *zopfliNode) uint32 {
return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF)
}
/* Histogram based cost model for zopflification. */
type zopfliCostModel struct {
cost_cmd_ [numCommandSymbols]float32
cost_dist_ []float32
distance_histogram_size uint32
literal_costs_ []float32
min_cost_cmd_ float32
num_bytes_ uint
}
func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) {
var distance_histogram_size uint32 = dist.alphabet_size
if distance_histogram_size > maxEffectiveDistanceAlphabetSize {
distance_histogram_size = maxEffectiveDistanceAlphabetSize
}
self.num_bytes_ = num_bytes
self.literal_costs_ = make([]float32, (num_bytes + 2))
self.cost_dist_ = make([]float32, (dist.alphabet_size))
self.distance_histogram_size = distance_histogram_size
}
func cleanupZopfliCostModel(self *zopfliCostModel) {
self.literal_costs_ = nil
self.cost_dist_ = nil
}
func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) {
var sum uint = 0
var missing_symbol_sum uint
var log2sum float32
var missing_symbol_cost float32
var i uint
for i = 0; i < histogram_size; i++ {
sum += uint(histogram[i])
}
log2sum = float32(fastLog2(sum))
missing_symbol_sum = sum
if !literal_histogram {
for i = 0; i < histogram_size; i++ {
if histogram[i] == 0 {
missing_symbol_sum++
}
}
}
missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2
for i = 0; i < histogram_size; i++ {
if histogram[i] == 0 {
cost[i] = missing_symbol_cost
continue
}
/* Shannon bits for this symbol. */
cost[i] = log2sum - float32(fastLog2(uint(histogram[i])))
/* Cannot be coded with less than 1 bit */
if cost[i] < 1 {
cost[i] = 1
}
}
}
func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, num_commands uint, last_insert_len uint) {
var histogram_literal [numLiteralSymbols]uint32
var histogram_cmd [numCommandSymbols]uint32
var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32
var cost_literal [numLiteralSymbols]float32
var pos uint = position - last_insert_len
var min_cost_cmd float32 = kInfinity
var i uint
var cost_cmd []float32 = self.cost_cmd_[:]
var literal_costs []float32
histogram_literal = [numLiteralSymbols]uint32{}
histogram_cmd = [numCommandSymbols]uint32{}
histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{}
for i = 0; i < num_commands; i++ {
var inslength uint = uint(commands[i].insert_len_)
var copylength uint = uint(commandCopyLen(&commands[i]))
var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF
var cmdcode uint = uint(commands[i].cmd_prefix_)
var j uint
histogram_cmd[cmdcode]++
if cmdcode >= 128 {
histogram_dist[distcode]++
}
for j = 0; j < inslength; j++ {
histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++
}
pos += inslength + copylength
}
setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:])
setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd)
setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_)
for i = 0; i < numCommandSymbols; i++ {
min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i])
}
self.min_cost_cmd_ = min_cost_cmd
{
literal_costs = self.literal_costs_
var literal_carry float32 = 0.0
var num_bytes uint = self.num_bytes_
literal_costs[0] = 0.0
for i = 0; i < num_bytes; i++ {
literal_carry += cost_literal[ringbuffer[(position+i)&ringbuffer_mask]]
literal_costs[i+1] = literal_costs[i] + literal_carry
literal_carry -= literal_costs[i+1] - literal_costs[i]
}
}
}
func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) {
var literal_costs []float32 = self.literal_costs_
var literal_carry float32 = 0.0
var cost_dist []float32 = self.cost_dist_
var cost_cmd []float32 = self.cost_cmd_[:]
var num_bytes uint = self.num_bytes_
var i uint
estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:])
literal_costs[0] = 0.0
for i = 0; i < num_bytes; i++ {
literal_carry += literal_costs[i+1]
literal_costs[i+1] = literal_costs[i] + literal_carry
literal_carry -= literal_costs[i+1] - literal_costs[i]
}
for i = 0; i < numCommandSymbols; i++ {
cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i))))
}
for i = 0; uint32(i) < self.distance_histogram_size; i++ {
cost_dist[i] = float32(fastLog2(uint(20 + uint32(i))))
}
self.min_cost_cmd_ = float32(fastLog2(11))
}
func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 {
return self.cost_cmd_[cmdcode]
}
func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 {
return self.cost_dist_[distcode]
}
func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 {
return self.literal_costs_[to] - self.literal_costs_[from]
}
func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 {
return self.min_cost_cmd_
}
/* REQUIRES: len >= 2, start_pos <= pos */
/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */
/* Maintains the "ZopfliNode array invariant". */
func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) {
var next *zopfliNode = &nodes[pos+len]
next.length = uint32(len | (len+9-len_code)<<25)
next.distance = uint32(dist)
next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos))
next.u.cost = cost
}
type posData struct {
pos uint
distance_cache [4]int
costdiff float32
cost float32
}
/* Maintains the smallest 8 cost difference together with their positions */
type startPosQueue struct {
q_ [8]posData
idx_ uint
}
func initStartPosQueue(self *startPosQueue) {
self.idx_ = 0
}
func startPosQueueSize(self *startPosQueue) uint {
return brotli_min_size_t(self.idx_, 8)
}
func startPosQueuePush(self *startPosQueue, posdata *posData) {
var offset uint = ^(self.idx_) & 7
self.idx_++
var len uint = startPosQueueSize(self)
var i uint
var q []posData = self.q_[:]
q[offset] = *posdata
/* Restore the sorted order. In the list of |len| items at most |len - 1|
adjacent element comparisons / swaps are required. */
for i = 1; i < len; i++ {
if q[offset&7].costdiff > q[(offset+1)&7].costdiff {
var tmp posData = q[offset&7]
q[offset&7] = q[(offset+1)&7]
q[(offset+1)&7] = tmp
}
offset++
}
}
func startPosQueueAt(self *startPosQueue, k uint) *posData {
return &self.q_[(k-self.idx_)&7]
}
/* Returns the minimum possible copy length that can improve the cost of any */
/* future position. */
func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint {
var min_cost float32 = start_cost
var len uint = 2
var next_len_bucket uint = 4
/* Compute the minimum possible cost of reaching any future position. */
var next_len_offset uint = 10
for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost {
/* We already reached (pos + len) with no more cost than the minimum
possible cost of reaching anything from this pos, so there is no point in
looking for lengths <= len. */
len++
if len == next_len_offset {
/* We reached the next copy length code bucket, so we add one more
extra bit to the minimum cost. */
min_cost += 1.0
next_len_offset += next_len_bucket
next_len_bucket *= 2
}
}
return uint(len)
}
/* REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 {
var clen uint = uint(zopfliNodeCopyLength(&nodes[pos]))
var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF)
var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos]))
/* Since |block_start + pos| is the end position of the command, the copy part
starts from |block_start + pos - clen|. Distances that are greater than
this or greater than |max_backward_limit| + |gap| are static dictionary
references, and do not update the last distances.
Also distance code 0 (last distance) does not update the last distances. */
if pos == 0 {
return 0
} else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 {
return uint32(pos)
} else {
return nodes[pos-clen-ilen].u.shortcut
}
}
/* Fills in dist_cache[0..3] with the last four distances (as defined by
Section 4. of the Spec) that would be used at (block_start + pos) if we
used the shortest path of commands from block_start, computed from
nodes[0..pos]. The last four distances at block_start are in
starting_dist_cache[0..3].
REQUIRES: nodes[pos].cost < kInfinity
REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */
func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) {
var idx int = 0
var p uint = uint(nodes[pos].u.shortcut)
for idx < 4 && p > 0 {
var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF)
var clen uint = uint(zopfliNodeCopyLength(&nodes[p]))
var dist uint = uint(zopfliNodeCopyDistance(&nodes[p]))
dist_cache[idx] = int(dist)
idx++
/* Because of prerequisite, p >= clen + ilen >= 2. */
p = uint(nodes[p-clen-ilen].u.shortcut)
}
for ; idx < 4; idx++ {
dist_cache[idx] = starting_dist_cache[0]
starting_dist_cache = starting_dist_cache[1:]
}
}
/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it
is eligible. */
func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) {
/* Save cost, because ComputeDistanceCache invalidates it. */
var node_cost float32 = nodes[pos].u.cost
nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes)
if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) {
var posdata posData
posdata.pos = pos
posdata.cost = node_cost
posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos)
computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:])
startPosQueuePush(queue, &posdata)
}
}
/* Returns longest copy length. */
func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint {
var cur_ix uint = block_start + pos
var cur_ix_masked uint = cur_ix & ringbuffer_mask
var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit)
var max_len uint = num_bytes - pos
var max_zopfli_len uint = maxZopfliLen(params)
var max_iters uint = maxZopfliCandidates(params)
var min_len uint
var result uint = 0
var k uint
var gap uint = 0
evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes)
{
var posdata *posData = startPosQueueAt(queue, 0)
var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos))
min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos)
}
/* Go over the command starting positions in order of increasing cost
difference. */
for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ {
var posdata *posData = startPosQueueAt(queue, k)
var start uint = posdata.pos
var inscode uint16 = getInsertLengthCode(pos - start)
var start_costdiff float32 = posdata.costdiff
var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos)
var best_len uint = min_len - 1
var j uint = 0
/* Look for last distance matches using the distance cache from this
starting position. */
for ; j < numDistanceShortCodes && best_len < max_len; j++ {
var idx uint = uint(kDistanceCacheIndex[j])
var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j])
var prev_ix uint = cur_ix - backward
var len uint = 0
var continuation byte = ringbuffer[cur_ix_masked+best_len]
if cur_ix_masked+best_len > ringbuffer_mask {
break
}
if backward > max_distance+gap {
/* Word dictionary -> ignore. */
continue
}
if backward <= max_distance {
/* Regular backward reference. */
if prev_ix >= cur_ix {
continue
}
prev_ix &= ringbuffer_mask
if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] {
continue
}
len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len)
} else {
continue
}
{
var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j)
var l uint
for l = best_len + 1; l <= len; l++ {
var copycode uint16 = getCopyLengthCode(l)
var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0)
var tmp float32
if cmdcode < 128 {
tmp = base_cost
} else {
tmp = dist_cost
}
var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
if cost < nodes[pos+l].u.cost {
updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost)
result = brotli_max_size_t(result, l)
}
best_len = l
}
}
}
/* At higher iterations look only for new last distance matches, since
looking only for new command start positions with the same distances
does not help much. */
if k >= 2 {
continue
}
{
/* Loop through all possible copy lengths at this position. */
var len uint = min_len
for j = 0; j < num_matches; j++ {
var match backwardMatch = matches[j]
var dist uint = uint(match.distance)
var is_dictionary_match bool = (dist > max_distance+gap)
var dist_code uint = dist + numDistanceShortCodes - 1
var dist_symbol uint16
var distextra uint32
var distnumextra uint32
var dist_cost float32
var max_match_len uint
/* We already tried all possible last distance matches, so we can use
normal distance code here. */
prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra)
distnumextra = uint32(dist_symbol) >> 10
dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF)
/* Try all copy lengths up until the maximum copy length corresponding
to this distance. If the distance refers to the static dictionary, or
the maximum length is long enough, try only one maximum length. */
max_match_len = backwardMatchLength(&match)
if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) {
len = max_match_len
}
for ; len <= max_match_len; len++ {
var len_code uint
if is_dictionary_match {
len_code = backwardMatchLengthCode(&match)
} else {
len_code = len
}
var copycode uint16 = getCopyLengthCode(len_code)
var cmdcode uint16 = combineLengthCodes(inscode, copycode, false)
var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode)
if cost < nodes[pos+len].u.cost {
updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost)
result = brotli_max_size_t(result, uint(len))
}
}
}
}
}