mirror of https://github.com/go-gitea/gitea.git
Move modules/gzip to gitea.com/macaron/gzip (#9058)
* Move modules/gzip to gitea.com/macaron/gzip
* Fix vendor

pull/9059/head
parent ba4e8f221b
commit 9ff6312627
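For code in the main repository, the practical effect of this commit is an import path change: the gzip middleware that previously lived in the in-tree modules/gzip package is now consumed as the external module gitea.com/macaron/gzip. A minimal sketch of registering the extracted middleware in a Macaron app (the route and handler below are illustrative, not part of this diff):

package main

import (
    "gitea.com/macaron/gzip"
    "gitea.com/macaron/macaron"
)

func main() {
    m := macaron.New()
    // Compress responses once they exceed gzip.MinSize and the client sends Accept-Encoding: gzip.
    m.Use(gzip.Middleware())
    m.Get("/", func() string { return "hello" })
    m.Run()
}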
@@ -1,131 +0,0 @@
// Copyright 2019 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package gzip

import (
    "archive/zip"
    "bytes"
    "io/ioutil"
    "net/http"
    "net/http/httptest"
    "testing"

    "gitea.com/macaron/macaron"
    gzipp "github.com/klauspost/compress/gzip"
    "github.com/stretchr/testify/assert"
)

func setup(sampleResponse []byte) (*macaron.Macaron, *[]byte) {
    m := macaron.New()
    m.Use(Middleware())
    m.Get("/", func() *[]byte { return &sampleResponse })
    return m, &sampleResponse
}

func reqNoAcceptGzip(t *testing.T, m *macaron.Macaron, sampleResponse *[]byte) {
    // Request without accept gzip: Should not gzip
    resp := httptest.NewRecorder()
    req, err := http.NewRequest("GET", "/", nil)
    assert.NoError(t, err)
    m.ServeHTTP(resp, req)

    _, ok := resp.HeaderMap[contentEncodingHeader]
    assert.False(t, ok)

    contentEncoding := resp.Header().Get(contentEncodingHeader)
    assert.NotContains(t, contentEncoding, "gzip")

    result := resp.Body.Bytes()
    assert.Equal(t, *sampleResponse, result)
}

func reqAcceptGzip(t *testing.T, m *macaron.Macaron, sampleResponse *[]byte, expectGzip bool) {
    // Request with accept gzip: should gzip only when expectGzip is true
    resp := httptest.NewRecorder()
    req, err := http.NewRequest("GET", "/", nil)
    assert.NoError(t, err)
    req.Header.Set(acceptEncodingHeader, "gzip")
    m.ServeHTTP(resp, req)

    _, ok := resp.HeaderMap[contentEncodingHeader]
    assert.Equal(t, ok, expectGzip)

    contentEncoding := resp.Header().Get(contentEncodingHeader)
    if expectGzip {
        assert.Contains(t, contentEncoding, "gzip")
        gzippReader, err := gzipp.NewReader(resp.Body)
        assert.NoError(t, err)
        result, err := ioutil.ReadAll(gzippReader)
        assert.NoError(t, err)
        assert.Equal(t, *sampleResponse, result)
    } else {
        assert.NotContains(t, contentEncoding, "gzip")
        result := resp.Body.Bytes()
        assert.Equal(t, *sampleResponse, result)
    }
}

func TestMiddlewareSmall(t *testing.T) {
    m, sampleResponse := setup([]byte("Small response"))

    reqNoAcceptGzip(t, m, sampleResponse)

    reqAcceptGzip(t, m, sampleResponse, false)
}

func TestMiddlewareLarge(t *testing.T) {
    b := make([]byte, MinSize+1)
    for i := range b {
        b[i] = byte(i % 256)
    }
    m, sampleResponse := setup(b)

    reqNoAcceptGzip(t, m, sampleResponse)

    // This should be gzipped as we accept gzip
    reqAcceptGzip(t, m, sampleResponse, true)
}

func TestMiddlewareGzip(t *testing.T) {
    b := make([]byte, MinSize*10)
    for i := range b {
        b[i] = byte(i % 256)
    }
    outputBuffer := bytes.NewBuffer([]byte{})
    gzippWriter := gzipp.NewWriter(outputBuffer)
    gzippWriter.Write(b)
    gzippWriter.Flush()
    gzippWriter.Close()
    output := outputBuffer.Bytes()

    m, sampleResponse := setup(output)

    reqNoAcceptGzip(t, m, sampleResponse)

    // This should not be gzipped even though we accept gzip
    reqAcceptGzip(t, m, sampleResponse, false)
}

func TestMiddlewareZip(t *testing.T) {
    b := make([]byte, MinSize*10)
    for i := range b {
        b[i] = byte(i % 256)
    }
    outputBuffer := bytes.NewBuffer([]byte{})
    zipWriter := zip.NewWriter(outputBuffer)
    fileWriter, err := zipWriter.Create("default")
    assert.NoError(t, err)
    fileWriter.Write(b)
    //fileWriter.Close()
    zipWriter.Close()
    output := outputBuffer.Bytes()

    m, sampleResponse := setup(output)

    reqNoAcceptGzip(t, m, sampleResponse)

    // This should not be gzipped even though we accept gzip
    reqAcceptGzip(t, m, sampleResponse, false)
}
@@ -0,0 +1,9 @@
module gitea.com/macaron/gzip

go 1.12

require (
    gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb
    github.com/klauspost/compress v1.9.2
    github.com/stretchr/testify v1.4.0
)
@@ -0,0 +1,42 @@
gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591 h1:UbCTjPcLrNxR9LzKDjQBMT2zoxZuEnca1pZCpgeMuhQ=
gitea.com/macaron/inject v0.0.0-20190803172902-8375ba841591/go.mod h1:h6E4kLao1Yko6DOU6QDnQPcuoNzvbZqzj2mtPcEn1aM=
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb h1:amL0md6orTj1tXY16ANzVU9FmzQB+W7aJwp8pVDbrmA=
gitea.com/macaron/macaron v1.3.3-0.20190821202302-9646c0587edb/go.mod h1:0coI+mSPSwbsyAbOuFllVS38awuk9mevhLD52l50Gjs=
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCWIWY=
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 h1:WN9BUFbdyOsSH/XohnWpXOlq9NBD5sGAB2FciQMUEe8=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e h1:GSGeB9EAKY2spCABz6xOX5DbxZEXolK+nBSvmsQwRjM=
github.com/unknwon/com v0.0.0-20190804042917-757f69c95f3e/go.mod h1:tOOxU81rwgoCLoOVVPHb6T/wt8HZygqH5id+GNnlCXM=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/ini.v1 v1.44.0 h1:YRJzTUp0kSYWUVFF5XAbDFfyiqwsl0Vb9R8TVP5eRi0=
gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1,32 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// forwardCopy is like the built-in copy function except that it always goes
// forward from the start, even if the dst and src overlap.
// It is equivalent to:
//   for i := 0; i < n; i++ {
//     mem[dst+i] = mem[src+i]
//   }
func forwardCopy(mem []byte, dst, src, n int) {
    if dst <= src {
        copy(mem[dst:dst+n], mem[src:src+n])
        return
    }
    for {
        if dst >= src+n {
            copy(mem[dst:dst+n], mem[src:src+n])
            return
        }
        // There is some forward overlap. The destination
        // will be filled with a repeated pattern of mem[src:src+k].
        // We copy one instance of the pattern here, then repeat.
        // Each time around this loop k will double.
        k := dst - src
        copy(mem[dst:dst+k], mem[src:src+k])
        n -= k
        dst += k
    }
}
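The deleted forwardCopy helper from the old vendored flate code implements the overlapping copy that deflate back-references need: when the destination overlaps the source, the output becomes a repeated pattern of the source bytes rather than what a plain copy would produce. A self-contained sketch of that behavior, with the same logic inlined so it runs outside the flate package (the function name here is illustrative):

package main

import "fmt"

// overlappingForwardCopy repeats the forwardCopy logic shown above:
// it always copies forward from the start, so overlapping regions
// fill the destination with a repeated pattern of the source.
func overlappingForwardCopy(mem []byte, dst, src, n int) {
    if dst <= src {
        copy(mem[dst:dst+n], mem[src:src+n])
        return
    }
    for {
        if dst >= src+n {
            copy(mem[dst:dst+n], mem[src:src+n])
            return
        }
        k := dst - src
        copy(mem[dst:dst+k], mem[src:src+k])
        n -= k
        dst += k
    }
}

func main() {
    mem := []byte("abcde")
    // Copy 4 bytes starting at offset 0 to offset 1: the single byte 'a'
    // is propagated forward, yielding "aaaaa" (a run-length style copy).
    overlappingForwardCopy(mem, 1, 0, 4)
    fmt.Println(string(mem)) // aaaaa
}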
@@ -1,41 +0,0 @@
//+build !noasm
//+build !appengine

// Copyright 2015, Klaus Post, see LICENSE for details.

package flate

import (
    "github.com/klauspost/cpuid"
)

// crc32sse returns a hash for the first 4 bytes of the slice
// len(a) must be >= 4.
//go:noescape
func crc32sse(a []byte) uint32

// crc32sseAll calculates hashes for each 4-byte set in a.
// dst must be at least len(a) - 4 in size.
// The size is not checked by the assembly.
//go:noescape
func crc32sseAll(a []byte, dst []uint32)

// matchLenSSE4 returns the number of matching bytes in a and b
// up to length 'max'. Both slices must be at least 'max'
// bytes in size.
//
// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
//
//go:noescape
func matchLenSSE4(a, b []byte, max int) int

// histogram accumulates a histogram of b in h.
// h must be at least 256 entries in length,
// and must be cleared before calling this function.
//go:noescape
func histogram(b []byte, h []int32)

// Detect SSE 4.2 feature.
func init() {
    useSSE42 = cpuid.CPU.SSE42()
}
@@ -1,213 +0,0 @@
//+build !noasm
//+build !appengine

// Copyright 2015, Klaus Post, see LICENSE for details.

// func crc32sse(a []byte) uint32
TEXT ·crc32sse(SB), 4, $0
    MOVQ a+0(FP), R10
    XORQ BX, BX

    // CRC32 dword (R10), EBX
    BYTE $0xF2; BYTE $0x41; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0x1a

    MOVL BX, ret+24(FP)
    RET

// func crc32sseAll(a []byte, dst []uint32)
TEXT ·crc32sseAll(SB), 4, $0
    MOVQ a+0(FP), R8      // R8: src
    MOVQ a_len+8(FP), R10 // input length
    MOVQ dst+24(FP), R9   // R9: dst
    SUBQ $4, R10
    JS   end
    JZ   one_crc
    MOVQ R10, R13
    SHRQ $2, R10 // len/4
    ANDQ $3, R13 // len&3
    XORQ BX, BX
    ADDQ $1, R13
    TESTQ R10, R10
    JZ   rem_loop

crc_loop:
    MOVQ (R8), R11
    XORQ BX, BX
    XORQ DX, DX
    XORQ DI, DI
    MOVQ R11, R12
    SHRQ $8, R11
    MOVQ R12, AX
    MOVQ R11, CX
    SHRQ $16, R12
    SHRQ $16, R11
    MOVQ R12, SI

    // CRC32 EAX, EBX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd8

    // CRC32 ECX, EDX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd1

    // CRC32 ESI, EDI
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xfe
    MOVL BX, (R9)
    MOVL DX, 4(R9)
    MOVL DI, 8(R9)

    XORQ BX, BX
    MOVL R11, AX

    // CRC32 EAX, EBX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd8
    MOVL BX, 12(R9)

    ADDQ $16, R9
    ADDQ $4, R8
    XORQ BX, BX
    SUBQ $1, R10
    JNZ  crc_loop

rem_loop:
    MOVL (R8), AX

    // CRC32 EAX, EBX
    BYTE $0xF2; BYTE $0x0f
    BYTE $0x38; BYTE $0xf1; BYTE $0xd8

    MOVL BX, (R9)
    ADDQ $4, R9
    ADDQ $1, R8
    XORQ BX, BX
    SUBQ $1, R13
    JNZ  rem_loop

end:
    RET

one_crc:
    MOVQ $1, R13
    XORQ BX, BX
    JMP  rem_loop

// func matchLenSSE4(a, b []byte, max int) int
TEXT ·matchLenSSE4(SB), 4, $0
    MOVQ a_base+0(FP), SI
    MOVQ b_base+24(FP), DI
    MOVQ DI, DX
    MOVQ max+48(FP), CX

cmp8:
    // As long as we are 8 or more bytes before the end of max, we can load and
    // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
    CMPQ CX, $8
    JLT  cmp1
    MOVQ (SI), AX
    MOVQ (DI), BX
    CMPQ AX, BX
    JNE  bsf
    ADDQ $8, SI
    ADDQ $8, DI
    SUBQ $8, CX
    JMP  cmp8

bsf:
    // If those 8 bytes were not equal, XOR the two 8 byte values, and return
    // the index of the first byte that differs. The BSF instruction finds the
    // least significant 1 bit, the amd64 architecture is little-endian, and
    // the shift by 3 converts a bit index to a byte index.
    XORQ AX, BX
    BSFQ BX, BX
    SHRQ $3, BX
    ADDQ BX, DI

    // Subtract off &b[0] to convert from &b[ret] to ret, and return.
    SUBQ DX, DI
    MOVQ DI, ret+56(FP)
    RET

cmp1:
    // In the slices' tail, compare 1 byte at a time.
    CMPQ CX, $0
    JEQ  matchLenEnd
    MOVB (SI), AX
    MOVB (DI), BX
    CMPB AX, BX
    JNE  matchLenEnd
    ADDQ $1, SI
    ADDQ $1, DI
    SUBQ $1, CX
    JMP  cmp1

matchLenEnd:
    // Subtract off &b[0] to convert from &b[ret] to ret, and return.
    SUBQ DX, DI
    MOVQ DI, ret+56(FP)
    RET

// func histogram(b []byte, h []int32)
TEXT ·histogram(SB), 4, $0
    MOVQ b+0(FP), SI     // SI: &b
    MOVQ b_len+8(FP), R9 // R9: len(b)
    MOVQ h+24(FP), DI    // DI: Histogram
    MOVQ R9, R8
    SHRQ $3, R8
    JZ   hist1
    XORQ R11, R11

loop_hist8:
    MOVQ (SI), R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    MOVB R10, R11
    INCL (DI)(R11*4)
    SHRQ $8, R10

    INCL (DI)(R10*4)

    ADDQ $8, SI
    DECQ R8
    JNZ  loop_hist8

hist1:
    ANDQ $7, R9
    JZ   end_hist
    XORQ R10, R10

loop_hist1:
    MOVB (SI), R10
    INCL (DI)(R10*4)
    INCQ SI
    DECQ R9
    JNZ  loop_hist1

end_hist:
    RET
@@ -1,35 +0,0 @@
//+build !amd64 noasm appengine

// Copyright 2015, Klaus Post, see LICENSE for details.

package flate

func init() {
    useSSE42 = false
}

// crc32sse should never be called.
func crc32sse(a []byte) uint32 {
    panic("no assembler")
}

// crc32sseAll should never be called.
func crc32sseAll(a []byte, dst []uint32) {
    panic("no assembler")
}

// matchLenSSE4 should never be called.
func matchLenSSE4(a, b []byte, max int) int {
    panic("no assembler")
    return 0
}

// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
    h = h[:256]
    for _, t := range b {
        h[t]++
    }
}
File diff suppressed because it is too large
@@ -0,0 +1,257 @@
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
    "fmt"
    "math/bits"
)

type fastEnc interface {
    Encode(dst *tokens, src []byte)
    Reset()
}

func newFastEnc(level int) fastEnc {
    switch level {
    case 1:
        return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 2:
        return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 3:
        return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 4:
        return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 5:
        return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
    case 6:
        return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
    default:
        panic("invalid level specified")
    }
}

const (
    tableBits       = 16             // Bits used in the table
    tableSize       = 1 << tableBits // Size of the table
    tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
    baseMatchOffset = 1              // The smallest match offset
    baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
    maxMatchOffset  = 1 << 15        // The largest match offset

    bTableBits   = 18                                           // Bits used in the big tables
    bTableSize   = 1 << bTableBits                              // Size of the table
    allocHistory = maxMatchOffset * 10                          // Size to preallocate for history.
    bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize // Reset the buffer offset when reaching this.
)

const (
    prime3bytes = 506832829
    prime4bytes = 2654435761
    prime5bytes = 889523592379
    prime6bytes = 227718039650203
    prime7bytes = 58295818150454627
    prime8bytes = 0xcf1bbcdcb7a56463
)

func load32(b []byte, i int) uint32 {
    // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
    b = b[i:]
    b = b[:4]
    return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load64(b []byte, i int) uint64 {
    // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
    b = b[i:]
    b = b[:8]
    return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
        uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func load3232(b []byte, i int32) uint32 {
    // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
    b = b[i:]
    b = b[:4]
    return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func load6432(b []byte, i int32) uint64 {
    // Help the compiler eliminate bounds checks on the read so it can be done in a single read.
    b = b[i:]
    b = b[:8]
    return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
        uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

func hash(u uint32) uint32 {
    return (u * 0x1e35a7bd) >> tableShift
}

type tableEntry struct {
    val    uint32
    offset int32
}

// fastGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type fastGen struct {
    hist []byte
    cur  int32
}

func (e *fastGen) addBlock(src []byte) int32 {
    // check if we have space already
    if len(e.hist)+len(src) > cap(e.hist) {
        if cap(e.hist) == 0 {
            e.hist = make([]byte, 0, allocHistory)
        } else {
            if cap(e.hist) < maxMatchOffset*2 {
                panic("unexpected buffer size")
            }
            // Move down
            offset := int32(len(e.hist)) - maxMatchOffset
            copy(e.hist[0:maxMatchOffset], e.hist[offset:])
            e.cur += offset
            e.hist = e.hist[:maxMatchOffset]
        }
    }
    s := int32(len(e.hist))
    e.hist = append(e.hist, src...)
    return s
}

// hash4u returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4u(u uint32, h uint8) uint32 {
    return (u * prime4bytes) >> ((32 - h) & 31)
}

type tableEntryPrev struct {
    Cur  tableEntry
    Prev tableEntry
}

// hash4x64 returns the hash of the lowest 4 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <32.
func hash4x64(u uint64, h uint8) uint32 {
    return (uint32(u) * prime4bytes) >> ((32 - h) & 31)
}

// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash7(u uint64, h uint8) uint32 {
    return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & 63))
}

// hash8 returns the hash of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash8(u uint64, h uint8) uint32 {
    return uint32((u * prime8bytes) >> ((64 - h) & 63))
}

// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
// Preferably h should be a constant and should always be <64.
func hash6(u uint64, h uint8) uint32 {
    return uint32(((u << (64 - 48)) * prime6bytes) >> ((64 - h) & 63))
}

// matchlen will return the match length between offsets s and t in src.
// The maximum length returned is maxMatchLength - 4.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
    if debugDecode {
        if t >= s {
            panic(fmt.Sprint("t >=s:", t, s))
        }
        if int(s) >= len(src) {
            panic(fmt.Sprint("s >= len(src):", s, len(src)))
        }
        if t < 0 {
            panic(fmt.Sprint("t < 0:", t))
        }
        if s-t > maxMatchOffset {
            panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
        }
    }
    s1 := int(s) + maxMatchLength - 4
    if s1 > len(src) {
        s1 = len(src)
    }

    // Extend the match to be as long as possible.
    return int32(matchLen(src[s:s1], src[t:]))
}

// matchlenLong will return the match length between offsets s and t in src.
// It is assumed that s > t, that t >= 0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
    if debugDecode {
        if t >= s {
            panic(fmt.Sprint("t >=s:", t, s))
        }
        if int(s) >= len(src) {
            panic(fmt.Sprint("s >= len(src):", s, len(src)))
        }
        if t < 0 {
            panic(fmt.Sprint("t < 0:", t))
        }
        if s-t > maxMatchOffset {
            panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
        }
    }
    // Extend the match to be as long as possible.
    return int32(matchLen(src[s:], src[t:]))
}

// Reset the encoding table.
func (e *fastGen) Reset() {
    if cap(e.hist) < int(maxMatchOffset*8) {
        l := maxMatchOffset * 8
        // Make it at least 1MB.
        if l < 1<<20 {
            l = 1 << 20
        }
        e.hist = make([]byte, 0, l)
    }
    // We offset current position so everything will be out of reach
    e.cur += maxMatchOffset + int32(len(e.hist))
    e.hist = e.hist[:0]
}

// matchLen returns the maximum length.
// 'a' must be the shortest of the two.
func matchLen(a, b []byte) int {
    b = b[:len(a)]
    var checked int
    if len(a) > 4 {
        // Try 4 bytes first
        if diff := load32(a, 0) ^ load32(b, 0); diff != 0 {
            return bits.TrailingZeros32(diff) >> 3
        }
        // Switch to 8 byte matching.
        checked = 4
        a = a[4:]
        b = b[4:]
        for len(a) >= 8 {
            b = b[:len(a)]
            if diff := load64(a, 0) ^ load64(b, 0); diff != 0 {
                return checked + (bits.TrailingZeros64(diff) >> 3)
            }
            checked += 8
            a = a[8:]
            b = b[8:]
        }
    }
    b = b[:len(a)]
    for i := range a {
        if a[i] != b[i] {
            return int(i) + checked
        }
    }
    return len(a) + checked
}
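For intuition about the matchLen helper at the end of the new vendored file: it returns the number of leading bytes the two slices have in common, with 'a' required to be the shorter slice. A toy check of that contract using only the standard library (this is not the vendored code, just an illustration of the same behavior; the function name is hypothetical):

package main

import "fmt"

// commonPrefixLen mirrors the contract of matchLen above: it counts how many
// leading bytes a and b share; a must be the shorter of the two slices.
func commonPrefixLen(a, b []byte) int {
    b = b[:len(a)]
    for i := range a {
        if a[i] != b[i] {
            return i
        }
    }
    return len(a)
}

func main() {
    fmt.Println(commonPrefixLen([]byte("flate"), []byte("flames"))) // 3
    fmt.Println(commonPrefixLen([]byte("go"), []byte("gzip")))      // 1
}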