OpenTracing support

Michael 2018-01-10 17:48:04 +01:00 committed by Traefiker
parent 8394549857
commit 30ffba78e6
272 changed files with 44352 additions and 63 deletions

vendor/github.com/pierrec/lz4/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2015, Pierre Curto
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of xxHash nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/pierrec/lz4/block.go generated vendored Normal file

@@ -0,0 +1,445 @@
package lz4
import (
"encoding/binary"
"errors"
)
// block represents a frame data block.
// Used when compressing or decompressing frame blocks concurrently.
type block struct {
compressed bool
zdata []byte // compressed data
data []byte // decompressed data
offset int // offset within the data (with block dependency, the 64Kb window is prepended to it)
checksum uint32 // compressed data checksum
err error // error while [de]compressing
}
var (
// ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted.
ErrInvalidSource = errors.New("lz4: invalid source")
// ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when
// the supplied buffer for [de]compression is too small.
ErrShortBuffer = errors.New("lz4: short buffer")
)
// CompressBlockBound returns the maximum compressed size of a buffer of size n when the data is not compressible.
func CompressBlockBound(n int) int {
return n + n/255 + 16
}
// UncompressBlock decompresses the source buffer into the destination one,
// starting at the di index and returning the decompressed size.
//
// The destination buffer must be sized appropriately.
//
// An error is returned if the source data is invalid or the destination buffer is too small.
func UncompressBlock(src, dst []byte, di int) (int, error) {
si, sn, di0 := 0, len(src), di
if sn == 0 {
return 0, nil
}
for {
// literals and match lengths (token)
lLen := int(src[si] >> 4)
mLen := int(src[si] & 0xF)
if si++; si == sn {
return di, ErrInvalidSource
}
// literals
if lLen > 0 {
if lLen == 0xF {
for src[si] == 0xFF {
lLen += 0xFF
if si++; si == sn {
return di - di0, ErrInvalidSource
}
}
lLen += int(src[si])
if si++; si == sn {
return di - di0, ErrInvalidSource
}
}
if len(dst)-di < lLen || si+lLen > sn {
return di - di0, ErrShortBuffer
}
di += copy(dst[di:], src[si:si+lLen])
if si += lLen; si >= sn {
return di - di0, nil
}
}
if si += 2; si >= sn {
return di, ErrInvalidSource
}
offset := int(src[si-2]) | int(src[si-1])<<8
if di-offset < 0 || offset == 0 {
return di - di0, ErrInvalidSource
}
// match
if mLen == 0xF {
for src[si] == 0xFF {
mLen += 0xFF
if si++; si == sn {
return di - di0, ErrInvalidSource
}
}
mLen += int(src[si])
if si++; si == sn {
return di - di0, ErrInvalidSource
}
}
// minimum match length is 4
mLen += 4
if len(dst)-di <= mLen {
return di - di0, ErrShortBuffer
}
// copy the match (NB. match is at least 4 bytes long)
// NB. past di, copy() would write old bytes instead of
// the ones we just copied, so split the work into the largest chunk.
for ; mLen >= offset; mLen -= offset {
di += copy(dst[di:], dst[di-offset:di])
}
di += copy(dst[di:], dst[di-offset:di-offset+mLen])
}
}
// CompressBlock compresses the source buffer starting at soffset into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, soffset int) (int, error) {
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 || soffset >= sn {
return 0, nil
}
var si, di int
// fast scan strategy:
// we only need a hash table to store the last sequences (4 bytes)
var hashTable [1 << hashLog]int
var hashShift = uint((minMatch * 8) - hashLog)
// Initialise the hash table with the first 64Kb of the input buffer
// (used when compressing dependent blocks)
for si < soffset {
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
si++
hashTable[h] = si
}
anchor := si
fma := 1 << skipStrength
for si < sn-minMatch {
// hash the next 4 bytes (sequence)...
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
// -1 to separate existing entries from new ones
ref := hashTable[h] - 1
// ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
hashTable[h] = si + 1
// no need to check the last 3 bytes in the first literal 4 bytes as
// this guarantees that the next match, if any, is compressed with
// a lower size, since to have some compression we must have:
// ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size)
// => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap
// and by definition we do have:
// ll >= 1, ml >= 4
// => ll+ml >= 5
// => so overlap must be 0
// the sequence is new, out of bound (64kb) or not valid: try next sequence
if ref < 0 || fma&(1<<skipStrength-1) < 4 ||
(si-ref)>>winSizeLog > 0 ||
src[ref] != src[si] ||
src[ref+1] != src[si+1] ||
src[ref+2] != src[si+2] ||
src[ref+3] != src[si+3] {
// variable step: improves performance on non-compressible data
si += fma >> skipStrength
fma++
continue
}
// match found
fma = 1 << skipStrength
lLen := si - anchor
offset := si - ref
// encode match length part 1
si += minMatch
mLen := si // match length has minMatch already
for si <= sn && src[si] == src[si-offset] {
si++
}
mLen = si - mLen
if mLen < 0xF {
dst[di] = byte(mLen)
} else {
dst[di] = 0xF
}
// encode literals length
if lLen < 0xF {
dst[di] |= byte(lLen << 4)
} else {
dst[di] |= 0xF0
if di++; di == dn {
return di, ErrShortBuffer
}
l := lLen - 0xF
for ; l >= 0xFF; l -= 0xFF {
dst[di] = 0xFF
if di++; di == dn {
return di, ErrShortBuffer
}
}
dst[di] = byte(l)
}
if di++; di == dn {
return di, ErrShortBuffer
}
// literals
if di+lLen >= dn {
return di, ErrShortBuffer
}
di += copy(dst[di:], src[anchor:anchor+lLen])
anchor = si
// encode offset
if di += 2; di >= dn {
return di, ErrShortBuffer
}
dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
// encode match length part 2
if mLen >= 0xF {
for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
dst[di] = 0xFF
if di++; di == dn {
return di, ErrShortBuffer
}
}
dst[di] = byte(mLen)
if di++; di == dn {
return di, ErrShortBuffer
}
}
}
if anchor == 0 {
// incompressible
return 0, nil
}
// last literals
lLen := len(src) - anchor
if lLen < 0xF {
dst[di] = byte(lLen << 4)
} else {
dst[di] = 0xF0
if di++; di == dn {
return di, ErrShortBuffer
}
lLen -= 0xF
for ; lLen >= 0xFF; lLen -= 0xFF {
dst[di] = 0xFF
if di++; di == dn {
return di, ErrShortBuffer
}
}
dst[di] = byte(lLen)
}
if di++; di == dn {
return di, ErrShortBuffer
}
// write literals
src = src[anchor:]
switch n := di + len(src); {
case n > dn:
return di, ErrShortBuffer
case n >= sn:
// incompressible
return 0, nil
}
di += copy(dst[di:], src)
return di, nil
}
// CompressBlockHC compresses the source buffer starting at soffset into the destination one.
// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
//
// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
//
// An error is returned if the destination buffer is too small.
func CompressBlockHC(src, dst []byte, soffset int) (int, error) {
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 || soffset >= sn {
return 0, nil
}
var si, di int
// Hash Chain strategy:
// we need a hash table and a chain table
// the chain table cannot contain more entries than the window size (64Kb entries)
var hashTable [1 << hashLog]int
var chainTable [winSize]int
var hashShift = uint((minMatch * 8) - hashLog)
// Initialise the hash table with the first 64Kb of the input buffer
// (used when compressing dependent blocks)
for si < soffset {
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
chainTable[si&winMask] = hashTable[h]
si++
hashTable[h] = si
}
anchor := si
for si < sn-minMatch {
// hash the next 4 bytes (sequence)...
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
// follow the chain until out of window and give the longest match
mLen := 0
offset := 0
for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 {
// the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length
if src[next+mLen] == src[si+mLen] {
for ml := 0; ; ml++ {
if src[next+ml] != src[si+ml] || si+ml > sn {
// found a longer match, keep its position and length
if mLen < ml && ml >= minMatch {
mLen = ml
offset = si - next
}
break
}
}
}
}
chainTable[si&winMask] = hashTable[h]
hashTable[h] = si + 1
// no match found
if mLen == 0 {
si++
continue
}
// match found
// update hash/chain tables with overlapping bytes:
// si already hashed, add everything from si+1 up to the match length
for si, ml := si+1, si+mLen; si < ml; {
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
chainTable[si&winMask] = hashTable[h]
si++
hashTable[h] = si
}
lLen := si - anchor
si += mLen
mLen -= minMatch // match length does not include minMatch
if mLen < 0xF {
dst[di] = byte(mLen)
} else {
dst[di] = 0xF
}
// encode literals length
if lLen < 0xF {
dst[di] |= byte(lLen << 4)
} else {
dst[di] |= 0xF0
if di++; di == dn {
return di, ErrShortBuffer
}
l := lLen - 0xF
for ; l >= 0xFF; l -= 0xFF {
dst[di] = 0xFF
if di++; di == dn {
return di, ErrShortBuffer
}
}
dst[di] = byte(l)
}
if di++; di == dn {
return di, ErrShortBuffer
}
// literals
if di+lLen >= dn {
return di, ErrShortBuffer
}
di += copy(dst[di:], src[anchor:anchor+lLen])
anchor = si
// encode offset
if di += 2; di >= dn {
return di, ErrShortBuffer
}
dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
// encode match length part 2
if mLen >= 0xF {
for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
dst[di] = 0xFF
if di++; di == dn {
return di, ErrShortBuffer
}
}
dst[di] = byte(mLen)
if di++; di == dn {
return di, ErrShortBuffer
}
}
}
if anchor == 0 {
// incompressible
return 0, nil
}
// last literals
lLen := len(src) - anchor
if lLen < 0xF {
dst[di] = byte(lLen << 4)
} else {
dst[di] = 0xF0
if di++; di == dn {
return di, ErrShortBuffer
}
lLen -= 0xF
for ; lLen >= 0xFF; lLen -= 0xFF {
dst[di] = 0xFF
if di++; di == dn {
return di, ErrShortBuffer
}
}
dst[di] = byte(lLen)
}
if di++; di == dn {
return di, ErrShortBuffer
}
// write literals
src = src[anchor:]
switch n := di + len(src); {
case n > dn:
return di, ErrShortBuffer
case n >= sn:
// incompressible
return 0, nil
}
di += copy(dst[di:], src)
return di, nil
}
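
For reference, here is a minimal sketch of driving the block-level API above directly (CompressBlockBound, CompressBlock and UncompressBlock, with this vendored version's signatures). The package documentation recommends the frame-level Reader and Writer instead, so treat this purely as an illustration:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	data := bytes.Repeat([]byte("lz4 block example "), 100)

	// Worst-case destination size for incompressible input.
	zbuf := make([]byte, lz4.CompressBlockBound(len(data)))
	zn, err := lz4.CompressBlock(data, zbuf, 0)
	if err != nil {
		log.Fatal(err)
	}
	if zn == 0 {
		log.Fatal("input is not compressible")
	}
	zbuf = zbuf[:zn]

	// The destination must be large enough to hold the decompressed data.
	out := make([]byte, len(data))
	n, err := lz4.UncompressBlock(zbuf, out, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(bytes.Equal(data, out[:n])) // true
}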

vendor/github.com/pierrec/lz4/lz4.go generated vendored Normal file

@@ -0,0 +1,105 @@
// Package lz4 implements reading and writing lz4 compressed data (a frame),
// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
// using an io.Reader (decompression) and io.Writer (compression).
// It is designed to minimize memory usage while maximizing throughput by being able to
// [de]compress data concurrently.
//
// The Reader and the Writer support concurrent processing provided the supplied buffers are
// large enough (in multiples of BlockMaxSize) and there is no block dependency.
// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
// The runtime.GOMAXPROCS() value determines the degree of concurrency.
//
// Although the block level compression and decompression functions are exposed and are fully compatible
// with the lz4 block format definition, they are low level and should not be used directly.
// For a complete description of an lz4 compressed block, see:
// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
//
// See https://github.com/Cyan4973/lz4 for the reference C implementation.
package lz4
import (
"hash"
"sync"
"github.com/pierrec/xxHash/xxHash32"
)
const (
// Extension is the LZ4 frame file name extension
Extension = ".lz4"
// Version is the LZ4 frame format version
Version = 1
frameMagic = uint32(0x184D2204)
frameSkipMagic = uint32(0x184D2A50)
// The following constants are used to setup the compression algorithm.
minMatch = 4 // the minimum length of a match sequence (4 bytes)
winSizeLog = 16 // LZ4 64Kb window size limit
winSize = 1 << winSizeLog
winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
// hashLog determines the size of the hash table used to quickly find a previous match position.
// Its value influences the compression speed and memory usage, the lower the faster,
// but at the expense of the compression ratio.
// 16 seems to be the best compromise.
hashLog = 16
hashTableSize = 1 << hashLog
hashShift = uint((minMatch * 8) - hashLog)
mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
skipStrength = 6 // variable step for fast scan
hasher = uint32(2654435761) // prime number used to hash minMatch
)
// bsMapID maps a block max size ID to its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
var bsMapValue = map[int]byte{}
// Reversed.
func init() {
for i, v := range bsMapID {
bsMapValue[v] = i
}
}
// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
// It is the caller's responsibility to check them if necessary (typically when using the Reader concurrency).
type Header struct {
BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one)
BlockChecksum bool // compressed blocks are checksummed
NoChecksum bool // disable the frame checksum
BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
Size uint64 // the frame total size. It is _not_ computed by the Writer.
HighCompression bool // use high compression (only for the Writer)
done bool // whether the descriptor was processed (Read or Write and checked)
// Removed as not supported
// Dict bool // a dictionary id is to be used
// DictID uint32 // the dictionary id read from the frame, if any.
}
// xxhPool wraps the standard pool for xxHash items.
// Putting items back in the pool automatically resets them.
type xxhPool struct {
sync.Pool
}
func (p *xxhPool) Get() hash.Hash32 {
return p.Pool.Get().(hash.Hash32)
}
func (p *xxhPool) Put(h hash.Hash32) {
h.Reset()
p.Pool.Put(h)
}
// hashPool is used by readers and writers and contains xxHash items.
var hashPool = xxhPool{
Pool: sync.Pool{
New: func() interface{} { return xxHash32.New(0) },
},
}
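
As a hedged illustration of the Header options defined above (field names taken from this file; 256Kb is just one of the sizes listed in bsMapID), frame options are set on the Writer before its first Write:

package main

import (
	"bytes"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	var buf bytes.Buffer
	zw := lz4.NewWriter(&buf)
	// Options must be set before the first Write (or after a Reset).
	zw.Header = lz4.Header{
		BlockMaxSize:    256 << 10, // one of 64Kb, 256Kb, 1Mb, 4Mb (see bsMapID)
		BlockChecksum:   true,      // checksum every compressed block
		HighCompression: true,      // use CompressBlockHC instead of CompressBlock
	}
	if _, err := zw.Write([]byte("hello, lz4 frame")); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed frame: %d bytes", buf.Len())
}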

vendor/github.com/pierrec/lz4/reader.go generated vendored Normal file

@@ -0,0 +1,364 @@
package lz4
import (
"encoding/binary"
"errors"
"fmt"
"hash"
"io"
"io/ioutil"
"runtime"
"sync"
"sync/atomic"
)
// ErrInvalid is returned when the data being read is not an LZ4 archive
// (LZ4 magic number detection failed).
var ErrInvalid = errors.New("invalid lz4 data")
// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
// It is not an error.
var errEndOfBlock = errors.New("end of block")
// Reader implements the LZ4 frame decoder.
// The Header is set after the first call to Read().
// The Header may change between Read() calls in case of concatenated frames.
type Reader struct {
Pos int64 // position within the source
Header
src io.Reader
checksum hash.Hash32 // frame hash
wg sync.WaitGroup // decompressing go routine wait group
data []byte // buffered decompressed data
window []byte // 64Kb decompressed data window
}
// NewReader returns a new LZ4 frame decoder.
// No access to the underlying io.Reader is performed.
func NewReader(src io.Reader) *Reader {
return &Reader{
src: src,
checksum: hashPool.Get(),
}
}
// readHeader checks the frame magic number and parses the frame descriptor.
// Skippable frames are supported even as a first frame, although the LZ4
// specification recommends that skippable frames not be used as first frames.
func (z *Reader) readHeader(first bool) error {
defer z.checksum.Reset()
for {
var magic uint32
if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil {
if !first && err == io.ErrUnexpectedEOF {
return io.EOF
}
return err
}
z.Pos += 4
if magic>>8 == frameSkipMagic>>8 {
var skipSize uint32
if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil {
return err
}
z.Pos += 4
m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
z.Pos += m
if err != nil {
return err
}
continue
}
if magic != frameMagic {
return ErrInvalid
}
break
}
// header
var buf [8]byte
if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
return err
}
z.Pos += 2
b := buf[0]
if b>>6 != Version {
return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version)
}
z.BlockDependency = b>>5&1 == 0
z.BlockChecksum = b>>4&1 > 0
frameSize := b>>3&1 > 0
z.NoChecksum = b>>2&1 == 0
// z.Dict = b&1 > 0
bmsID := buf[1] >> 4 & 0x7
bSize, ok := bsMapID[bmsID]
if !ok {
return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID)
}
z.BlockMaxSize = bSize
z.checksum.Write(buf[0:2])
if frameSize {
if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil {
return err
}
z.Pos += 8
binary.LittleEndian.PutUint64(buf[:], z.Size)
z.checksum.Write(buf[0:8])
}
// if z.Dict {
// if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil {
// return err
// }
// z.Pos += 4
// binary.LittleEndian.PutUint32(buf[:], z.DictID)
// z.checksum.Write(buf[0:4])
// }
// header checksum
if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
return err
}
z.Pos++
if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h)
}
z.Header.done = true
return nil
}
// Read decompresses data from the underlying source into the supplied buffer.
//
// Since there can be multiple streams concatenated, Header values may
// change between calls to Read(). If that is the case, no data is actually read from
// the underlying io.Reader, to allow for potential input buffer resizing.
//
// Data is buffered if the input buffer is too small, and exhausted upon successive calls.
//
// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is
// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value.
func (z *Reader) Read(buf []byte) (n int, err error) {
if !z.Header.done {
if err = z.readHeader(true); err != nil {
return
}
}
if len(buf) == 0 {
return
}
// exhaust remaining data from previous Read()
if len(z.data) > 0 {
n = copy(buf, z.data)
z.data = z.data[n:]
if len(z.data) == 0 {
z.data = nil
}
return
}
// Break up the input buffer into BlockMaxSize blocks with at least one block.
// Then decompress into each of them concurrently if possible (no dependency).
// In case of dependency, the first block will be missing the window (except on the
// very first call), the rest will have it already since it comes from the previous block.
wbuf := buf
zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
zblocks := make([]block, zn)
for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
zb := &zblocks[zi]
// last block may be too small
if len(wbuf) < z.BlockMaxSize+len(z.window) {
wbuf = make([]byte, z.BlockMaxSize+len(z.window))
}
copy(wbuf, z.window)
if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
break
}
wbuf = wbuf[z.BlockMaxSize:]
if !z.BlockDependency {
z.wg.Add(1)
go z.decompressBlock(zb, &abort)
continue
}
// cannot decompress concurrently when dealing with block dependency
z.decompressBlock(zb, nil)
// the last block may not contain enough data
if len(z.window) == 0 {
z.window = make([]byte, winSize)
}
if len(zb.data) >= winSize {
copy(z.window, zb.data[len(zb.data)-winSize:])
} else {
copy(z.window, z.window[len(zb.data):])
copy(z.window[len(zb.data)+1:], zb.data)
}
}
z.wg.Wait()
// since a block size may be less than BlockMaxSize, trim the decompressed buffers
for _, zb := range zblocks {
if zb.err != nil {
if zb.err == errEndOfBlock {
return n, z.close()
}
return n, zb.err
}
bLen := len(zb.data)
if !z.NoChecksum {
z.checksum.Write(zb.data)
}
m := copy(buf[n:], zb.data)
// buffer the remaining data (this is necessarily the last block)
if m < bLen {
z.data = zb.data[m:]
}
n += m
}
return
}
// readBlock reads an entire frame block from the frame.
// The input buffer is the one that will receive the decompressed data.
// If the end of the frame is detected, it returns the errEndOfBlock error.
func (z *Reader) readBlock(buf []byte, b *block) error {
var bLen uint32
if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
return err
}
atomic.AddInt64(&z.Pos, 4)
switch {
case bLen == 0:
return errEndOfBlock
case bLen&(1<<31) == 0:
b.compressed = true
b.data = buf
b.zdata = make([]byte, bLen)
default:
bLen = bLen & (1<<31 - 1)
if int(bLen) > len(buf) {
return fmt.Errorf("lz4.Read: invalid block size: %d", bLen)
}
b.data = buf[:bLen]
b.zdata = buf[:bLen]
}
if _, err := io.ReadFull(z.src, b.zdata); err != nil {
return err
}
if z.BlockChecksum {
if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
return err
}
xxh := hashPool.Get()
defer hashPool.Put(xxh)
xxh.Write(b.zdata)
if h := xxh.Sum32(); h != b.checksum {
return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
}
}
return nil
}
// decompressBlock decompresses a frame block.
// In case of an error, the block err is set with it and abort is set to 1.
func (z *Reader) decompressBlock(b *block, abort *uint32) {
if abort != nil {
defer z.wg.Done()
}
if b.compressed {
n := len(z.window)
m, err := UncompressBlock(b.zdata, b.data, n)
if err != nil {
if abort != nil {
atomic.StoreUint32(abort, 1)
}
b.err = err
return
}
b.data = b.data[n : n+m]
}
atomic.AddInt64(&z.Pos, int64(len(b.data)))
}
// close validates the frame checksum (if any) and checks the next frame (if any).
func (z *Reader) close() error {
if !z.NoChecksum {
var checksum uint32
if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
return err
}
if checksum != z.checksum.Sum32() {
return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
}
}
// get ready for the next concatenated frame, but do not change the position
pos := z.Pos
z.Reset(z.src)
z.Pos = pos
// since multiple frames can be concatenated, check for another one
return z.readHeader(false)
}
// Reset discards the Reader's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) {
z.Header = Header{}
z.Pos = 0
z.src = r
z.checksum.Reset()
z.data = nil
z.window = nil
}
// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
// Returns the number of bytes written.
func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
cpus := runtime.GOMAXPROCS(0)
var buf []byte
// Since the initial buffer is nil, the first Read only reads the compressed frame options.
// The buffer can then be sized appropriately to support maximum concurrency decompression.
// If multiple frames are concatenated, Read() will return with no data decompressed but with
// potentially changed options. The buffer will be resized accordingly, always trying to
// maximize concurrency.
for {
nsize := 0
// the block max size can change if multiple streams are concatenated.
// Check it after every Read().
if z.BlockDependency {
// in case of dependency, we cannot decompress concurrently,
// so allocate the minimum buffer + window size
nsize = len(z.window) + z.BlockMaxSize
} else {
// if no dependency, allocate a buffer large enough for concurrent decompression
nsize = cpus * z.BlockMaxSize
}
if nsize != len(buf) {
buf = make([]byte, nsize)
}
m, er := z.Read(buf)
if er != nil && er != io.EOF {
return n, er
}
m, err = w.Write(buf[:m])
n += int64(m)
if err != nil || er == io.EOF {
return
}
}
}
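
A minimal decompression sketch using the Reader above; the file names are hypothetical, and WriteTo is used so that block decompression can run concurrently when the frame has no block dependency:

package main

import (
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	in, err := os.Open("data.bin.lz4") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("data.bin") // hypothetical output file
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zr := lz4.NewReader(in)
	n, err := zr.WriteTo(out)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes", n)
}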

vendor/github.com/pierrec/lz4/writer.go generated vendored Normal file

@@ -0,0 +1,377 @@
package lz4
import (
"encoding/binary"
"fmt"
"hash"
"io"
"runtime"
)
// Writer implements the LZ4 frame encoder.
type Writer struct {
Header
dst io.Writer
checksum hash.Hash32 // frame checksum
data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
zbCompressBuf []byte // buffer for compressing lz4 blocks
writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeBlock
}
// NewWriter returns a new LZ4 frame encoder.
// No access to the underlying io.Writer is performed.
// The supplied Header is checked at the first Write.
// It is ok to change it before the first Write; after that, it must not be changed until a Reset() is performed.
func NewWriter(dst io.Writer) *Writer {
return &Writer{
dst: dst,
checksum: hashPool.Get(),
Header: Header{
BlockMaxSize: 4 << 20,
},
writeSizeBuf: make([]byte, 4),
}
}
// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
func (z *Writer) writeHeader() error {
// Default to 4Mb if BlockMaxSize is not set
if z.Header.BlockMaxSize == 0 {
z.Header.BlockMaxSize = 4 << 20
}
// the only option that needs to be validated
bSize, ok := bsMapValue[z.Header.BlockMaxSize]
if !ok {
return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize)
}
// magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
// Size and DictID are optional
var buf [19]byte
// set the fixed size data: magic number, block max size and flags
binary.LittleEndian.PutUint32(buf[0:], frameMagic)
flg := byte(Version << 6)
if !z.Header.BlockDependency {
flg |= 1 << 5
}
if z.Header.BlockChecksum {
flg |= 1 << 4
}
if z.Header.Size > 0 {
flg |= 1 << 3
}
if !z.Header.NoChecksum {
flg |= 1 << 2
}
// if z.Header.Dict {
// flg |= 1
// }
buf[4] = flg
buf[5] = bSize << 4
// current buffer size: magic(4) + flags(1) + block max size (1)
n := 6
// optional items
if z.Header.Size > 0 {
binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
n += 8
}
// if z.Header.Dict {
// binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID)
// n += 4
// }
// header checksum includes the flags, block max size and optional Size and DictID
z.checksum.Write(buf[4:n])
buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF)
z.checksum.Reset()
// header ready, write it out
if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
return err
}
z.Header.done = true
// initialize buffers dependent on header info
z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize)
return nil
}
// Write compresses data from the supplied buffer into the underlying io.Writer.
// Write does not return until the data has been written.
//
// If the input buffer is large enough (typically in multiples of BlockMaxSize)
// the data will be compressed concurrently.
//
// Write never buffers any data, except in BlockDependency mode, where it may
// buffer data until it has 64Kb of it; after that, it never buffers any.
func (z *Writer) Write(buf []byte) (n int, err error) {
if !z.Header.done {
if err = z.writeHeader(); err != nil {
return
}
}
if len(buf) == 0 {
return
}
if !z.NoChecksum {
z.checksum.Write(buf)
}
// with block dependency, require at least 64Kb of data to work with
// not having 64Kb only matters initially to setup the first window
bl := 0
if z.BlockDependency && len(z.window) == 0 {
bl = len(z.data)
z.data = append(z.data, buf...)
if len(z.data) < winSize {
return len(buf), nil
}
buf = z.data
z.data = nil
}
// Break up the input buffer into BlockMaxSize blocks, provisioning the left over block.
// Then compress into each of them concurrently if possible (no dependency).
var (
zb block
wbuf = buf
zn = len(wbuf) / z.BlockMaxSize
zi = 0
leftover = len(buf) % z.BlockMaxSize
)
loop:
for zi < zn {
if z.BlockDependency {
if zi == 0 {
// first block does not have the window
zb.data = append(z.window, wbuf[:z.BlockMaxSize]...)
zb.offset = len(z.window)
wbuf = wbuf[z.BlockMaxSize-winSize:]
} else {
// set the uncompressed data including the window from previous block
zb.data = wbuf[:z.BlockMaxSize+winSize]
zb.offset = winSize
wbuf = wbuf[z.BlockMaxSize:]
}
} else {
zb.data = wbuf[:z.BlockMaxSize]
wbuf = wbuf[z.BlockMaxSize:]
}
goto write
}
// left over
if leftover > 0 {
zb = block{data: wbuf}
if z.BlockDependency {
if zn == 0 {
zb.data = append(z.window, zb.data...)
zb.offset = len(z.window)
} else {
zb.offset = winSize
}
}
leftover = 0
goto write
}
if z.BlockDependency {
if len(z.window) == 0 {
z.window = make([]byte, winSize)
}
// last buffer may be shorter than the window
if len(buf) >= winSize {
copy(z.window, buf[len(buf)-winSize:])
} else {
copy(z.window, z.window[len(buf):])
copy(z.window[len(buf)+1:], buf)
}
}
return
write:
zb = z.compressBlock(zb)
_, err = z.writeBlock(zb)
written := len(zb.data)
if bl > 0 {
if written >= bl {
written -= bl
bl = 0
} else {
bl -= written
written = 0
}
}
n += written
// remove the window in zb.data
if z.BlockDependency {
if zi == 0 {
n -= len(z.window)
} else {
n -= winSize
}
}
if err != nil {
return
}
zi++
goto loop
}
// compressBlock compresses a block.
func (z *Writer) compressBlock(zb block) block {
// compressed block size cannot exceed the input's
var (
n int
err error
zbuf = z.zbCompressBuf
)
if z.HighCompression {
n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
} else {
n, err = CompressBlock(zb.data, zbuf, zb.offset)
}
// compressible and compressed size smaller than decompressed: ok!
if err == nil && n > 0 && len(zb.zdata) < len(zb.data) {
zb.compressed = true
zb.zdata = zbuf[:n]
} else {
zb.compressed = false
zb.zdata = zb.data[zb.offset:]
}
if z.BlockChecksum {
xxh := hashPool.Get()
xxh.Write(zb.zdata)
zb.checksum = xxh.Sum32()
hashPool.Put(xxh)
}
return zb
}
// writeBlock writes a frame block to the underlying io.Writer (size, data).
func (z *Writer) writeBlock(zb block) (int, error) {
bLen := uint32(len(zb.zdata))
if !zb.compressed {
bLen |= 1 << 31
}
n := 0
binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen)
n, err := z.dst.Write(z.writeSizeBuf)
if err != nil {
return n, err
}
m, err := z.dst.Write(zb.zdata)
n += m
if err != nil {
return n, err
}
if z.BlockChecksum {
binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum)
m, err := z.dst.Write(z.writeSizeBuf)
n += m
if err != nil {
return n, err
}
}
return n, nil
}
// Flush flushes any pending compressed data to the underlying writer.
// Flush does not return until the data has been written.
// If the underlying writer returns an error, Flush returns that error.
//
// Flush is only required when in BlockDependency mode and the total amount of
// data written is less than 64Kb.
func (z *Writer) Flush() error {
if len(z.data) == 0 {
return nil
}
zb := z.compressBlock(block{data: z.data})
if _, err := z.writeBlock(zb); err != nil {
return err
}
return nil
}
// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
if !z.Header.done {
if err := z.writeHeader(); err != nil {
return err
}
}
// buffered data for the block dependency window
if z.BlockDependency && len(z.data) > 0 {
zb := block{data: z.data}
if _, err := z.writeBlock(z.compressBlock(zb)); err != nil {
return err
}
}
if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil {
return err
}
if !z.NoChecksum {
if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil {
return err
}
}
return nil
}
// Reset clears the state of the Writer z such that it is equivalent to its
// initial state from NewWriter, but instead writing to w.
// No access to the underlying io.Writer is performed.
func (z *Writer) Reset(w io.Writer) {
z.Header = Header{}
z.dst = w
z.checksum.Reset()
z.data = nil
z.window = nil
}
// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer.
// Returns the number of bytes read.
// It does not close the Writer.
func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) {
cpus := runtime.GOMAXPROCS(0)
buf := make([]byte, cpus*z.BlockMaxSize)
for {
m, er := io.ReadFull(r, buf)
n += int64(m)
if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF {
if _, err = z.Write(buf[:m]); err != nil {
return
}
if er == nil {
continue
}
return
}
return n, er
}
}
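
And the mirror-image compression sketch for the Writer above, again with hypothetical file names; ReadFrom splits the input into BlockMaxSize blocks and compresses them concurrently when possible, and Close writes the end-of-frame marker and frame checksum:

package main

import (
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	in, err := os.Open("data.bin") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	out, err := os.Create("data.bin" + lz4.Extension) // data.bin.lz4
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	zw := lz4.NewWriter(out)
	if _, err := zw.ReadFrom(in); err != nil {
		log.Fatal(err)
	}
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
}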

vendor/github.com/pierrec/xxHash/LICENSE generated vendored Normal file

@@ -0,0 +1,28 @@
Copyright (c) 2014, Pierre Curto
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of xxHash nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go generated vendored Normal file

@@ -0,0 +1,212 @@
// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version).
// (https://github.com/Cyan4973/xxHash/)
package xxHash32
import "hash"
const (
prime32_1 = 2654435761
prime32_2 = 2246822519
prime32_3 = 3266489917
prime32_4 = 668265263
prime32_5 = 374761393
)
type xxHash struct {
seed uint32
v1 uint32
v2 uint32
v3 uint32
v4 uint32
totalLen uint64
buf [16]byte
bufused int
}
// New returns a new Hash32 instance.
func New(seed uint32) hash.Hash32 {
xxh := &xxHash{seed: seed}
xxh.Reset()
return xxh
}
// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
func (xxh xxHash) Sum(b []byte) []byte {
h32 := xxh.Sum32()
return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
}
// Reset resets the Hash to its initial state.
func (xxh *xxHash) Reset() {
xxh.v1 = xxh.seed + prime32_1 + prime32_2
xxh.v2 = xxh.seed + prime32_2
xxh.v3 = xxh.seed
xxh.v4 = xxh.seed - prime32_1
xxh.totalLen = 0
xxh.bufused = 0
}
// Size returns the number of bytes returned by Sum().
func (xxh *xxHash) Size() int {
return 4
}
// BlockSize gives the minimum number of bytes accepted by Write().
func (xxh *xxHash) BlockSize() int {
return 1
}
// Write adds input bytes to the Hash.
// It never returns an error.
func (xxh *xxHash) Write(input []byte) (int, error) {
n := len(input)
m := xxh.bufused
xxh.totalLen += uint64(n)
r := len(xxh.buf) - m
if n < r {
copy(xxh.buf[m:], input)
xxh.bufused += len(input)
return n, nil
}
p := 0
if m > 0 {
// some data left from previous update
copy(xxh.buf[xxh.bufused:], input[:r])
xxh.bufused += len(input) - r
// fast rotl(13)
xxh.v1 = rol13(xxh.v1+u32(xxh.buf[:])*prime32_2) * prime32_1
xxh.v2 = rol13(xxh.v2+u32(xxh.buf[4:])*prime32_2) * prime32_1
xxh.v3 = rol13(xxh.v3+u32(xxh.buf[8:])*prime32_2) * prime32_1
xxh.v4 = rol13(xxh.v4+u32(xxh.buf[12:])*prime32_2) * prime32_1
p = r
xxh.bufused = 0
}
// Causes compiler to work directly from registers instead of stack:
v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
for n := n - 16; p <= n; p += 16 {
sub := input[p:][:16] //BCE hint for compiler
v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1
v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1
v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1
v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1
}
xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
copy(xxh.buf[xxh.bufused:], input[p:])
xxh.bufused += len(input) - p
return n, nil
}
// Sum32 returns the 32-bit hash value.
func (xxh *xxHash) Sum32() uint32 {
h32 := uint32(xxh.totalLen)
if xxh.totalLen >= 16 {
h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
} else {
h32 += xxh.seed + prime32_5
}
p := 0
n := xxh.bufused
for n := n - 4; p <= n; p += 4 {
h32 += u32(xxh.buf[p:p+4]) * prime32_3
h32 = rol17(h32) * prime32_4
}
for ; p < n; p++ {
h32 += uint32(xxh.buf[p]) * prime32_5
h32 = rol11(h32) * prime32_1
}
h32 ^= h32 >> 15
h32 *= prime32_2
h32 ^= h32 >> 13
h32 *= prime32_3
h32 ^= h32 >> 16
return h32
}
// Checksum returns the 32-bit hash value.
func Checksum(input []byte, seed uint32) uint32 {
n := len(input)
h32 := uint32(n)
if n < 16 {
h32 += seed + prime32_5
} else {
v1 := seed + prime32_1 + prime32_2
v2 := seed + prime32_2
v3 := seed
v4 := seed - prime32_1
p := 0
for n := n - 16; p <= n; p += 16 {
sub := input[p:][:16] //BCE hint for compiler
v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1
v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1
v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1
v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1
}
input = input[p:]
n -= p
h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
}
p := 0
for n := n - 4; p <= n; p += 4 {
h32 += u32(input[p:p+4]) * prime32_3
h32 = rol17(h32) * prime32_4
}
for p < n {
h32 += uint32(input[p]) * prime32_5
h32 = rol11(h32) * prime32_1
p++
}
h32 ^= h32 >> 15
h32 *= prime32_2
h32 ^= h32 >> 13
h32 *= prime32_3
h32 ^= h32 >> 16
return h32
}
func u32(buf []byte) uint32 {
// go compiler recognizes this pattern and optimizes it on little endian platforms
return uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
}
func rol1(u uint32) uint32 {
return u<<1 | u>>31
}
func rol7(u uint32) uint32 {
return u<<7 | u>>25
}
func rol11(u uint32) uint32 {
return u<<11 | u>>21
}
func rol12(u uint32) uint32 {
return u<<12 | u>>20
}
func rol13(u uint32) uint32 {
return u<<13 | u>>19
}
func rol17(u uint32) uint32 {
return u<<17 | u>>15
}
func rol18(u uint32) uint32 {
return u<<18 | u>>14
}
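
To round things off, a small sketch exercising the xxHash32 package above, showing that the one-shot Checksum and the streaming hash.Hash32 interface agree for the same seed:

package main

import (
	"fmt"

	"github.com/pierrec/xxHash/xxHash32"
)

func main() {
	data := []byte("xxHash32 sample input")

	// One-shot checksum with a zero seed.
	fmt.Printf("%08x\n", xxHash32.Checksum(data, 0))

	// Streaming interface; same result for the same seed.
	h := xxHash32.New(0)
	h.Write(data)
	fmt.Printf("%08x\n", h.Sum32())
}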