Drop caddy from vendor (#700)

* Removed caddy

* new stuff

* Now need to go get caddy

* Duh

Author: Miek Gieben
Date:   2017-06-03 08:27:41 +01:00
Commit: 30217a4cb2 (parent 18bc52b5e0), committed via GitHub
269 changed files with 32812 additions and 29134 deletions


@@ -1,31 +0,0 @@
# Created by https://www.gitignore.io/api/macos
### macOS ###
*.DS_Store
.AppleDouble
.LSOverride
# Icon must end with two \r
Icon
# Thumbnails
._*
# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent
# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk
# End of https://www.gitignore.io/api/macos


@@ -3,6 +3,7 @@ package lz4
import (
"encoding/binary"
"errors"
"unsafe"
)
// block represents a frame data block.
@@ -110,6 +111,11 @@ func UncompressBlock(src, dst []byte, di int) (int, error) {
}
}
type hashEntry struct {
generation uint
value int
}
// CompressBlock compresses the source buffer starting at soffset into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
@@ -117,6 +123,27 @@ func UncompressBlock(src, dst []byte, di int) (int, error) {
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, soffset int) (int, error) {
var hashTable [hashTableSize]hashEntry
return compressGenerationalBlock(src, dst, soffset, 0, hashTable[:])
}
// getUint32 is a despicably evil function (well, for Go!) that takes advantage
// of the machine's byte order to save some operations. This may look
// inefficient but it is significantly faster on little-endian machines,
// which include x86, amd64, and some ARM processors.
func getUint32(b []byte) uint32 {
_ = b[3]
if isLittleEndian {
return *(*uint32)(unsafe.Pointer(&b[0]))
}
return uint32(b[0]) |
uint32(b[1])<<8 |
uint32(b[2])<<16 |
uint32(b[3])<<24
}
func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, hashTable []hashEntry) (int, error) {
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 || soffset >= sn {
return 0, nil
@@ -125,26 +152,28 @@ func CompressBlock(src, dst []byte, soffset int) (int, error) {
// fast scan strategy:
// we only need a hash table to store the last sequences (4 bytes)
var hashTable [1 << hashLog]int
var hashShift = uint((minMatch * 8) - hashLog)
// Initialise the hash table with the first 64Kb of the input buffer
// (used when compressing dependent blocks)
for si < soffset {
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
h := getUint32(src[si:]) * hasher >> hashShift
si++
hashTable[h] = si
hashTable[h] = hashEntry{generation, si}
}
anchor := si
fma := 1 << skipStrength
for si < sn-minMatch {
// hash the next 4 bytes (sequence)...
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
h := getUint32(src[si:]) * hasher >> hashShift
if hashTable[h].generation != generation {
hashTable[h] = hashEntry{generation, 0}
}
// -1 to separate existing entries from new ones
ref := hashTable[h] - 1
ref := hashTable[h].value - 1
// ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
hashTable[h] = si + 1
hashTable[h].value = si + 1
// no need to check the last 3 bytes in the first literal 4 bytes as
// this guarantees that the next match, if any, is compressed with
// a lower size, since to have some compression we must have:

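The block.go hunks above replace the per-call `var hashTable [1 << hashLog]int` with entries tagged by a generation counter, so a caller can hand the same table to `compressGenerationalBlock` repeatedly and "clear" it by bumping the generation instead of zeroing every slot. A minimal standalone sketch of that trick (type and function names here are illustrative, not the vendored ones):

```go
package main

import "fmt"

// entry mimics the hashEntry idea from the diff: a value tagged with the
// generation that wrote it. An entry from an older generation is treated
// as empty, so the table never has to be cleared between uses.
type entry struct {
	generation uint
	value      int
}

type table struct {
	entries    []entry
	generation uint
}

func newTable(size int) *table { return &table{entries: make([]entry, size)} }

// reset starts a new generation instead of zeroing the slice; on wrap-around
// the slice is reallocated, mirroring the writer.go change in this diff.
func (t *table) reset() {
	t.generation++
	if t.generation == 0 {
		t.entries = make([]entry, len(t.entries))
	}
}

// get returns the stored value, or 0 if the slot belongs to an older generation.
func (t *table) get(h int) int {
	if t.entries[h].generation != t.generation {
		return 0
	}
	return t.entries[h].value
}

func (t *table) put(h, v int) { t.entries[h] = entry{t.generation, v} }

func main() {
	t := newTable(8)
	t.put(3, 42)
	fmt.Println(t.get(3)) // 42
	t.reset()             // O(1): no zeroing of the table
	fmt.Println(t.get(3)) // 0, stale entry ignored
}
```

Reallocating on wrap-around matters because once the counter returns to 0, entries written during an earlier generation 0 would otherwise look live again; the writer.go hunk at the bottom of this diff does exactly that.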
vendor/github.com/pierrec/lz4/lz4.go

@@ -20,6 +20,7 @@ package lz4
import (
"hash"
"sync"
"unsafe"
"github.com/pierrec/xxHash/xxHash32"
)
@@ -64,6 +65,18 @@ func init() {
}
}
var isLittleEndian = getIsLittleEndian()
func getIsLittleEndian() (ret bool) {
var i int = 0x1
bs := (*[1]byte)(unsafe.Pointer(&i))
if bs[0] == 0 {
return false
}
return true
}
// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//

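`getUint32` in the block.go hunk and `getIsLittleEndian` above are two halves of one optimisation: a raw 4-byte load through `unsafe.Pointer` only produces the same value as the portable shift-and-or read when the host stores the least significant byte first. A small sketch of why the guard is needed (`load4` is an illustrative name, not part of the package):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// load4 is the portable read used as the fallback in the diff: it assembles
// the 4 bytes in little-endian order regardless of the host byte order.
func load4(b []byte) uint32 {
	_ = b[3] // bounds-check hint, as in the vendored code
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}

	// The portable read and the stdlib helper always agree.
	fmt.Printf("portable: %#010x\n", load4(b))
	fmt.Printf("binary:   %#010x\n", binary.LittleEndian.Uint32(b))

	// The raw load only matches on a little-endian host, which is why the
	// vendored code guards it with the isLittleEndian check.
	fmt.Printf("unsafe:   %#010x\n", *(*uint32)(unsafe.Pointer(&b[0])))
}
```

On amd64 all three lines print the same value; on a big-endian host the last line would differ, which is exactly the case the `isLittleEndian` check protects against.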

@@ -261,25 +261,6 @@ func TestBlock(t *testing.T) {
}
}
func TestBlockCompression(t *testing.T) {
input := make([]byte, 64*1024)
for i := 0; i < 64*1024; i += 1 {
input[i] = byte(i & 0x1)
}
output := make([]byte, 64*1024)
c, err := lz4.CompressBlock(input, output, 0)
if err != nil {
t.Fatal(err)
}
if c == 0 {
t.Fatal("cannot compress compressible data")
}
}
func BenchmarkUncompressBlock(b *testing.B) {
d := make([]byte, len(lorem))
z := make([]byte, len(lorem))

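The removed `TestBlockCompression` exercised the block-level API directly. Below is a round-trip sketch against the two signatures visible in these hunks, `CompressBlock(src, dst, soffset)` and `UncompressBlock(src, dst, di)`, with illustrative buffer sizes; it assumes, as the removed test did, that a return of 0 from `CompressBlock` means the input was not compressible and that the returned ints are byte counts:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	// Same shape of input as the removed test: 64KB of highly compressible data.
	src := make([]byte, 64*1024)
	for i := range src {
		src[i] = byte(i & 0x1)
	}

	// Compress into a buffer of the same size; for compressible input the
	// result is smaller than the source.
	zbuf := make([]byte, len(src))
	n, err := lz4.CompressBlock(src, zbuf, 0)
	if err != nil {
		log.Fatal(err)
	}
	if n == 0 {
		log.Fatal("cannot compress compressible data")
	}

	// Decompress and verify the round trip.
	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(zbuf[:n], out, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(src, out[:m]))
}
```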

@@ -16,8 +16,10 @@ type Writer struct {
data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
zbCompressBuf []byte // buffer for compressing lz4 blocks
writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
zbCompressBuf []byte // buffer for compressing lz4 blocks
writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
hashTable []hashEntry
currentGeneration uint
}
// NewWriter returns a new LZ4 frame encoder.
@@ -31,6 +33,7 @@ func NewWriter(dst io.Writer) *Writer {
Header: Header{
BlockMaxSize: 4 << 20,
},
hashTable: make([]hashEntry, hashTableSize),
writeSizeBuf: make([]byte, 4),
}
}
@@ -242,7 +245,11 @@ func (z *Writer) compressBlock(zb block) block {
if z.HighCompression {
n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
} else {
n, err = CompressBlock(zb.data, zbuf, zb.offset)
n, err = compressGenerationalBlock(zb.data, zbuf, zb.offset, z.currentGeneration, z.hashTable)
z.currentGeneration++
if z.currentGeneration == 0 { // wrapped around, reset table
z.hashTable = make([]hashEntry, hashTableSize)
}
}
// compressible and compressed size smaller than decompressed: ok!
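The writer.go hunk is where the generational table pays off: the frame `Writer` now owns a single `hashTable`, feeds it to `compressGenerationalBlock`, and bumps `currentGeneration` per block, reallocating only when the counter wraps to 0. A usage sketch of the frame API (`Write` and `Close` are assumed from the package's `io.WriteCloser`-style `Writer`; only `NewWriter` appears in this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/pierrec/lz4"
)

func main() {
	var compressed bytes.Buffer

	// NewWriter is shown in the hunk above. Internally, each block is now
	// compressed with the Writer's own hash table under a bumped generation,
	// so no per-block table allocation takes place.
	zw := lz4.NewWriter(&compressed)

	payload := bytes.Repeat([]byte("coredns "), 4096)
	if _, err := zw.Write(payload); err != nil {
		log.Fatal(err)
	}
	// Close flushes the final block and ends the LZ4 frame.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("frame: %d bytes in, %d bytes out\n", len(payload), compressed.Len())
}
```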