Mirror of https://github.com/coredns/coredns.git, synced 2025-11-04 11:13:16 -05:00
Update vendor libraries except client-go, apimachinery and ugorji/go (#1197)
This fix updates vendor libraries except client-go, apimachinery and ugorji/go, as github.com/ugorji/go/codec is causing compatibility issues.

Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
vendor/github.com/pierrec/lz4/.gitignore (generated, vendored; new file, 31 lines)
@@ -0,0 +1,31 @@
# Created by https://www.gitignore.io/api/macos

### macOS ###
*.DS_Store
.AppleDouble
.LSOverride

# Icon must end with two \r
Icon


# Thumbnails
._*

# Files that might appear in the root of a volume
.DocumentRevisions-V100
.fseventsd
.Spotlight-V100
.TemporaryItems
.Trashes
.VolumeIcon.icns
.com.apple.timemachine.donotpresent

# Directories potentially created on remote AFP share
.AppleDB
.AppleDesktop
Network Trash Folder
Temporary Items
.apdisk

# End of https://www.gitignore.io/api/macos
vendor/github.com/pierrec/lz4/.travis.yml (generated, vendored; 7 lines changed)
@@ -1,9 +1,8 @@
language: go

go:
- 1.4
- 1.5
- 1.x

script:
- go test -cpu=2
- go test -cpu=2 -race
- go test -v -cpu=2
- go test -v -cpu=2 -race
vendor/github.com/pierrec/lz4/block.go (generated, vendored; 41 lines changed)
@@ -3,7 +3,6 @@ package lz4
import (
"encoding/binary"
"errors"
"unsafe"
)

// block represents a frame data block.
@@ -111,11 +110,6 @@ func UncompressBlock(src, dst []byte, di int) (int, error) {
}
}

type hashEntry struct {
generation uint
value int
}

// CompressBlock compresses the source buffer starting at soffet into the destination one.
// This is the fast version of LZ4 compression and also the default one.
//
@@ -123,27 +117,6 @@ type hashEntry struct {
//
// An error is returned if the destination buffer is too small.
func CompressBlock(src, dst []byte, soffset int) (int, error) {
var hashTable [hashTableSize]hashEntry
return compressGenerationalBlock(src, dst, soffset, 0, hashTable[:])
}

// getUint32 is a despicably evil function (well, for Go!) that takes advantage
// of the machine's byte order to save some operations. This may look
// inefficient but it is significantly faster on littleEndian machines,
// which include x84, amd64, and some ARM processors.
func getUint32(b []byte) uint32 {
_ = b[3]
if isLittleEndian {
return *(*uint32)(unsafe.Pointer(&b))
}

return uint32(b[0]) |
uint32(b[1])<<8 |
uint32(b[2])<<16 |
uint32(b[3])<<24
}

func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, hashTable []hashEntry) (int, error) {
sn, dn := len(src)-mfLimit, len(dst)
if sn <= 0 || dn == 0 || soffset >= sn {
return 0, nil
@@ -152,28 +125,26 @@ func compressGenerationalBlock(src, dst []byte, soffset int, generation uint, ha

// fast scan strategy:
// we only need a hash table to store the last sequences (4 bytes)
var hashTable [1 << hashLog]int
var hashShift = uint((minMatch * 8) - hashLog)

// Initialise the hash table with the first 64Kb of the input buffer
// (used when compressing dependent blocks)
for si < soffset {
h := getUint32(src[si:]) * hasher >> hashShift
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
si++
hashTable[h] = hashEntry{generation, si}
hashTable[h] = si
}

anchor := si
fma := 1 << skipStrength
for si < sn-minMatch {
// hash the next 4 bytes (sequence)...
h := getUint32(src[si:]) * hasher >> hashShift
if hashTable[h].generation != generation {
hashTable[h] = hashEntry{generation, 0}
}
h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
// -1 to separate existing entries from new ones
ref := hashTable[h].value - 1
ref := hashTable[h] - 1
// ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
hashTable[h].value = si + 1
hashTable[h] = si + 1
// no need to check the last 3 bytes in the first literal 4 bytes as
// this guarantees that the next match, if any, is compressed with
// a lower size, since to have some compression we must have:
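The block.go hunks above swap the unsafe-pointer getUint32 helper for encoding/binary reads. As a rough illustration only (not part of the vendored diff, and with made-up sample bytes), the portable form the updated code relies on looks like this:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := []byte{0x01, 0x02, 0x03, 0x04}
	// binary.LittleEndian.Uint32 always treats b[0] as the least significant
	// byte, regardless of the host CPU's byte order.
	v := binary.LittleEndian.Uint32(b)
	fmt.Printf("0x%08x\n", v) // prints 0x04030201
}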
vendor/github.com/pierrec/lz4/lz4.go (generated, vendored; 13 lines changed)
@@ -20,7 +20,6 @@ package lz4
import (
"hash"
"sync"
"unsafe"

"github.com/pierrec/xxHash/xxHash32"
)
@@ -65,18 +64,6 @@ func init() {
}
}

var isLittleEndian = getIsLittleEndian()

func getIsLittleEndian() (ret bool) {
var i int = 0x1
bs := (*[1]byte)(unsafe.Pointer(&i))
if bs[0] == 0 {
return false
}

return true
}

// Header describes the various flags that can be set on a Writer or obtained from a Reader.
// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
//
vendor/github.com/pierrec/lz4/lz4_test.go (generated, vendored; 254 lines changed)
@@ -8,6 +8,7 @@ import (
"io"
"io/ioutil"
"math/big"
"os"
"reflect"
"testing"
@@ -261,6 +262,25 @@ func TestBlock(t *testing.T) {
}
}

func TestBlockCompression(t *testing.T) {
input := make([]byte, 64*1024)

for i := 0; i < 64*1024; i += 1 {
input[i] = byte(i & 0x1)
}
output := make([]byte, 64*1024)

c, err := lz4.CompressBlock(input, output, 0)

if err != nil {
t.Fatal(err)
}

if c == 0 {
t.Fatal("cannot compress compressible data")
}
}

func BenchmarkUncompressBlock(b *testing.B) {
d := make([]byte, len(lorem))
z := make([]byte, len(lorem))
@@ -395,57 +415,60 @@ func TestReset(t *testing.T) {
func TestFrame(t *testing.T) {
for _, tdata := range testDataItems {
data := tdata.data
// test various options
for _, headerItem := range testHeaderItems {
tag := tdata.label + ": " + headerItem.label
rw := bytes.NewBuffer(nil)
t.Run(tdata.label, func(t *testing.T) {
t.Parallel()
// test various options
for _, headerItem := range testHeaderItems {
tag := tdata.label + ": " + headerItem.label
rw := bytes.NewBuffer(nil)

// Set all options to non default values and compress
w := lz4.NewWriter(rw)
w.Header = headerItem.header
// Set all options to non default values and compress
w := lz4.NewWriter(rw)
w.Header = headerItem.header

n, err := w.Write(data)
if err != nil {
t.Errorf("%s: Write(): unexpected error: %v", tag, err)
t.FailNow()
}
if n != len(data) {
t.Errorf("%s: Write(): expected %d bytes written, got %d", tag, len(data), n)
t.FailNow()
}
if err = w.Close(); err != nil {
t.Errorf("%s: Close(): unexpected error: %v", tag, err)
t.FailNow()
}
n, err := w.Write(data)
if err != nil {
t.Errorf("%s: Write(): unexpected error: %v", tag, err)
t.FailNow()
}
if n != len(data) {
t.Errorf("%s: Write(): expected %d bytes written, got %d", tag, len(data), n)
t.FailNow()
}
if err = w.Close(); err != nil {
t.Errorf("%s: Close(): unexpected error: %v", tag, err)
t.FailNow()
}

// Decompress
r := lz4.NewReader(rw)
n, err = r.Read(nil)
if err != nil {
t.Errorf("%s: Read(): unexpected error: %v", tag, err)
t.FailNow()
}
if n != 0 {
t.Errorf("%s: Read(): expected 0 bytes read, got %d", tag, n)
}
// Decompress
r := lz4.NewReader(rw)
n, err = r.Read(nil)
if err != nil {
t.Errorf("%s: Read(): unexpected error: %v", tag, err)
t.FailNow()
}
if n != 0 {
t.Errorf("%s: Read(): expected 0 bytes read, got %d", tag, n)
}

buf := make([]byte, len(data))
n, err = r.Read(buf)
if err != nil && err != io.EOF {
t.Errorf("%s: Read(): unexpected error: %v", tag, err)
t.FailNow()
}
if n != len(data) {
t.Errorf("%s: Read(): expected %d bytes read, got %d", tag, len(data), n)
}
buf = buf[:n]
if !bytes.Equal(buf, data) {
t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, len(buf), len(data))
t.FailNow()
}
buf := make([]byte, len(data))
n, err = r.Read(buf)
if err != nil && err != io.EOF {
t.Errorf("%s: Read(): unexpected error: %v", tag, err)
t.FailNow()
}
if n != len(data) {
t.Errorf("%s: Read(): expected %d bytes read, got %d", tag, len(data), n)
}
buf = buf[:n]
if !bytes.Equal(buf, data) {
t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, len(buf), len(data))
t.FailNow()
}

compareHeaders(w.Header, r.Header, t)
}
compareHeaders(w.Header, r.Header, t)
}
})
}
}
@@ -454,76 +477,82 @@ func TestReadFromWriteTo(t *testing.T) {
for _, tdata := range testDataItems {
data := tdata.data

// test various options
for _, headerItem := range testHeaderItems {
tag := "ReadFromWriteTo: " + tdata.label + ": " + headerItem.label
dbuf := bytes.NewBuffer(data)
t.Run(tdata.label, func(t *testing.T) {
t.Parallel()
// test various options
for _, headerItem := range testHeaderItems {
tag := "ReadFromWriteTo: " + tdata.label + ": " + headerItem.label
dbuf := bytes.NewBuffer(data)

zbuf := bytes.NewBuffer(nil)
w := lz4.NewWriter(zbuf)
w.Header = headerItem.header
if _, err := w.ReadFrom(dbuf); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}
zbuf := bytes.NewBuffer(nil)
w := lz4.NewWriter(zbuf)
w.Header = headerItem.header
if _, err := w.ReadFrom(dbuf); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

if err := w.Close(); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}
if err := w.Close(); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

buf := bytes.NewBuffer(nil)
r := lz4.NewReader(zbuf)
if _, err := r.WriteTo(buf); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}
buf := bytes.NewBuffer(nil)
r := lz4.NewReader(zbuf)
if _, err := r.WriteTo(buf); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

if !bytes.Equal(buf.Bytes(), data) {
t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
t.FailNow()
if !bytes.Equal(buf.Bytes(), data) {
t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
t.FailNow()
}
}
}
})
}
}

// TestCopy will use io.Copy and avoid using Reader.WriteTo() and Writer.ReadFrom().
func TestCopy(t *testing.T) {
w := lz4.NewWriter(nil)
r := lz4.NewReader(nil)
for _, tdata := range testDataItems {
data := tdata.data
t.Run(tdata.label, func(t *testing.T) {
t.Parallel()

// test various options
for _, headerItem := range testHeaderItems {
tag := "io.Copy: " + tdata.label + ": " + headerItem.label
dbuf := &testBuffer{bytes.NewBuffer(data)}
w := lz4.NewWriter(nil)
r := lz4.NewReader(nil)
// test various options
for _, headerItem := range testHeaderItems {
tag := "io.Copy: " + tdata.label + ": " + headerItem.label
dbuf := &testBuffer{bytes.NewBuffer(data)}

zbuf := bytes.NewBuffer(nil)
w.Reset(zbuf)
w.Header = headerItem.header
if _, err := io.Copy(w, dbuf); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
zbuf := bytes.NewBuffer(nil)
w.Reset(zbuf)
w.Header = headerItem.header
if _, err := io.Copy(w, dbuf); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

if err := w.Close(); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

buf := &testBuffer{bytes.NewBuffer(nil)}
r.Reset(zbuf)
if _, err := io.Copy(buf, r); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

if !bytes.Equal(buf.Bytes(), data) {
t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
t.FailNow()
}
}

if err := w.Close(); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

buf := &testBuffer{bytes.NewBuffer(nil)}
r.Reset(zbuf)
if _, err := io.Copy(buf, r); err != nil {
t.Errorf("%s: unexpected error: %s", tag, err)
t.FailNow()
}

if !bytes.Equal(buf.Bytes(), data) {
t.Errorf("%s: decompress(compress(data)) != data (%d/%d)", tag, buf.Len(), len(data))
t.FailNow()
}
}
})
}
}
@@ -644,3 +673,26 @@ func writeReadChunked(t *testing.T, in []byte, chunkSize int) []byte {
}
return out
}

func TestMultiBlockWrite(t *testing.T) {
f, err := os.Open("testdata/207326ba-36f8-11e7-954a-aca46ba8ca73.png")
if err != nil {
t.Fatal(err)
}
defer f.Close()

zbuf := bytes.NewBuffer(nil)
zw := lz4.NewWriter(zbuf)
if _, err := io.Copy(zw, f); err != nil {
t.Fatal(err)
}
if err := zw.Flush(); err != nil {
t.Fatal(err)
}

buf := bytes.NewBuffer(nil)
zr := lz4.NewReader(zbuf)
if _, err := io.Copy(buf, zr); err != nil {
t.Fatal(err)
}
}
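The lz4_test.go changes above largely wrap each data item in a t.Run subtest that calls t.Parallel(). A minimal sketch of that standard Go testing pattern follows; the labels are hypothetical and not taken from the vendored tests:

package lz4_test

import "testing"

func TestSketch(t *testing.T) {
	labels := []string{"empty", "lorem"} // hypothetical test-case labels
	for _, label := range labels {
		label := label // capture the loop variable for the parallel closure
		t.Run(label, func(t *testing.T) {
			t.Parallel() // subtests for different cases may run concurrently
			_ = label    // ...compress and decompress this case's data here...
		})
	}
}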
vendor/github.com/pierrec/lz4/writer.go (generated, vendored; 14 lines changed)
@@ -16,10 +16,8 @@ type Writer struct {
data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer

zbCompressBuf []byte // buffer for compressing lz4 blocks
writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
hashTable []hashEntry
currentGeneration uint
zbCompressBuf []byte // buffer for compressing lz4 blocks
writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
}

// NewWriter returns a new LZ4 frame encoder.
@@ -33,7 +31,6 @@ func NewWriter(dst io.Writer) *Writer {
Header: Header{
BlockMaxSize: 4 << 20,
},
hashTable: make([]hashEntry, hashTableSize),
writeSizeBuf: make([]byte, 4),
}
}
@@ -245,11 +242,7 @@ func (z *Writer) compressBlock(zb block) block {
if z.HighCompression {
n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
} else {
n, err = compressGenerationalBlock(zb.data, zbuf, zb.offset, z.currentGeneration, z.hashTable)
z.currentGeneration++
if z.currentGeneration == 0 { // wrapped around, reset table
z.hashTable = make([]hashEntry, hashTableSize)
}
n, err = CompressBlock(zb.data, zbuf, zb.offset)
}

// compressible and compressed size smaller than decompressed: ok!
@@ -257,6 +250,7 @@ func (z *Writer) compressBlock(zb block) block {
zb.compressed = true
zb.zdata = zbuf[:n]
} else {
zb.compressed = false
zb.zdata = zb.data[zb.offset:]
}
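For context, the frame-level API touched by the writer.go change is exercised in the tests above via lz4.NewWriter and lz4.NewReader. A minimal round-trip sketch along those lines, assuming the vendored import path github.com/pierrec/lz4; the sample input string is made up:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	zbuf := bytes.NewBuffer(nil)
	zw := lz4.NewWriter(zbuf) // compress into zbuf with default Header options
	if _, err := io.Copy(zw, strings.NewReader("hello lz4 frame")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil { // finish the frame
		panic(err)
	}

	out := bytes.NewBuffer(nil)
	if _, err := io.Copy(out, lz4.NewReader(zbuf)); err != nil { // decompress
		panic(err)
	}
	fmt.Println(out.String()) // hello lz4 frame
}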