Dep helper (#2151)

* Add dep task to update go dependencies

* Update go dependencies
Authored by Manuel Alejandro de Brito Fontes on 2018-09-29 19:47:07 -03:00, committed by Miek Gieben
parent 8f8b81f56b
commit 0e8977761d
764 changed files with 172 additions and 267451 deletions


@@ -1,197 +0,0 @@
package zipkintracer
import (
"bytes"
"fmt"
"net/http"
"testing"
opentracing "github.com/opentracing/opentracing-go"
)
var tags []string
func init() {
tags = make([]string, 1000)
for j := 0; j < len(tags); j++ {
tags[j] = fmt.Sprintf("%d", randomID())
}
}
func executeOps(sp opentracing.Span, numEvent, numTag, numItems int) {
for j := 0; j < numEvent; j++ {
sp.LogEvent("event")
}
for j := 0; j < numTag; j++ {
sp.SetTag(tags[j], nil)
}
for j := 0; j < numItems; j++ {
sp.SetBaggageItem(tags[j], tags[j])
}
}
func benchmarkWithOps(b *testing.B, numEvent, numTag, numItems int) {
var r CountingRecorder
t, err := NewTracer(&r)
if err != nil {
b.Fatalf("Unable to create Tracer: %+v", err)
}
benchmarkWithOpsAndCB(b, func() opentracing.Span {
return t.StartSpan("test")
}, numEvent, numTag, numItems)
if int(r) != b.N {
b.Fatalf("missing traces: expected %d, got %d", b.N, r)
}
}
func benchmarkWithOpsAndCB(b *testing.B, create func() opentracing.Span,
numEvent, numTag, numItems int) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
sp := create()
executeOps(sp, numEvent, numTag, numItems)
sp.Finish()
}
b.StopTimer()
}
func BenchmarkSpan_Empty(b *testing.B) {
benchmarkWithOps(b, 0, 0, 0)
}
func BenchmarkSpan_100Events(b *testing.B) {
benchmarkWithOps(b, 100, 0, 0)
}
func BenchmarkSpan_1000Events(b *testing.B) {
benchmarkWithOps(b, 1000, 0, 0)
}
func BenchmarkSpan_100Tags(b *testing.B) {
benchmarkWithOps(b, 0, 100, 0)
}
func BenchmarkSpan_1000Tags(b *testing.B) {
benchmarkWithOps(b, 0, 1000, 0)
}
func BenchmarkSpan_100BaggageItems(b *testing.B) {
benchmarkWithOps(b, 0, 0, 100)
}
func BenchmarkTrimmedSpan_100Events_100Tags_100BaggageItems(b *testing.B) {
var r CountingRecorder
t, err := NewTracer(
&r,
TrimUnsampledSpans(true),
WithSampler(neverSample),
TraceID128Bit(true),
)
if err != nil {
b.Fatalf("Unable to create Tracer: %+v", err)
}
benchmarkWithOpsAndCB(b, func() opentracing.Span {
sp := t.StartSpan("test")
return sp
}, 100, 100, 100)
if int(r) != b.N {
b.Fatalf("missing traces: expected %d, got %d", b.N, r)
}
}
func benchmarkInject(b *testing.B, format opentracing.BuiltinFormat, numItems int) {
var r CountingRecorder
tracer, err := NewTracer(&r)
if err != nil {
b.Fatalf("Unable to create Tracer: %+v", err)
}
sp := tracer.StartSpan("testing")
executeOps(sp, 0, 0, numItems)
var carrier interface{}
switch format {
case opentracing.TextMap, opentracing.HTTPHeaders:
carrier = opentracing.HTTPHeadersCarrier(http.Header{})
case opentracing.Binary:
carrier = &bytes.Buffer{}
default:
b.Fatalf("unhandled format %d", format)
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := tracer.Inject(sp.Context(), format, carrier)
if err != nil {
b.Fatal(err)
}
}
}
func benchmarkExtract(b *testing.B, format opentracing.BuiltinFormat, numItems int) {
var r CountingRecorder
tracer, err := NewTracer(&r)
if err != nil {
b.Fatalf("Unable to create Tracer: %+v", err)
}
sp := tracer.StartSpan("testing")
executeOps(sp, 0, 0, numItems)
var carrier interface{}
switch format {
case opentracing.TextMap, opentracing.HTTPHeaders:
carrier = opentracing.HTTPHeadersCarrier(http.Header{})
case opentracing.Binary:
carrier = &bytes.Buffer{}
default:
b.Fatalf("unhandled format %d", format)
}
if err := tracer.Inject(sp.Context(), format, carrier); err != nil {
b.Fatal(err)
}
// We create a new bytes.Buffer every time for tracer.Extract() to keep
// this benchmark realistic.
var rawBinaryBytes []byte
if format == opentracing.Binary {
rawBinaryBytes = carrier.(*bytes.Buffer).Bytes()
}
b.ResetTimer()
for i := 0; i < b.N; i++ {
if format == opentracing.Binary {
carrier = bytes.NewBuffer(rawBinaryBytes)
}
_, err := tracer.Extract(format, carrier)
if err != nil {
b.Fatal(err)
}
}
}
func BenchmarkInject_TextMap_Empty(b *testing.B) {
benchmarkInject(b, opentracing.TextMap, 0)
}
func BenchmarkInject_TextMap_100BaggageItems(b *testing.B) {
benchmarkInject(b, opentracing.TextMap, 100)
}
func BenchmarkInject_Binary_Empty(b *testing.B) {
benchmarkInject(b, opentracing.Binary, 0)
}
func BenchmarkInject_Binary_100BaggageItems(b *testing.B) {
benchmarkInject(b, opentracing.Binary, 100)
}
func BenchmarkExtract_TextMap_Empty(b *testing.B) {
benchmarkExtract(b, opentracing.TextMap, 0)
}
func BenchmarkExtract_TextMap_100BaggageItems(b *testing.B) {
benchmarkExtract(b, opentracing.TextMap, 100)
}
func BenchmarkExtract_Binary_Empty(b *testing.B) {
benchmarkExtract(b, opentracing.Binary, 0)
}
func BenchmarkExtract_Binary_100BaggageItems(b *testing.B) {
benchmarkExtract(b, opentracing.Binary, 100)
}


@@ -1,382 +0,0 @@
package zipkintracer
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"sync"
"testing"
"time"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
const (
interval = 10 * time.Millisecond
serverSleep = 100 * time.Millisecond
)
func TestHttpCollector(t *testing.T) {
t.Parallel()
port := 10000
server := newHTTPServer(t, port)
c, err := NewHTTPCollector(fmt.Sprintf("http://localhost:%d/api/v1/spans", port))
if err != nil {
t.Fatal(err)
}
var (
serviceName = "service"
methodName = "method"
traceID = int64(123)
spanID = int64(456)
parentSpanID = int64(0)
value = "foo"
)
span := makeNewSpan("1.2.3.4:1234", serviceName, methodName, traceID, spanID, parentSpanID, true)
annotate(span, time.Now(), value, nil)
if err := c.Collect(span); err != nil {
t.Errorf("error during collection: %v", err)
}
if err := c.Close(); err != nil {
t.Fatalf("error during collection: %v", err)
}
if want, have := 1, len(server.spans()); want != have {
t.Fatal("never received a span")
}
gotSpan := server.spans()[0]
if want, have := methodName, gotSpan.GetName(); want != have {
t.Errorf("want %q, have %q", want, have)
}
if want, have := traceID, gotSpan.TraceID; want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := spanID, gotSpan.ID; want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := parentSpanID, *gotSpan.ParentID; want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := 1, len(gotSpan.GetAnnotations()); want != have {
t.Fatalf("want %d, have %d", want, have)
}
gotAnnotation := gotSpan.GetAnnotations()[0]
if want, have := value, gotAnnotation.GetValue(); want != have {
t.Errorf("want %q, have %q", want, have)
}
}
func TestHttpCollector_Batch(t *testing.T) {
t.Parallel()
port := 10001
server := newHTTPServer(t, port)
var (
batchSize = 5
spanTimeout = 100 * time.Millisecond
)
c, err := NewHTTPCollector(fmt.Sprintf("http://localhost:%d/api/v1/spans", port),
HTTPBatchSize(batchSize),
HTTPBatchInterval(time.Duration(2*batchSize)*spanTimeout), // Make sure timeout won't cause this test to pass
)
if err != nil {
t.Fatal(err)
}
for i := 0; i < batchSize-1; i++ {
if err := c.Collect(&zipkincore.Span{}); err != nil {
t.Errorf("error during collection: %v", err)
}
}
err = consistently(func() bool { return len(server.spans()) == 0 }, spanTimeout)
if err != nil {
t.Fatal("Client sent spans before batch size")
}
if err := c.Collect(&zipkincore.Span{}); err != nil {
t.Errorf("error during collection: %v", err)
}
err = eventually(func() bool { return len(server.spans()) == batchSize }, time.Duration(batchSize)*time.Millisecond)
if err != nil {
t.Fatal("Client did not send spans when batch size reached")
}
}
func TestHttpCollector_BatchInterval(t *testing.T) {
t.Parallel()
port := 10002
server := newHTTPServer(t, port)
var (
batchSize = 5
batchInterval = 100 * time.Millisecond
)
start := time.Now()
c, err := NewHTTPCollector(fmt.Sprintf("http://localhost:%d/api/v1/spans", port),
HTTPBatchSize(batchSize), // Make sure batch won't make this test pass
HTTPBatchInterval(batchInterval),
)
if err != nil {
t.Fatal(err)
}
// send fewer spans than batchSize in the background
lessThanBatchSize := batchSize - 1
go func() {
for i := 0; i < lessThanBatchSize; i++ {
if err := c.Collect(&zipkincore.Span{}); err != nil {
t.Errorf("error during collection: %v", err)
}
}
}()
beforeInterval := batchInterval - (2 * interval) - time.Now().Sub(start)
err = consistently(func() bool { return len(server.spans()) == 0 }, beforeInterval)
if err != nil {
t.Fatal("Client sent spans before timeout")
}
afterInterval := batchInterval * 2
err = eventually(func() bool { return len(server.spans()) == lessThanBatchSize }, afterInterval)
if err != nil {
t.Fatal("Client did not send spans after timeout")
}
}
// TestHttpCollector_NonBlockCollect tests that the Collect
// function is non-blocking, even when the server is slow.
// It uses the /api/v1/sleep endpoint registered in the server.
func TestHttpCollector_NonBlockCollect(t *testing.T) {
t.Parallel()
port := 10003
newHTTPServer(t, port)
c, err := NewHTTPCollector(fmt.Sprintf("http://localhost:%d/api/v1/sleep", port))
if err != nil {
t.Fatal(err)
}
start := time.Now()
if err := c.Collect(&zipkincore.Span{}); err != nil {
t.Errorf("error during collection: %v", err)
}
if time.Now().Sub(start) >= serverSleep {
t.Fatal("Collect is blocking")
}
}
func TestHttpCollector_MaxBatchSize(t *testing.T) {
t.Parallel()
port := 10004
server := newHTTPServer(t, port)
var (
maxBacklog = 5
batchSize = maxBacklog * 2 // make the batch size bigger than the backlog to test backlog disposal
)
c, err := NewHTTPCollector(fmt.Sprintf("http://localhost:%d/api/v1/spans", port),
HTTPMaxBacklog(maxBacklog),
HTTPBatchSize(batchSize),
)
if err != nil {
t.Fatal(err)
}
for i := 0; i < batchSize; i++ {
c.Collect(makeNewSpan("", "", "", 0, int64(i), 0, false))
}
c.Close()
for i, s := range server.spans() {
if want, have := int64(i+maxBacklog), s.ID; want != have {
t.Errorf("Span ID is wrong. want %d, have %d", want, have)
}
}
}
func TestHTTPCollector_RequestCallback(t *testing.T) {
t.Parallel()
var (
err error
port = 10005
server = newHTTPServer(t, port)
hdrKey = "test-key"
hdrValue = "test-value"
)
c, err := NewHTTPCollector(
fmt.Sprintf("http://localhost:%d/api/v1/spans", port),
HTTPRequestCallback(func(r *http.Request) {
r.Header.Add(hdrKey, hdrValue)
}),
)
if err != nil {
t.Fatal(err)
}
if err = c.Collect(&zipkincore.Span{}); err != nil {
t.Fatal(err)
}
if err = c.Close(); err != nil {
t.Fatal(err)
}
if want, have := 1, len(server.spans()); want != have {
t.Fatal("never received a span")
}
headers := server.headers()
if len(headers) == 0 {
t.Fatalf("Collect request was not handled")
}
testHeader := headers.Get(hdrKey)
if !strings.EqualFold(testHeader, hdrValue) {
t.Errorf("Custom header not received. want %s, have %s", hdrValue, testHeader)
}
server.clearHeaders()
}
type httpServer struct {
t *testing.T
zipkinSpans []*zipkincore.Span
zipkinHeader http.Header
mutex sync.RWMutex
}
func (s *httpServer) spans() []*zipkincore.Span {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.zipkinSpans
}
func (s *httpServer) clearSpans() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.zipkinSpans = s.zipkinSpans[:0]
}
func (s *httpServer) headers() http.Header {
s.mutex.RLock()
defer s.mutex.RUnlock()
return s.zipkinHeader
}
func (s *httpServer) clearHeaders() {
s.mutex.Lock()
defer s.mutex.Unlock()
s.zipkinHeader = make(http.Header, 0)
}
func newHTTPServer(t *testing.T, port int) *httpServer {
server := &httpServer{
t: t,
zipkinSpans: make([]*zipkincore.Span, 0),
mutex: sync.RWMutex{},
}
handler := http.NewServeMux()
handler.HandleFunc("/api/v1/spans", func(w http.ResponseWriter, r *http.Request) {
contentType := r.Header.Get("Content-Type")
if contentType != "application/x-thrift" {
t.Fatalf(
"expected Content-Type to be application/x-thrift, but is %s",
contentType)
}
// clone headers from request
headers := make(http.Header, len(r.Header))
for k, vv := range r.Header {
vv2 := make([]string, len(vv))
copy(vv2, vv)
headers[k] = vv2
}
body, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
buffer := thrift.NewTMemoryBuffer()
if _, err = buffer.Write(body); err != nil {
t.Error(err)
return
}
transport := thrift.NewTBinaryProtocolTransport(buffer)
_, size, err := transport.ReadListBegin()
if err != nil {
t.Error(err)
return
}
var spans []*zipkincore.Span
for i := 0; i < size; i++ {
zs := &zipkincore.Span{}
if err = zs.Read(transport); err != nil {
t.Error(err)
return
}
spans = append(spans, zs)
}
err = transport.ReadListEnd()
if err != nil {
t.Error(err)
return
}
server.mutex.Lock()
defer server.mutex.Unlock()
server.zipkinSpans = append(server.zipkinSpans, spans...)
server.zipkinHeader = headers
})
handler.HandleFunc("/api/v1/sleep", func(w http.ResponseWriter, r *http.Request) {
time.Sleep(serverSleep)
})
go func() {
http.ListenAndServe(fmt.Sprintf(":%d", port), handler)
}()
return server
}
func consistently(assertion func() bool, atLeast time.Duration) error {
deadline := time.Now().Add(atLeast)
for time.Now().Before(deadline) {
if !assertion() {
return fmt.Errorf("failed")
}
time.Sleep(interval)
}
return nil
}
func eventually(assertion func() bool, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
if assertion() {
return nil
}
time.Sleep(interval)
}
return fmt.Errorf("failed")
}


@@ -1,194 +0,0 @@
package zipkintracer
import (
"context"
"errors"
"testing"
"time"
"github.com/Shopify/sarama"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
type stubProducer struct {
in chan *sarama.ProducerMessage
err chan *sarama.ProducerError
kdown bool
closed bool
}
func (p *stubProducer) AsyncClose() {}
func (p *stubProducer) Close() error {
if p.kdown {
return errors.New("Kafka is down")
}
p.closed = true
return nil
}
func (p *stubProducer) Input() chan<- *sarama.ProducerMessage { return p.in }
func (p *stubProducer) Successes() <-chan *sarama.ProducerMessage { return nil }
func (p *stubProducer) Errors() <-chan *sarama.ProducerError { return p.err }
func newStubProducer(kdown bool) *stubProducer {
return &stubProducer{
make(chan *sarama.ProducerMessage),
make(chan *sarama.ProducerError),
kdown,
false,
}
}
var spans = []*zipkincore.Span{
makeNewSpan("203.0.113.10:1234", "service1", "avg", 123, 456, 0, true),
makeNewSpan("203.0.113.10:1234", "service2", "sum", 123, 789, 456, true),
makeNewSpan("203.0.113.10:1234", "service2", "div", 123, 101112, 456, true),
}
func TestKafkaProduce(t *testing.T) {
p := newStubProducer(false)
c, err := NewKafkaCollector(
[]string{"192.0.2.10:9092"}, KafkaProducer(p),
)
if err != nil {
t.Fatal(err)
}
for _, want := range spans {
m := collectSpan(t, c, p, want)
testMetadata(t, m)
got := deserializeSpan(t, m.Value)
testEqual(t, want, got)
}
}
func TestKafkaClose(t *testing.T) {
p := newStubProducer(false)
c, err := NewKafkaCollector(
[]string{"192.0.2.10:9092"}, KafkaProducer(p),
)
if err != nil {
t.Fatal(err)
}
if err = c.Close(); err != nil {
t.Fatal(err)
}
if !p.closed {
t.Fatal("producer not closed")
}
}
func TestKafkaCloseError(t *testing.T) {
p := newStubProducer(true)
c, err := NewKafkaCollector(
[]string{"192.0.2.10:9092"}, KafkaProducer(p),
)
if err != nil {
t.Fatal(err)
}
if err = c.Close(); err == nil {
t.Error("no error on close")
}
}
func TestKafkaErrors(t *testing.T) {
p := newStubProducer(true)
errs := make(chan []interface{}, len(spans))
lg := Logger(LoggerFunc(func(keyvals ...interface{}) error {
for i := 0; i < len(keyvals); i += 2 {
if keyvals[i] == "result" && keyvals[i+1] == "failed to produce msg" {
errs <- keyvals
}
}
return nil
}))
c, err := NewKafkaCollector(
[]string{"192.0.2.10:9092"},
KafkaProducer(p),
KafkaLogger(lg),
)
if err != nil {
t.Fatal(err)
}
for _, want := range spans {
_ = collectSpan(t, c, p, want)
}
for i := 0; i < len(spans); i++ {
select {
case <-errs:
case <-time.After(100 * time.Millisecond):
t.Fatalf("errors not logged. got %d, wanted %d", i, len(spans))
}
}
}
func collectSpan(t *testing.T, c Collector, p *stubProducer, s *zipkincore.Span) *sarama.ProducerMessage {
var m *sarama.ProducerMessage
rcvd := make(chan bool, 1)
go func() {
select {
case m = <-p.in:
rcvd <- true
if p.kdown {
p.err <- &sarama.ProducerError{
Msg: m,
Err: errors.New("kafka is down"),
}
}
case <-time.After(100 * time.Millisecond):
rcvd <- false
}
}()
if err := c.Collect(s); err != nil {
t.Errorf("error during collection: %v", err)
}
if !<-rcvd {
t.Fatal("span message was not produced")
}
return m
}
func testMetadata(t *testing.T, m *sarama.ProducerMessage) {
if m.Topic != "zipkin" {
t.Errorf("produced to topic %q, want %q", m.Topic, "zipkin")
}
if m.Key != nil {
t.Errorf("produced with key %q, want nil", m.Key)
}
}
func deserializeSpan(t *testing.T, e sarama.Encoder) *zipkincore.Span {
bytes, err := e.Encode()
if err != nil {
t.Errorf("error in encoding: %v", err)
}
s := zipkincore.NewSpan()
mb := thrift.NewTMemoryBufferLen(len(bytes))
_, _ = mb.Write(bytes)
_ = mb.Flush(context.Background())
pt := thrift.NewTBinaryProtocolTransport(mb)
err = s.Read(pt)
if err != nil {
t.Errorf("error in decoding: %v", err)
}
return s
}
func testEqual(t *testing.T, want *zipkincore.Span, got *zipkincore.Span) {
if got.TraceID != want.TraceID {
t.Errorf("trace_id %d, want %d", got.TraceID, want.TraceID)
}
if got.ID != want.ID {
t.Errorf("id %d, want %d", got.ID, want.ID)
}
if got.ParentID == nil {
if want.ParentID != nil {
t.Errorf("parent_id %d, want %d", got.ParentID, want.ParentID)
}
} else if *got.ParentID != *want.ParentID {
t.Errorf("parent_id %d, want %d", got.ParentID, want.ParentID)
}
}


@@ -1,189 +0,0 @@
package zipkintracer
import (
"context"
"encoding/base64"
"fmt"
"math/rand"
"net"
"sync"
"testing"
"time"
"github.com/apache/thrift/lib/go/thrift"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
func TestScribeCollector(t *testing.T) {
server := newScribeServer(t)
timeout := time.Second
batchInterval := time.Millisecond
c, err := NewScribeCollector(server.addr(), timeout, ScribeBatchSize(0), ScribeBatchInterval(batchInterval))
if err != nil {
t.Fatal(err)
}
var (
serviceName = "service"
methodName = "method"
traceID = int64(123)
spanID = int64(456)
parentSpanID = int64(0)
value = "foo"
)
span := makeNewSpan("1.2.3.4:1234", serviceName, methodName, traceID, spanID, parentSpanID, true)
annotate(span, time.Now(), "foo", nil)
if err := c.Collect(span); err != nil {
t.Errorf("error during collection: %v", err)
}
if err := c.Close(); err != nil {
t.Fatalf("error during collection: %v", err)
}
if want, have := 1, len(server.spans()); want != have {
t.Fatalf("never received a span")
}
gotSpan := server.spans()[0]
if want, have := methodName, gotSpan.GetName(); want != have {
t.Errorf("want %q, have %q", want, have)
}
if want, have := traceID, gotSpan.TraceID; want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := spanID, gotSpan.ID; want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := parentSpanID, *gotSpan.ParentID; want != have {
t.Errorf("want %d, have %d", want, have)
}
if want, have := 1, len(gotSpan.GetAnnotations()); want != have {
t.Fatalf("want %d, have %d", want, have)
}
gotAnnotation := gotSpan.GetAnnotations()[0]
if want, have := value, gotAnnotation.GetValue(); want != have {
t.Errorf("want %q, have %q", want, have)
}
}
type scribeServer struct {
t *testing.T
transport *thrift.TServerSocket
address string
server *thrift.TSimpleServer
handler *scribeHandler
}
func newScribeServer(t *testing.T) *scribeServer {
protocolFactory := thrift.NewTBinaryProtocolFactoryDefault()
transportFactory := thrift.NewTFramedTransportFactory(thrift.NewTTransportFactory())
var port int
var transport *thrift.TServerSocket
var err error
for i := 0; i < 10; i++ {
port = 10000 + rand.Intn(10000)
transport, err = thrift.NewTServerSocket(fmt.Sprintf(":%d", port))
if err != nil {
t.Logf("port %d: %v", port, err)
continue
}
break
}
if err != nil {
t.Fatal(err)
}
handler := newScribeHandler(t)
server := thrift.NewTSimpleServer4(
scribe.NewScribeProcessor(handler),
transport,
transportFactory,
protocolFactory,
)
go func() {
_ = server.Serve()
}()
deadline := time.Now().Add(time.Second)
for !canConnect(port) {
if time.Now().After(deadline) {
t.Fatal("server never started")
}
time.Sleep(time.Millisecond)
}
return &scribeServer{
transport: transport,
address: fmt.Sprintf("127.0.0.1:%d", port),
handler: handler,
}
}
func (s *scribeServer) addr() string {
return s.address
}
func (s *scribeServer) spans() []*zipkincore.Span {
return s.handler.spans()
}
type scribeHandler struct {
t *testing.T
sync.RWMutex
entries []*scribe.LogEntry
}
func newScribeHandler(t *testing.T) *scribeHandler {
return &scribeHandler{t: t}
}
func (h *scribeHandler) Log(ctx context.Context, messages []*scribe.LogEntry) (scribe.ResultCode, error) {
h.Lock()
defer h.Unlock()
for _, m := range messages {
h.entries = append(h.entries, m)
}
return scribe.ResultCode_OK, nil
}
func (h *scribeHandler) spans() []*zipkincore.Span {
h.RLock()
defer h.RUnlock()
spans := []*zipkincore.Span{}
for _, m := range h.entries {
decoded, err := base64.StdEncoding.DecodeString(m.GetMessage())
if err != nil {
h.t.Error(err)
continue
}
buffer := thrift.NewTMemoryBuffer()
if _, err := buffer.Write(decoded); err != nil {
h.t.Error(err)
continue
}
transport := thrift.NewTBinaryProtocolTransport(buffer)
zs := &zipkincore.Span{}
if err := zs.Read(transport); err != nil {
h.t.Error(err)
continue
}
spans = append(spans, zs)
}
return spans
}
func canConnect(port int) bool {
c, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port))
if err != nil {
return false
}
_ = c.Close()
return true
}


@@ -1,110 +0,0 @@
package zipkintracer
import (
"fmt"
"testing"
"time"
"github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore"
)
var s = makeNewSpan("203.0.113.10:1234", "service1", "avg", 123, 456, 0, true)
func TestNopCollector(t *testing.T) {
c := NopCollector{}
if err := c.Collect(s); err != nil {
t.Error(err)
}
if err := c.Close(); err != nil {
t.Error(err)
}
}
type stubCollector struct {
errid int
collected bool
closed bool
}
func (c *stubCollector) Collect(*zipkincore.Span) error {
c.collected = true
if c.errid != 0 {
return fmt.Errorf("error %d", c.errid)
}
return nil
}
func (c *stubCollector) Close() error {
c.closed = true
if c.errid != 0 {
return fmt.Errorf("error %d", c.errid)
}
return nil
}
func TestMultiCollector(t *testing.T) {
cs := MultiCollector{
&stubCollector{errid: 1},
&stubCollector{},
&stubCollector{errid: 2},
}
err := cs.Collect(s)
if err == nil {
t.Fatal("wanted error, got none")
}
if want, have := "error 1; error 2", err.Error(); want != have {
t.Errorf("want %q, have %q", want, have)
}
collectionError := err.(CollectionError).GetErrors()
if want, have := 3, len(collectionError); want != have {
t.Fatalf("want %d, have %d", want, have)
}
if want, have := cs[0].Collect(s).Error(), collectionError[0].Error(); want != have {
t.Errorf("want %q, have %q", want, have)
}
if want, have := cs[1].Collect(s), collectionError[1]; want != have {
t.Errorf("want %q, have %q", want, have)
}
if want, have := cs[2].Collect(s).Error(), collectionError[2].Error(); want != have {
t.Errorf("want %q, have %q", want, have)
}
for _, c := range cs {
if !c.(*stubCollector).collected {
t.Error("collect not called")
}
}
}
func TestMultiCollectorClose(t *testing.T) {
cs := MultiCollector{
&stubCollector{errid: 1},
&stubCollector{},
&stubCollector{errid: 2},
}
err := cs.Close()
if err == nil {
t.Fatal("wanted error, got none")
}
if want, have := "error 1; error 2", err.Error(); want != have {
t.Errorf("want %q, have %q", want, have)
}
for _, c := range cs {
if !c.(*stubCollector).closed {
t.Error("close not called")
}
}
}
func makeNewSpan(hostPort, serviceName, methodName string, traceID, spanID, parentSpanID int64, debug bool) *zipkincore.Span {
timestamp := time.Now().UnixNano() / 1e3
return &zipkincore.Span{
TraceID: traceID,
Name: methodName,
ID: spanID,
ParentID: &parentSpanID,
Debug: debug,
Timestamp: &timestamp,
}
}


@@ -1,134 +0,0 @@
package zipkintracer
import (
"strings"
"sync"
"testing"
opentracing "github.com/opentracing/opentracing-go"
)
const op = "test"
func TestDebugAssertSingleGoroutine(t *testing.T) {
tracer, err := NewTracer(
NewInMemoryRecorder(),
EnableSpanPool(true),
DebugAssertSingleGoroutine(true),
TraceID128Bit(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
sp := tracer.StartSpan(op)
sp.LogEvent("something on my goroutine")
wait := make(chan struct{})
var panicked bool
go func() {
defer func() {
if r := recover(); r != nil {
_, panicked = r.(*errAssertionFailed)
}
close(wait)
}()
sp.LogEvent("something on your goroutine")
}()
<-wait
if !panicked {
t.Fatal("expected a panic")
}
}
func TestDebugAssertUseAfterFinish(t *testing.T) {
tracer, err := NewTracer(
NewInMemoryRecorder(),
EnableSpanPool(true),
DebugAssertUseAfterFinish(true),
TraceID128Bit(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
const msg = "I shall be finished"
for _, double := range []bool{false, true} {
sp := tracer.StartSpan(op)
sp.Log(opentracing.LogData{Event: msg})
if double {
sp.Finish()
}
var panicked bool
func() {
defer func() {
r := recover()
var assertionErr error
assertionErr, panicked = r.(*errAssertionFailed)
if !panicked && r != nil {
panic(r)
}
if panicked && !strings.Contains(assertionErr.Error(), msg) {
t.Fatalf("debug output did not contain log message '%s': %+v", msg, assertionErr)
}
spImpl := sp.(*spanImpl)
// The panic should leave the Mutex unlocked.
spImpl.Mutex.Lock()
spImpl.Mutex.Unlock()
}()
sp.Finish()
}()
if panicked != double {
t.Errorf("finished double = %t, but panicked = %t", double, panicked)
}
}
}
func TestConcurrentUsage(t *testing.T) {
var cr CountingRecorder
tracer, err := NewTracer(
&cr,
EnableSpanPool(true),
DebugAssertSingleGoroutine(true),
TraceID128Bit(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
var wg sync.WaitGroup
const num = 100
wg.Add(num)
for i := 0; i < num; i++ {
go func() {
defer wg.Done()
for j := 0; j < num; j++ {
sp := tracer.StartSpan(op)
sp.LogEvent("test event")
sp.SetTag("foo", "bar")
sp.SetBaggageItem("boo", "far")
sp.SetOperationName("x")
csp := tracer.StartSpan(
"csp",
opentracing.ChildOf(sp.Context()))
csp.Finish()
defer sp.Finish()
}
}()
}
wg.Wait()
}
func TestDisableSpanPool(t *testing.T) {
var cr CountingRecorder
tracer, err := NewTracer(
&cr,
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
parent := tracer.StartSpan("parent")
parent.Finish()
// This shouldn't panic.
child := tracer.StartSpan(
"child",
opentracing.ChildOf(parent.Context()))
child.Finish()
}


@@ -1,74 +0,0 @@
package zipkintracer
import (
"errors"
"testing"
"github.com/opentracing/opentracing-go/log"
)
type obj struct {
a int
b string
}
func getLogFields() []log.Field {
lazy := func(fv log.Encoder) {
fv.EmitString("lazy", "logger")
}
return []log.Field{
log.Bool("bool", true),
log.String("string", "value"),
log.Error(errors.New("an error")),
log.Float32("float32", 32.123),
log.Float64("float64", 64.123),
log.Int("int", 42),
log.Int32("int32", 32),
log.Int64("int64", 64),
log.Uint32("uint32", 32),
log.Uint64("uint64", 64),
log.Object("object", obj{a: 42, b: "string"}),
log.Lazy(lazy),
log.String("event", "EventValue"),
}
}
func TestMaterializeWithJSON(t *testing.T) {
logFields := getLogFields()
want := `{"bool":"true","error":"an error","event":"EventValue","float32":"32.123001","float64":"64.123000","int":"42","int32":"32","int64":"64","lazy":"logger","object":"{a:42 b:string}","string":"value","uint32":"32","uint64":"64"}`
have, err := MaterializeWithJSON(logFields)
if err != nil {
t.Fatalf("expected json string, got error %+v", err)
}
if want != string(have) {
t.Errorf("want:\n%s\nhave\n%s", want, have)
}
}
func TestMaterializeWithLogFmt(t *testing.T) {
logFields := getLogFields()
want := `bool=true string=value error="an error" float32=32.123 float64=64.123 int=42 int32=32 int64=64 uint32=32 uint64=64 object="unsupported value type" event=EventValue`
have, err := MaterializeWithLogFmt(logFields)
if err != nil {
t.Fatalf("expected logfmt string, got error %+v", err)
}
if want != string(have) {
t.Errorf("want:\n%s\nhave\n%s", want, have)
}
}
func TestStrictZipkinMaterializer(t *testing.T) {
logFields := getLogFields()
want := `EventValue`
have, err := StrictZipkinMaterializer(logFields)
if err != nil {
t.Fatalf("expected string got error %+v", err)
}
if want != string(have) {
t.Errorf("want:\n%s\nhave\n%s", want, have)
}
logFields = []log.Field{log.String("SomeKey", "SomeValue")}
if _, err = StrictZipkinMaterializer(logFields); err == nil {
t.Errorf("expected error: %s, got nil", errEventLogNotFound)
}
}


@@ -1,171 +0,0 @@
package zipkintracer_test
import (
"bytes"
"net/http"
"reflect"
"testing"
"time"
"github.com/davecgh/go-spew/spew"
opentracing "github.com/opentracing/opentracing-go"
zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
"github.com/openzipkin/zipkin-go-opentracing/flag"
"github.com/openzipkin/zipkin-go-opentracing/types"
)
type verbatimCarrier struct {
zipkintracer.SpanContext
b map[string]string
}
var _ zipkintracer.DelegatingCarrier = &verbatimCarrier{}
func (vc *verbatimCarrier) SetBaggageItem(k, v string) {
vc.b[k] = v
}
func (vc *verbatimCarrier) GetBaggage(f func(string, string)) {
for k, v := range vc.b {
f(k, v)
}
}
func (vc *verbatimCarrier) SetState(tID types.TraceID, sID uint64, pID *uint64, sampled bool, flags flag.Flags) {
vc.SpanContext = zipkintracer.SpanContext{
TraceID: tID,
SpanID: sID,
ParentSpanID: pID,
Sampled: sampled,
Flags: flags,
}
}
func (vc *verbatimCarrier) State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
return vc.SpanContext.TraceID, vc.SpanContext.SpanID, vc.SpanContext.ParentSpanID, vc.SpanContext.Sampled, vc.SpanContext.Flags
}
func TestSpanPropagator(t *testing.T) {
const op = "test"
recorder := zipkintracer.NewInMemoryRecorder()
tracer, err := zipkintracer.NewTracer(
recorder,
zipkintracer.ClientServerSameSpan(true),
zipkintracer.DebugMode(true),
zipkintracer.TraceID128Bit(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
// create root span so propagation test will include parentSpanID
ps := tracer.StartSpan("root")
defer ps.Finish()
// client side span with parent span 'ps'
sp := tracer.StartSpan(op, opentracing.ChildOf(ps.Context()))
sp.SetBaggageItem("foo", "bar")
tmc := opentracing.HTTPHeadersCarrier(http.Header{})
tests := []struct {
typ, carrier interface{}
}{
{zipkintracer.Delegator, zipkintracer.DelegatingCarrier(&verbatimCarrier{b: map[string]string{}})},
{opentracing.Binary, &bytes.Buffer{}},
{opentracing.HTTPHeaders, tmc},
{opentracing.TextMap, tmc},
}
for i, test := range tests {
if err := tracer.Inject(sp.Context(), test.typ, test.carrier); err != nil {
t.Fatalf("%d: %v", i, err)
}
injectedContext, err := tracer.Extract(test.typ, test.carrier)
if err != nil {
t.Fatalf("%d: %v", i, err)
}
child := tracer.StartSpan(
op,
opentracing.ChildOf(injectedContext))
child.Finish()
}
sp.Finish()
spans := recorder.GetSpans()
if a, e := len(spans), len(tests)+1; a != e {
t.Fatalf("expected %d spans, got %d", e, a)
}
// The last span is the original one.
exp, spans := spans[len(spans)-1], spans[:len(spans)-1]
exp.Duration = time.Duration(123)
exp.Start = time.Time{}.Add(1)
for i, sp := range spans {
if a, e := *sp.Context.ParentSpanID, exp.Context.SpanID; a != e {
t.Fatalf("%d: ParentSpanID %d does not match expectation %d", i, a, e)
} else {
// Prepare for comparison.
sp.Context.Flags &= flag.Debug // other flags than Debug should be discarded in comparison
exp.Context.Flags &= flag.Debug // other flags than Debug should be discarded in comparison
sp.Context.SpanID, sp.Context.ParentSpanID = exp.Context.SpanID, exp.Context.ParentSpanID
sp.Duration, sp.Start = exp.Duration, exp.Start
}
if a, e := sp.Context.TraceID, exp.Context.TraceID; a != e {
t.Fatalf("%d: TraceID changed from %d to %d", i, e, a)
}
if exp.Context.ParentSpanID == nil {
t.Fatalf("%d: Expected a ParentSpanID, got nil", i)
}
if p, c := sp.Context.ParentSpanID, exp.Context.ParentSpanID; p != c {
t.Fatalf("%d: ParentSpanID changed from %d to %d", i, p, c)
}
if !reflect.DeepEqual(exp, sp) {
t.Fatalf("%d: wanted %+v, got %+v", i, spew.Sdump(exp), spew.Sdump(sp))
}
}
}
func TestInvalidCarrier(t *testing.T) {
recorder := zipkintracer.NewInMemoryRecorder()
tracer, err := zipkintracer.NewTracer(
recorder,
zipkintracer.ClientServerSameSpan(true),
zipkintracer.DebugMode(true),
zipkintracer.TraceID128Bit(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
if _, err = tracer.Extract(zipkintracer.Delegator, "invalid carrier"); err == nil {
t.Fatalf("Expected: %s, got nil", opentracing.ErrInvalidCarrier)
}
}
func TestB3Hex(t *testing.T) {
recorder := zipkintracer.NewInMemoryRecorder()
tracer, err := zipkintracer.NewTracer(
recorder,
zipkintracer.TraceID128Bit(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
for i := 0; i < 1000; i++ {
headers := http.Header{}
tmc := opentracing.HTTPHeadersCarrier(headers)
span := tracer.StartSpan("dummy")
if err := tracer.Inject(span.Context(), opentracing.TextMap, tmc); err != nil {
t.Fatalf("Expected nil, got error %+v", err)
}
if want1, want2, have := 32, 16, len(headers["X-B3-Traceid"][0]); want1 != have && want2 != have {
t.Errorf("X-B3-TraceId hex length expected %d or %d, got %d", want1, want2, have)
}
if want, have := 16, len(headers["X-B3-Spanid"][0]); want != have {
t.Errorf("X-B3-SpanId hex length expected %d, got %d", want, have)
}
span.Finish()
}
}


@@ -1,29 +0,0 @@
package zipkintracer
import (
"sync/atomic"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestInMemoryRecorderSpans(t *testing.T) {
recorder := NewInMemoryRecorder()
var apiRecorder SpanRecorder = recorder
span := RawSpan{
Context: SpanContext{},
Operation: "test-span",
Start: time.Now(),
Duration: -1,
}
apiRecorder.RecordSpan(span)
assert.Equal(t, []RawSpan{span}, recorder.GetSpans())
assert.Equal(t, []RawSpan{}, recorder.GetSampledSpans())
}
type CountingRecorder int32
func (c *CountingRecorder) RecordSpan(r RawSpan) {
atomic.AddInt32((*int32)(c), 1)
}


@@ -1,52 +0,0 @@
package zipkintracer_test
import (
"testing"
zipkin "github.com/openzipkin/zipkin-go-opentracing"
)
func TestBoundarySampler(t *testing.T) {
type triple struct {
id uint64
salt int64
rate float64
}
for input, want := range map[triple]bool{
{123, 456, 1.0}: true,
{123, 456, 999}: true,
{123, 456, 0.0}: false,
{123, 456, -42}: false,
{1229998, 0, 0.01}: false,
{1229999, 0, 0.01}: false,
{1230000, 0, 0.01}: true,
{1230001, 0, 0.01}: true,
{1230098, 0, 0.01}: true,
{1230099, 0, 0.01}: true,
{1230100, 0, 0.01}: false,
{1230101, 0, 0.01}: false,
{1, 9999999, 0.01}: false,
{999, 0, 0.99}: true,
{9999, 0, 0.99}: false,
} {
sampler := zipkin.NewBoundarySampler(input.rate, input.salt)
if have := sampler(input.id); want != have {
t.Errorf("%#+v: want %v, have %v", input, want, have)
}
}
}
func TestCountingSampler(t *testing.T) {
for n := 1; n < 100; n++ {
sampler := zipkin.NewCountingSampler(float64(n) / 100)
found := 0
for i := 0; i < 100; i++ {
if sampler(1) {
found++
}
}
if found != n {
t.Errorf("want %d, have %d\n", n, found)
}
}
}
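The table in TestBoundarySampler above doubles as a behavioral spec: rates at or above 1.0 always sample, rates at or below 0.0 never sample, and at rate 0.01 with salt 0 only IDs landing in a 100-wide window of a 10000-wide modulus are kept. Below is a minimal toy sampler consistent with that table; it is not the actual zipkin.NewBoundarySampler implementation, and the XOR salt mixing is only an assumption.

package main

import "fmt"

// toyBoundarySampler mimics the behavior the test table expects: rate <= 0
// never samples, rate >= 1 always samples, and otherwise the salt-mixed ID
// must fall below rate*10000 inside a 10000-wide window.
func toyBoundarySampler(rate float64, salt int64) func(id uint64) bool {
    if rate <= 0 {
        return func(uint64) bool { return false }
    }
    if rate >= 1 {
        return func(uint64) bool { return true }
    }
    boundary := uint64(rate * 10000)
    return func(id uint64) bool {
        return (id^uint64(salt))%10000 < boundary // XOR mixing is an assumption
    }
}

func main() {
    sampler := toyBoundarySampler(0.01, 0)
    fmt.Println(sampler(1230000), sampler(1230100)) // true false, matching the table
}

Checked against the entries above, this toy agrees with the table, which is what makes the boundary cases (1229999 vs. 1230000, 1230099 vs. 1230100) useful as a regression check.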


@@ -1,252 +0,0 @@
package zipkintracer
import (
"reflect"
"strconv"
"testing"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"github.com/opentracing/opentracing-go/log"
"github.com/stretchr/testify/assert"
)
func TestSpan_Baggage(t *testing.T) {
recorder := NewInMemoryRecorder()
tracer, err := NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return true }),
WithLogger(&nopLogger{}),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span := tracer.StartSpan("x")
span.SetBaggageItem("x", "y")
assert.Equal(t, "y", span.BaggageItem("x"))
span.Finish()
spans := recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, map[string]string{"x": "y"}, spans[0].Context.Baggage)
recorder.Reset()
span = tracer.StartSpan("x")
span.SetBaggageItem("x", "y")
baggage := make(map[string]string)
span.Context().ForeachBaggageItem(func(k, v string) bool {
baggage[k] = v
return true
})
assert.Equal(t, map[string]string{"x": "y"}, baggage)
span.SetBaggageItem("a", "b")
baggage = make(map[string]string)
span.Context().ForeachBaggageItem(func(k, v string) bool {
baggage[k] = v
return false // exit early
})
assert.Equal(t, 1, len(baggage))
span.Finish()
spans = recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, 2, len(spans[0].Context.Baggage))
}
func TestSpan_Sampling(t *testing.T) {
recorder := NewInMemoryRecorder()
tracer, err := NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return true }),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span := tracer.StartSpan("x")
span.Finish()
assert.Equal(t, 1, len(recorder.GetSampledSpans()), "by default span should be sampled")
recorder.Reset()
span = tracer.StartSpan("x")
ext.SamplingPriority.Set(span, 0)
span.Finish()
assert.Equal(t, 0, len(recorder.GetSampledSpans()), "SamplingPriority=0 should turn off sampling")
tracer, err = NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return false }),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
recorder.Reset()
span = tracer.StartSpan("x")
span.Finish()
assert.Equal(t, 0, len(recorder.GetSampledSpans()), "by default span should not be sampled")
recorder.Reset()
span = tracer.StartSpan("x")
ext.SamplingPriority.Set(span, 1)
span.Finish()
assert.Equal(t, 1, len(recorder.GetSampledSpans()), "SamplingPriority=1 should turn on sampling")
}
func TestSpan_SingleLoggedTaggedSpan(t *testing.T) {
recorder := NewInMemoryRecorder()
tracer, err := NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return true }),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span := tracer.StartSpan("x")
span.LogEventWithPayload("event", "payload")
span.LogFields(log.String("key_str", "value"), log.Uint32("32bit", 4294967295))
span.SetTag("tag", "value")
span.Finish()
spans := recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, "x", spans[0].Operation)
assert.Equal(t, 2, len(spans[0].Logs))
assert.Equal(t, opentracing.Tags{"tag": "value"}, spans[0].Tags)
fv := NewLogFieldValidator(t, spans[0].Logs[0].Fields)
fv.
ExpectNextFieldEquals("event", reflect.String, "event").
ExpectNextFieldEquals("payload", reflect.Interface, "payload")
fv = NewLogFieldValidator(t, spans[0].Logs[1].Fields)
fv.
ExpectNextFieldEquals("key_str", reflect.String, "value").
ExpectNextFieldEquals("32bit", reflect.Uint32, "4294967295")
}
func TestSpan_TrimUnsampledSpans(t *testing.T) {
recorder := NewInMemoryRecorder()
// Tracer that trims only unsampled but always samples
tracer, err := NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return true }),
TrimUnsampledSpans(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span := tracer.StartSpan("x")
span.LogFields(log.String("key_str", "value"), log.Uint32("32bit", 4294967295))
span.SetTag("tag", "value")
span.Finish()
spans := recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, 1, len(spans[0].Logs))
assert.Equal(t, opentracing.Tags{"tag": "value"}, spans[0].Tags)
fv := NewLogFieldValidator(t, spans[0].Logs[0].Fields)
fv.
ExpectNextFieldEquals("key_str", reflect.String, "value").
ExpectNextFieldEquals("32bit", reflect.Uint32, "4294967295")
recorder.Reset()
// Tracer that trims only unsampled and never samples
tracer, err = NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return false }),
TrimUnsampledSpans(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span = tracer.StartSpan("x")
span.LogFields(log.String("key_str", "value"), log.Uint32("32bit", 4294967295))
span.SetTag("tag", "value")
span.Finish()
spans = recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, 0, len(spans[0].Logs))
assert.Equal(t, 0, len(spans[0].Tags))
}
func TestSpan_DropAllLogs(t *testing.T) {
recorder := NewInMemoryRecorder()
// Tracer that drops logs
tracer, err := NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return true }),
DropAllLogs(true),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span := tracer.StartSpan("x")
span.LogFields(log.String("key_str", "value"), log.Uint32("32bit", 4294967295))
span.SetTag("tag", "value")
span.Finish()
spans := recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, "x", spans[0].Operation)
assert.Equal(t, opentracing.Tags{"tag": "value"}, spans[0].Tags)
// Only logs are dropped
assert.Equal(t, 0, len(spans[0].Logs))
}
func TestSpan_MaxLogsPerSpan(t *testing.T) {
for _, limit := range []int{5, 10, 15, 20, 30, 40, 50} {
for _, numLogs := range []int{5, 10, 15, 20, 30, 40, 50, 60, 70, 80} {
recorder := NewInMemoryRecorder()
// Tracer that only retains the last <limit> logs.
tracer, err := NewTracer(
recorder,
WithSampler(func(_ uint64) bool { return true }),
WithMaxLogsPerSpan(limit),
)
if err != nil {
t.Fatalf("Unable to create Tracer: %+v", err)
}
span := tracer.StartSpan("x")
for i := 0; i < numLogs; i++ {
span.LogKV("eventIdx", i)
}
span.Finish()
spans := recorder.GetSpans()
assert.Equal(t, 1, len(spans))
assert.Equal(t, "x", spans[0].Operation)
logs := spans[0].Logs
var firstLogs, lastLogs []opentracing.LogRecord
if numLogs <= limit {
assert.Equal(t, numLogs, len(logs))
firstLogs = logs
} else {
assert.Equal(t, limit, len(logs))
if len(logs) > 0 {
numOld := (len(logs) - 1) / 2
firstLogs = logs[:numOld]
lastLogs = logs[numOld+1:]
fv := NewLogFieldValidator(t, logs[numOld].Fields)
fv = fv.ExpectNextFieldEquals("event", reflect.String, "dropped Span logs")
fv = fv.ExpectNextFieldEquals(
"dropped_log_count", reflect.Int, strconv.Itoa(numLogs-limit+1),
)
fv.ExpectNextFieldEquals("component", reflect.String, "zipkintracer")
}
}
for i, lr := range firstLogs {
fv := NewLogFieldValidator(t, lr.Fields)
fv.ExpectNextFieldEquals("eventIdx", reflect.Int, strconv.Itoa(i))
}
for i, lr := range lastLogs {
fv := NewLogFieldValidator(t, lr.Fields)
fv.ExpectNextFieldEquals("eventIdx", reflect.Int, strconv.Itoa(numLogs-len(lastLogs)+i))
}
}
}
}


@@ -1,125 +0,0 @@
package zipkintracer
import (
"fmt"
"reflect"
"runtime"
"testing"
"github.com/opentracing/opentracing-go/log"
)
// LogFieldValidator facilitates testing of Span.Log*() implementations.
//
// Usage:
//
// fv := NewLogFieldValidator(t, someLogStructure.Fields)
// fv.
// ExpectNextFieldEquals("key1", reflect.String, "some string value").
// ExpectNextFieldEquals("key2", reflect.Uint32, "4294967295")
//
// LogFieldValidator satisfies the log.Encoder interface and thus is able to
// marshal log.Field instances (which it takes advantage of internally).
type LogFieldValidator struct {
t *testing.T
fieldIdx int
fields []log.Field
nextKey string
nextKind reflect.Kind
nextValAsString string
}
// NewLogFieldValidator returns a new validator that will test the contents of
// `fields`.
func NewLogFieldValidator(t *testing.T, fields []log.Field) *LogFieldValidator {
return &LogFieldValidator{
t: t,
fields: fields,
}
}
// ExpectNextFieldEquals facilitates a fluent way of testing the contents of
// []Field slices.
func (fv *LogFieldValidator) ExpectNextFieldEquals(key string, kind reflect.Kind, valAsString string) *LogFieldValidator {
if len(fv.fields) < fv.fieldIdx {
_, file, line, _ := runtime.Caller(1)
fv.t.Errorf("%s:%d Expecting more than the %v Fields we have", file, line, len(fv.fields))
}
fv.nextKey = key
fv.nextKind = kind
fv.nextValAsString = valAsString
fv.fields[fv.fieldIdx].Marshal(fv)
fv.fieldIdx++
return fv
}
// EmitString satisfies the Encoder interface
func (fv *LogFieldValidator) EmitString(key, value string) {
fv.validateNextField(key, reflect.String, value)
}
// EmitBool satisfies the Encoder interface
func (fv *LogFieldValidator) EmitBool(key string, value bool) {
fv.validateNextField(key, reflect.Bool, value)
}
// EmitInt satisfies the Encoder interface
func (fv *LogFieldValidator) EmitInt(key string, value int) {
fv.validateNextField(key, reflect.Int, value)
}
// EmitInt32 satisfies the Encoder interface
func (fv *LogFieldValidator) EmitInt32(key string, value int32) {
fv.validateNextField(key, reflect.Int32, value)
}
// EmitInt64 satisfies the Encoder interface
func (fv *LogFieldValidator) EmitInt64(key string, value int64) {
fv.validateNextField(key, reflect.Int64, value)
}
// EmitUint32 satisfies the Encoder interface
func (fv *LogFieldValidator) EmitUint32(key string, value uint32) {
fv.validateNextField(key, reflect.Uint32, value)
}
// EmitUint64 satisfies the Encoder interface
func (fv *LogFieldValidator) EmitUint64(key string, value uint64) {
fv.validateNextField(key, reflect.Uint64, value)
}
// EmitFloat32 satisfies the Encoder interface
func (fv *LogFieldValidator) EmitFloat32(key string, value float32) {
fv.validateNextField(key, reflect.Float32, value)
}
// EmitFloat64 satisfies the Encoder interface
func (fv *LogFieldValidator) EmitFloat64(key string, value float64) {
fv.validateNextField(key, reflect.Float64, value)
}
// EmitObject satisfies the Encoder interface
func (fv *LogFieldValidator) EmitObject(key string, value interface{}) {
fv.validateNextField(key, reflect.Interface, value)
}
// EmitLazyLogger satisfies the Encoder interface
func (fv *LogFieldValidator) EmitLazyLogger(value log.LazyLogger) {
fv.t.Error("Test infrastructure does not support EmitLazyLogger yet")
}
func (fv *LogFieldValidator) validateNextField(key string, actualKind reflect.Kind, value interface{}) {
// Reference the ExpectNextField caller in error messages.
_, file, line, _ := runtime.Caller(4)
if fv.nextKey != key {
fv.t.Errorf("%s:%d Bad key: expected %q, found %q", file, line, fv.nextKey, key)
}
if fv.nextKind != actualKind {
fv.t.Errorf("%s:%d Bad reflect.Kind: expected %v, found %v", file, line, fv.nextKind, actualKind)
return
}
if fv.nextValAsString != fmt.Sprint(value) {
fv.t.Errorf("%s:%d Bad value: expected %q, found %q", file, line, fv.nextValAsString, fmt.Sprint(value))
}
// All good.
}


@@ -1,13 +0,0 @@
package types
import "testing"
func TestTraceID(t *testing.T) {
traceID := TraceID{High: 1, Low: 2}
if len(traceID.ToHex()) != 32 {
t.Errorf("Expected zero-padded TraceID to have 32 characters")
}
}
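The 32-character expectation above corresponds to zero-padding each 64-bit half of the trace ID to 16 hex digits. A minimal sketch of that formatting follows; it is not necessarily the library's actual ToHex implementation, just the shape of output the test requires.

package main

import "fmt"

// toHex renders a 128-bit trace ID as 32 zero-padded hex characters:
// 16 for the high word followed by 16 for the low word.
func toHex(high, low uint64) string {
    return fmt.Sprintf("%016x%016x", high, low)
}

func main() {
    fmt.Println(toHex(1, 2)) // 00000000000000010000000000000002 (32 characters)
}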


@@ -1,50 +0,0 @@
package wire_test
import (
"testing"
"github.com/openzipkin/zipkin-go-opentracing"
"github.com/openzipkin/zipkin-go-opentracing/flag"
"github.com/openzipkin/zipkin-go-opentracing/types"
"github.com/openzipkin/zipkin-go-opentracing/wire"
)
func TestProtobufCarrier(t *testing.T) {
var carrier zipkintracer.DelegatingCarrier = &wire.ProtobufCarrier{}
traceID := types.TraceID{High: 1, Low: 2}
var spanID, parentSpanID uint64 = 3, 0
sampled := true
flags := flag.Debug | flag.Sampled | flag.SamplingSet
baggageKey, expVal := "key1", "val1"
carrier.SetState(traceID, spanID, &parentSpanID, sampled, flags)
carrier.SetBaggageItem(baggageKey, expVal)
gotTraceID, gotSpanID, gotParentSpanId, gotSampled, gotFlags := carrier.State()
if gotParentSpanId == nil {
t.Errorf("Expected a valid parentSpanID of 0 got nil (no parent)")
}
if gotFlags&flag.IsRoot == flag.IsRoot {
t.Errorf("Expected a child span with a valid parent span with id 0 got IsRoot flag")
}
if traceID != gotTraceID || spanID != gotSpanID || parentSpanID != *gotParentSpanId || sampled != gotSampled || flags != gotFlags {
t.Errorf("Wanted state %d %d %d %t %d, got %d %d %d %t %d", traceID, spanID, parentSpanID, sampled, flags, gotTraceID, gotSpanID, *gotParentSpanId, gotSampled, gotFlags)
}
gotBaggage := map[string]string{}
f := func(k, v string) {
gotBaggage[k] = v
}
carrier.GetBaggage(f)
value, ok := gotBaggage[baggageKey]
if !ok {
t.Errorf("Expected baggage item %s to exist", baggageKey)
}
if value != expVal {
t.Errorf("Expected key %s to be %s, got %s", baggageKey, expVal, value)
}
}