mirror of
https://github.com/coredns/coredns.git
synced 2025-12-30 13:40:30 -05:00
Remove the word middleware (#1067)
* Rename middleware to plugin first pass; mostly used 'sed', few spots where I manually changed text. This still builds a coredns binary. * fmt error * Rename AddMiddleware to AddPlugin * Readd AddMiddleware to remain backwards compat
This commit is contained in:
68
plugin/cache/README.md
vendored
Normal file
68
plugin/cache/README.md
vendored
Normal file
@@ -0,0 +1,68 @@
|
||||
# cache
|
||||
|
||||
*cache* enables a frontend cache. It will cache all records except zone transfers and metadata records.
|
||||
|
||||
## Syntax
|
||||
|
||||
~~~ txt
|
||||
cache [TTL] [ZONES...]
|
||||
~~~
|
||||
|
||||
* **TTL** max TTL in seconds. If not specified, the maximum TTL will be used which is 3600 for
|
||||
noerror responses and 1800 for denial of existence ones.
|
||||
Setting a TTL of 300, i.e. *cache 300*, would cache records up to 300 seconds.
|
||||
* **ZONES** zones it should cache for. If empty, the zones from the configuration block are used.
|
||||
|
||||
Each element in the cache is cached according to its TTL (with **TTL** as the max).
|
||||
For the negative cache, the SOA's MinTTL value is used. A cache can contain up to 10,000 items by
|
||||
default. A TTL of zero is not allowed.
|
||||
|
||||
If you want more control:
|
||||
|
||||
~~~ txt
|
||||
cache [TTL] [ZONES...] {
|
||||
success CAPACITY [TTL]
|
||||
denial CAPACITY [TTL]
|
||||
prefetch AMOUNT [[DURATION] [PERCENTAGE%]]
|
||||
}
|
||||
~~~
|
||||
|
||||
* **TTL** and **ZONES** as above.
|
||||
* `success`, override the settings for caching successful responses, **CAPACITY** indicates the maximum
|
||||
number of packets we cache before we start evicting (*randomly*). **TTL** overrides the cache maximum TTL.
|
||||
* `denial`, override the settings for caching denial of existence responses, **CAPACITY** indicates the maximum
|
||||
number of packets we cache before we start evicting (LRU). **TTL** overrides the cache maximum TTL.
|
||||
There is a third category (`error`) but those responses are never cached.
|
||||
* `prefetch`, will prefetch popular items when they are about to be expunged from the cache.
|
||||
Popular means **AMOUNT** queries have been seen with no gaps of **DURATION** or more between them.
|
||||
**DURATION** defaults to 1m. Prefetching will happen when the TTL drops below **PERCENTAGE**,
|
||||
which defaults to `10%`. Values should be in the range `[10%, 90%]`. Note the percent sign is
|
||||
mandatory. **PERCENTAGE** is treated as an `int`.
|
||||
|
||||
The minimum TTL allowed on resource records is 5 seconds.
|
||||
|
||||
## Metrics
|
||||
|
||||
If monitoring is enabled (via the *prometheus* directive) then the following metrics are exported:
|
||||
|
||||
* coredns_cache_size{type} - Total elements in the cache by cache type.
|
||||
* coredns_cache_capacity{type} - Total capacity of the cache by cache type.
|
||||
* coredns_cache_hits_total{type} - Counter of cache hits by cache type.
|
||||
* coredns_cache_misses_total - Counter of cache misses.
|
||||
|
||||
Cache types are either "denial" or "success".
|
||||
|
||||
## Examples
|
||||
|
||||
Enable caching for all zones, but cap everything to a TTL of 10 seconds:
|
||||
|
||||
~~~
|
||||
cache 10
|
||||
~~~
|
||||
|
||||
Proxy to Google Public DNS and only cache responses for example.org (or below).
|
||||
|
||||
~~~
|
||||
proxy . 8.8.8.8:53
|
||||
cache example.org
|
||||
~~~
|
||||
167
plugin/cache/cache.go
vendored
Normal file
167
plugin/cache/cache.go
vendored
Normal file
@@ -0,0 +1,167 @@
|
||||
// Package cache implements a cache.
|
||||
package cache
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash/fnv"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/pkg/cache"
|
||||
"github.com/coredns/coredns/plugin/pkg/response"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Cache is plugin that looks up responses in a cache and caches replies.
// It has a success and a denial of existence cache.
type Cache struct {
	Next  plugin.Handler
	Zones []string // Zones this cache serves; queries outside them are passed through untouched.

	// Negative (denial of existence) cache, its capacity and its maximum TTL.
	ncache *cache.Cache
	ncap   int
	nttl   time.Duration

	// Positive (success) cache, its capacity and its maximum TTL.
	pcache *cache.Cache
	pcap   int
	pttl   time.Duration

	// Prefetch.
	prefetch   int           // Hits needed before prefetching kicks in; 0 disables prefetching.
	duration   time.Duration // Window in which the prefetch hits must be seen.
	percentage int           // Prefetch once the remaining TTL drops below this percentage of the original.
}
|
||||
|
||||
// Return key under which we store the item, -1 will be returned if we don't store the
|
||||
// message.
|
||||
// Currently we do not cache Truncated, errors zone transfers or dynamic update messages.
|
||||
func key(m *dns.Msg, t response.Type, do bool) int {
|
||||
// We don't store truncated responses.
|
||||
if m.Truncated {
|
||||
return -1
|
||||
}
|
||||
// Nor errors or Meta or Update
|
||||
if t == response.OtherError || t == response.Meta || t == response.Update {
|
||||
return -1
|
||||
}
|
||||
|
||||
return int(hash(m.Question[0].Name, m.Question[0].Qtype, do))
|
||||
}
|
||||
|
||||
// one and zero are the bytes mixed into the hash to encode the DO bit,
// keeping DO and non-DO answers apart in the cache.
var one = []byte("1")
var zero = []byte("0")

// hash returns an FNV-1 hash over the DO bit, the query type and the
// query name. The name is lowercased first, so lookups are
// case-insensitive.
func hash(qname string, qtype uint16, do bool) uint32 {
	h := fnv.New32()

	if do {
		h.Write(one)
	} else {
		h.Write(zero)
	}

	var b [2]byte
	binary.BigEndian.PutUint16(b[:], qtype)
	h.Write(b[:])

	// Lowercase the name into a single buffer and hash it with one Write.
	// The previous per-character h.Write([]byte{c}) allocated a one-byte
	// slice for every character of every query name — this is a hot path.
	// The hash value is unchanged: FNV is a byte-stream hash, so the
	// chunking of Writes does not matter.
	buf := make([]byte, len(qname))
	for i := 0; i < len(qname); i++ {
		c := qname[i]
		if c >= 'A' && c <= 'Z' {
			c += 'a' - 'A'
		}
		buf[i] = c
	}
	h.Write(buf)

	return h.Sum32()
}
|
||||
|
||||
// ResponseWriter is a response writer that caches the reply message.
type ResponseWriter struct {
	dns.ResponseWriter
	*Cache

	// prefetch, when true, suppresses writing the reply back to the
	// client; the response is only stored in the cache.
	prefetch bool // When true write nothing back to the client.
}
|
||||
|
||||
// WriteMsg implements the dns.ResponseWriter interface.
|
||||
func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
|
||||
do := false
|
||||
mt, opt := response.Typify(res, time.Now().UTC())
|
||||
if opt != nil {
|
||||
do = opt.Do()
|
||||
}
|
||||
|
||||
// key returns empty string for anything we don't want to cache.
|
||||
key := key(res, mt, do)
|
||||
|
||||
duration := w.pttl
|
||||
if mt == response.NameError || mt == response.NoData {
|
||||
duration = w.nttl
|
||||
}
|
||||
|
||||
msgTTL := minMsgTTL(res, mt)
|
||||
if msgTTL < duration {
|
||||
duration = msgTTL
|
||||
}
|
||||
|
||||
if key != -1 {
|
||||
w.set(res, key, mt, duration)
|
||||
|
||||
cacheSize.WithLabelValues(Success).Set(float64(w.pcache.Len()))
|
||||
cacheSize.WithLabelValues(Denial).Set(float64(w.ncache.Len()))
|
||||
}
|
||||
|
||||
if w.prefetch {
|
||||
return nil
|
||||
}
|
||||
|
||||
return w.ResponseWriter.WriteMsg(res)
|
||||
}
|
||||
|
||||
func (w *ResponseWriter) set(m *dns.Msg, key int, mt response.Type, duration time.Duration) {
|
||||
if key == -1 {
|
||||
log.Printf("[ERROR] Caching called with empty cache key")
|
||||
return
|
||||
}
|
||||
|
||||
switch mt {
|
||||
case response.NoError, response.Delegation:
|
||||
i := newItem(m, duration)
|
||||
w.pcache.Add(uint32(key), i)
|
||||
|
||||
case response.NameError, response.NoData:
|
||||
i := newItem(m, duration)
|
||||
w.ncache.Add(uint32(key), i)
|
||||
|
||||
case response.OtherError:
|
||||
// don't cache these
|
||||
default:
|
||||
log.Printf("[WARNING] Caching called with unknown classification: %d", mt)
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements the dns.ResponseWriter interface.
|
||||
func (w *ResponseWriter) Write(buf []byte) (int, error) {
|
||||
log.Printf("[WARNING] Caching called with Write: not caching reply")
|
||||
if w.prefetch {
|
||||
return 0, nil
|
||||
}
|
||||
n, err := w.ResponseWriter.Write(buf)
|
||||
return n, err
|
||||
}
|
||||
|
||||
const (
	// maxTTL is the maximum TTL for positive (noerror) responses;
	// maxNTTL is the maximum for denial of existence responses.
	maxTTL  = 1 * time.Hour
	maxNTTL = 30 * time.Minute

	// minTTL is the lowest TTL handed out on cached replies.
	minTTL = 5 // seconds

	defaultCap = 10000 // default capacity of the cache.

	// Success is the class for caching positive caching.
	Success = "success"
	// Denial is the class defined for negative caching.
	Denial = "denial"
)
|
||||
251
plugin/cache/cache_test.go
vendored
Normal file
251
plugin/cache/cache_test.go
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/pkg/cache"
|
||||
"github.com/coredns/coredns/plugin/pkg/response"
|
||||
"github.com/coredns/coredns/plugin/test"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
type cacheTestCase struct {
|
||||
test.Case
|
||||
in test.Case
|
||||
AuthenticatedData bool
|
||||
Authoritative bool
|
||||
RecursionAvailable bool
|
||||
Truncated bool
|
||||
shouldCache bool
|
||||
}
|
||||
|
||||
var cacheTestCases = []cacheTestCase{
|
||||
{
|
||||
RecursionAvailable: true, AuthenticatedData: true, Authoritative: true,
|
||||
Case: test.Case{
|
||||
Qname: "miek.nl.", Qtype: dns.TypeMX,
|
||||
Answer: []dns.RR{
|
||||
test.MX("miek.nl. 3600 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("miek.nl. 3600 IN MX 10 aspmx2.googlemail.com."),
|
||||
},
|
||||
},
|
||||
in: test.Case{
|
||||
Qname: "miek.nl.", Qtype: dns.TypeMX,
|
||||
Answer: []dns.RR{
|
||||
test.MX("miek.nl. 3601 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("miek.nl. 3601 IN MX 10 aspmx2.googlemail.com."),
|
||||
},
|
||||
},
|
||||
shouldCache: true,
|
||||
},
|
||||
{
|
||||
RecursionAvailable: true, AuthenticatedData: true, Authoritative: true,
|
||||
Case: test.Case{
|
||||
Qname: "mIEK.nL.", Qtype: dns.TypeMX,
|
||||
Answer: []dns.RR{
|
||||
test.MX("mIEK.nL. 3600 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("mIEK.nL. 3600 IN MX 10 aspmx2.googlemail.com."),
|
||||
},
|
||||
},
|
||||
in: test.Case{
|
||||
Qname: "mIEK.nL.", Qtype: dns.TypeMX,
|
||||
Answer: []dns.RR{
|
||||
test.MX("mIEK.nL. 3601 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("mIEK.nL. 3601 IN MX 10 aspmx2.googlemail.com."),
|
||||
},
|
||||
},
|
||||
shouldCache: true,
|
||||
},
|
||||
{
|
||||
Truncated: true,
|
||||
Case: test.Case{
|
||||
Qname: "miek.nl.", Qtype: dns.TypeMX,
|
||||
Answer: []dns.RR{test.MX("miek.nl. 1800 IN MX 1 aspmx.l.google.com.")},
|
||||
},
|
||||
in: test.Case{},
|
||||
shouldCache: false,
|
||||
},
|
||||
{
|
||||
RecursionAvailable: true, Authoritative: true,
|
||||
Case: test.Case{
|
||||
Rcode: dns.RcodeNameError,
|
||||
Qname: "example.org.", Qtype: dns.TypeA,
|
||||
Ns: []dns.RR{
|
||||
test.SOA("example.org. 3600 IN SOA sns.dns.icann.org. noc.dns.icann.org. 2016082540 7200 3600 1209600 3600"),
|
||||
},
|
||||
},
|
||||
in: test.Case{
|
||||
Rcode: dns.RcodeNameError,
|
||||
Qname: "example.org.", Qtype: dns.TypeA,
|
||||
Ns: []dns.RR{
|
||||
test.SOA("example.org. 3600 IN SOA sns.dns.icann.org. noc.dns.icann.org. 2016082540 7200 3600 1209600 3600"),
|
||||
},
|
||||
},
|
||||
shouldCache: true,
|
||||
},
|
||||
{
|
||||
RecursionAvailable: true, Authoritative: true,
|
||||
Case: test.Case{
|
||||
Qname: "miek.nl.", Qtype: dns.TypeMX,
|
||||
Do: true,
|
||||
Answer: []dns.RR{
|
||||
test.MX("miek.nl. 3600 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("miek.nl. 3600 IN MX 10 aspmx2.googlemail.com."),
|
||||
test.RRSIG("miek.nl. 3600 IN RRSIG MX 8 2 1800 20160521031301 20160421031301 12051 miek.nl. lAaEzB5teQLLKyDenatmyhca7blLRg9DoGNrhe3NReBZN5C5/pMQk8Jc u25hv2fW23/SLm5IC2zaDpp2Fzgm6Jf7e90/yLcwQPuE7JjS55WMF+HE LEh7Z6AEb+Iq4BWmNhUz6gPxD4d9eRMs7EAzk13o1NYi5/JhfL6IlaYy qkc="),
|
||||
},
|
||||
},
|
||||
in: test.Case{
|
||||
Qname: "miek.nl.", Qtype: dns.TypeMX,
|
||||
Do: true,
|
||||
Answer: []dns.RR{
|
||||
test.MX("miek.nl. 3600 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("miek.nl. 3600 IN MX 10 aspmx2.googlemail.com."),
|
||||
test.RRSIG("miek.nl. 1800 IN RRSIG MX 8 2 1800 20160521031301 20160421031301 12051 miek.nl. lAaEzB5teQLLKyDenatmyhca7blLRg9DoGNrhe3NReBZN5C5/pMQk8Jc u25hv2fW23/SLm5IC2zaDpp2Fzgm6Jf7e90/yLcwQPuE7JjS55WMF+HE LEh7Z6AEb+Iq4BWmNhUz6gPxD4d9eRMs7EAzk13o1NYi5/JhfL6IlaYy qkc="),
|
||||
},
|
||||
},
|
||||
shouldCache: false,
|
||||
},
|
||||
{
|
||||
RecursionAvailable: true, Authoritative: true,
|
||||
Case: test.Case{
|
||||
Qname: "example.org.", Qtype: dns.TypeMX,
|
||||
Do: true,
|
||||
Answer: []dns.RR{
|
||||
test.MX("example.org. 3600 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("example.org. 3600 IN MX 10 aspmx2.googlemail.com."),
|
||||
test.RRSIG("example.org. 3600 IN RRSIG MX 8 2 1800 20170521031301 20170421031301 12051 miek.nl. lAaEzB5teQLLKyDenatmyhca7blLRg9DoGNrhe3NReBZN5C5/pMQk8Jc u25hv2fW23/SLm5IC2zaDpp2Fzgm6Jf7e90/yLcwQPuE7JjS55WMF+HE LEh7Z6AEb+Iq4BWmNhUz6gPxD4d9eRMs7EAzk13o1NYi5/JhfL6IlaYy qkc="),
|
||||
},
|
||||
},
|
||||
in: test.Case{
|
||||
Qname: "example.org.", Qtype: dns.TypeMX,
|
||||
Do: true,
|
||||
Answer: []dns.RR{
|
||||
test.MX("example.org. 3600 IN MX 1 aspmx.l.google.com."),
|
||||
test.MX("example.org. 3600 IN MX 10 aspmx2.googlemail.com."),
|
||||
test.RRSIG("example.org. 1800 IN RRSIG MX 8 2 1800 20170521031301 20170421031301 12051 miek.nl. lAaEzB5teQLLKyDenatmyhca7blLRg9DoGNrhe3NReBZN5C5/pMQk8Jc u25hv2fW23/SLm5IC2zaDpp2Fzgm6Jf7e90/yLcwQPuE7JjS55WMF+HE LEh7Z6AEb+Iq4BWmNhUz6gPxD4d9eRMs7EAzk13o1NYi5/JhfL6IlaYy qkc="),
|
||||
},
|
||||
},
|
||||
shouldCache: true,
|
||||
},
|
||||
}
|
||||
|
||||
func cacheMsg(m *dns.Msg, tc cacheTestCase) *dns.Msg {
|
||||
m.RecursionAvailable = tc.RecursionAvailable
|
||||
m.AuthenticatedData = tc.AuthenticatedData
|
||||
m.Authoritative = tc.Authoritative
|
||||
m.Rcode = tc.Rcode
|
||||
m.Truncated = tc.Truncated
|
||||
m.Answer = tc.in.Answer
|
||||
m.Ns = tc.in.Ns
|
||||
// m.Extra = tc.in.Extra don't copy Extra, because we don't care and fake EDNS0 DO with tc.Do.
|
||||
return m
|
||||
}
|
||||
|
||||
func newTestCache(ttl time.Duration) (*Cache, *ResponseWriter) {
|
||||
c := &Cache{Zones: []string{"."}, pcap: defaultCap, ncap: defaultCap, pttl: ttl, nttl: ttl}
|
||||
c.pcache = cache.New(c.pcap)
|
||||
c.ncache = cache.New(c.ncap)
|
||||
|
||||
crr := &ResponseWriter{ResponseWriter: nil, Cache: c}
|
||||
return c, crr
|
||||
}
|
||||
|
||||
func TestCache(t *testing.T) {
|
||||
now, _ := time.Parse(time.UnixDate, "Fri Apr 21 10:51:21 BST 2017")
|
||||
utc := now.UTC()
|
||||
|
||||
c, crr := newTestCache(maxTTL)
|
||||
|
||||
log.SetOutput(ioutil.Discard)
|
||||
|
||||
for _, tc := range cacheTestCases {
|
||||
m := tc.in.Msg()
|
||||
m = cacheMsg(m, tc)
|
||||
do := tc.in.Do
|
||||
|
||||
mt, _ := response.Typify(m, utc)
|
||||
k := key(m, mt, do)
|
||||
|
||||
crr.set(m, k, mt, c.pttl)
|
||||
|
||||
name := plugin.Name(m.Question[0].Name).Normalize()
|
||||
qtype := m.Question[0].Qtype
|
||||
|
||||
i, _ := c.get(time.Now().UTC(), name, qtype, do)
|
||||
ok := i != nil
|
||||
|
||||
if ok != tc.shouldCache {
|
||||
t.Errorf("cached message that should not have been cached: %s", name)
|
||||
continue
|
||||
}
|
||||
|
||||
if ok {
|
||||
resp := i.toMsg(m)
|
||||
|
||||
if !test.Header(t, tc.Case, resp) {
|
||||
t.Logf("%v\n", resp)
|
||||
continue
|
||||
}
|
||||
|
||||
if !test.Section(t, tc.Case, test.Answer, resp.Answer) {
|
||||
t.Logf("%v\n", resp)
|
||||
}
|
||||
if !test.Section(t, tc.Case, test.Ns, resp.Ns) {
|
||||
t.Logf("%v\n", resp)
|
||||
|
||||
}
|
||||
if !test.Section(t, tc.Case, test.Extra, resp.Extra) {
|
||||
t.Logf("%v\n", resp)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkCacheResponse(b *testing.B) {
|
||||
c := &Cache{Zones: []string{"."}, pcap: defaultCap, ncap: defaultCap, pttl: maxTTL, nttl: maxTTL}
|
||||
c.pcache = cache.New(c.pcap)
|
||||
c.ncache = cache.New(c.ncap)
|
||||
c.prefetch = 1
|
||||
c.duration = 1 * time.Second
|
||||
c.Next = BackendHandler()
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
reqs := make([]*dns.Msg, 5)
|
||||
for i, q := range []string{"example1", "example2", "a", "b", "ddd"} {
|
||||
reqs[i] = new(dns.Msg)
|
||||
reqs[i].SetQuestion(q+".example.org.", dns.TypeA)
|
||||
}
|
||||
|
||||
b.RunParallel(func(pb *testing.PB) {
|
||||
i := 0
|
||||
for pb.Next() {
|
||||
req := reqs[i]
|
||||
c.ServeDNS(ctx, &test.ResponseWriter{}, req)
|
||||
i++
|
||||
i = i % 5
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func BackendHandler() plugin.Handler {
|
||||
return plugin.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(r)
|
||||
m.Response = true
|
||||
m.RecursionAvailable = true
|
||||
|
||||
owner := m.Question[0].Name
|
||||
m.Answer = []dns.RR{test.A(owner + " 303 IN A 127.0.0.53")}
|
||||
|
||||
w.WriteMsg(m)
|
||||
return dns.RcodeSuccess, nil
|
||||
})
|
||||
}
|
||||
55
plugin/cache/freq/freq.go
vendored
Normal file
55
plugin/cache/freq/freq.go
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
// Package freq keeps track of last X seen events. The events themselves are not stored
|
||||
// here. So the Freq type should be added next to the thing it is tracking.
|
||||
package freq
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Freq tracks the frequencies of things.
|
||||
type Freq struct {
|
||||
// Last time we saw a query for this element.
|
||||
last time.Time
|
||||
// Number of this in the last time slice.
|
||||
hits int
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// New returns a new initialized Freq.
|
||||
func New(t time.Time) *Freq {
|
||||
return &Freq{last: t, hits: 0}
|
||||
}
|
||||
|
||||
// Update updates the number of hits. Last time seen will be set to now.
|
||||
// If the last time we've seen this entity is within now - d, we increment hits, otherwise
|
||||
// we reset hits to 1. It returns the number of hits.
|
||||
func (f *Freq) Update(d time.Duration, now time.Time) int {
|
||||
earliest := now.Add(-1 * d)
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
if f.last.Before(earliest) {
|
||||
f.last = now
|
||||
f.hits = 1
|
||||
return f.hits
|
||||
}
|
||||
f.last = now
|
||||
f.hits++
|
||||
return f.hits
|
||||
}
|
||||
|
||||
// Hits returns the number of hits that we have seen, according to the updates we have done to f.
|
||||
func (f *Freq) Hits() int {
|
||||
f.RLock()
|
||||
defer f.RUnlock()
|
||||
return f.hits
|
||||
}
|
||||
|
||||
// Reset resets f to time t and hits to hits.
|
||||
func (f *Freq) Reset(t time.Time, hits int) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.last = t
|
||||
f.hits = hits
|
||||
}
|
||||
36
plugin/cache/freq/freq_test.go
vendored
Normal file
36
plugin/cache/freq/freq_test.go
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
package freq
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestFreqUpdate(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
f := New(now)
|
||||
window := 1 * time.Minute
|
||||
|
||||
f.Update(window, time.Now().UTC())
|
||||
f.Update(window, time.Now().UTC())
|
||||
f.Update(window, time.Now().UTC())
|
||||
hitsCheck(t, f, 3)
|
||||
|
||||
f.Reset(now, 0)
|
||||
history := time.Now().UTC().Add(-3 * time.Minute)
|
||||
f.Update(window, history)
|
||||
hitsCheck(t, f, 1)
|
||||
}
|
||||
|
||||
func TestReset(t *testing.T) {
|
||||
f := New(time.Now().UTC())
|
||||
f.Update(1*time.Minute, time.Now().UTC())
|
||||
hitsCheck(t, f, 1)
|
||||
f.Reset(time.Now().UTC(), 0)
|
||||
hitsCheck(t, f, 0)
|
||||
}
|
||||
|
||||
func hitsCheck(t *testing.T, f *Freq, expected int) {
|
||||
if x := f.Hits(); x != expected {
|
||||
t.Fatalf("Expected hits to be %d, got %d", expected, x)
|
||||
}
|
||||
}
|
||||
119
plugin/cache/handler.go
vendored
Normal file
119
plugin/cache/handler.go
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/request"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// ServeDNS implements the plugin.Handler interface.
|
||||
func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
|
||||
state := request.Request{W: w, Req: r}
|
||||
|
||||
qname := state.Name()
|
||||
qtype := state.QType()
|
||||
zone := plugin.Zones(c.Zones).Matches(qname)
|
||||
if zone == "" {
|
||||
return c.Next.ServeDNS(ctx, w, r)
|
||||
}
|
||||
|
||||
do := state.Do() // TODO(): might need more from OPT record? Like the actual bufsize?
|
||||
|
||||
now := time.Now().UTC()
|
||||
|
||||
i, ttl := c.get(now, qname, qtype, do)
|
||||
if i != nil && ttl > 0 {
|
||||
resp := i.toMsg(r)
|
||||
|
||||
state.SizeAndDo(resp)
|
||||
resp, _ = state.Scrub(resp)
|
||||
w.WriteMsg(resp)
|
||||
|
||||
i.Freq.Update(c.duration, now)
|
||||
|
||||
pct := 100
|
||||
if i.origTTL != 0 { // you'll never know
|
||||
pct = int(float64(ttl) / float64(i.origTTL) * 100)
|
||||
}
|
||||
|
||||
if c.prefetch > 0 && i.Freq.Hits() > c.prefetch && pct < c.percentage {
|
||||
// When prefetching we loose the item i, and with it the frequency
|
||||
// that we've gathered sofar. See we copy the frequencies info back
|
||||
// into the new item that was stored in the cache.
|
||||
prr := &ResponseWriter{ResponseWriter: w, Cache: c, prefetch: true}
|
||||
plugin.NextOrFailure(c.Name(), c.Next, ctx, prr, r)
|
||||
|
||||
if i1, _ := c.get(now, qname, qtype, do); i1 != nil {
|
||||
i1.Freq.Reset(now, i.Freq.Hits())
|
||||
}
|
||||
}
|
||||
|
||||
return dns.RcodeSuccess, nil
|
||||
}
|
||||
|
||||
crr := &ResponseWriter{ResponseWriter: w, Cache: c}
|
||||
return plugin.NextOrFailure(c.Name(), c.Next, ctx, crr, r)
|
||||
}
|
||||
|
||||
// Name implements the Handler interface.
func (c *Cache) Name() string { return "cache" }
|
||||
|
||||
func (c *Cache) get(now time.Time, qname string, qtype uint16, do bool) (*item, int) {
|
||||
k := hash(qname, qtype, do)
|
||||
|
||||
if i, ok := c.ncache.Get(k); ok {
|
||||
cacheHits.WithLabelValues(Denial).Inc()
|
||||
return i.(*item), i.(*item).ttl(now)
|
||||
}
|
||||
|
||||
if i, ok := c.pcache.Get(k); ok {
|
||||
cacheHits.WithLabelValues(Success).Inc()
|
||||
return i.(*item), i.(*item).ttl(now)
|
||||
}
|
||||
cacheMisses.Inc()
|
||||
return nil, 0
|
||||
}
|
||||
|
||||
var (
|
||||
cacheSize = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "size",
|
||||
Help: "The number of elements in the cache.",
|
||||
}, []string{"type"})
|
||||
|
||||
cacheCapacity = prometheus.NewGaugeVec(prometheus.GaugeOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "capacity",
|
||||
Help: "The cache's capacity.",
|
||||
}, []string{"type"})
|
||||
|
||||
cacheHits = prometheus.NewCounterVec(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "hits_total",
|
||||
Help: "The count of cache hits.",
|
||||
}, []string{"type"})
|
||||
|
||||
cacheMisses = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Namespace: plugin.Namespace,
|
||||
Subsystem: subsystem,
|
||||
Name: "misses_total",
|
||||
Help: "The count of cache misses.",
|
||||
})
|
||||
)
|
||||
|
||||
const subsystem = "cache"
|
||||
|
||||
func init() {
|
||||
prometheus.MustRegister(cacheSize)
|
||||
prometheus.MustRegister(cacheCapacity)
|
||||
prometheus.MustRegister(cacheHits)
|
||||
prometheus.MustRegister(cacheMisses)
|
||||
}
|
||||
116
plugin/cache/item.go
vendored
Normal file
116
plugin/cache/item.go
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/coredns/coredns/plugin/cache/freq"
|
||||
"github.com/coredns/coredns/plugin/pkg/response"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// item is a cached DNS response together with the bookkeeping needed to
// replay it and to decide when to prefetch a refresh.
type item struct {
	Rcode              int
	Authoritative      bool
	AuthenticatedData  bool
	RecursionAvailable bool
	Answer             []dns.RR
	Ns                 []dns.RR
	Extra              []dns.RR // OPT records are stripped before storing (hop-by-hop).

	// origTTL is the TTL (in seconds) the item was stored with.
	origTTL uint32
	// stored is when the item was added to the cache.
	stored time.Time

	// Freq tracks how often this item is hit, to drive prefetching.
	*freq.Freq
}
|
||||
|
||||
func newItem(m *dns.Msg, d time.Duration) *item {
|
||||
i := new(item)
|
||||
i.Rcode = m.Rcode
|
||||
i.Authoritative = m.Authoritative
|
||||
i.AuthenticatedData = m.AuthenticatedData
|
||||
i.RecursionAvailable = m.RecursionAvailable
|
||||
i.Answer = m.Answer
|
||||
i.Ns = m.Ns
|
||||
i.Extra = make([]dns.RR, len(m.Extra))
|
||||
// Don't copy OPT record as these are hop-by-hop.
|
||||
j := 0
|
||||
for _, e := range m.Extra {
|
||||
if e.Header().Rrtype == dns.TypeOPT {
|
||||
continue
|
||||
}
|
||||
i.Extra[j] = e
|
||||
j++
|
||||
}
|
||||
i.Extra = i.Extra[:j]
|
||||
|
||||
i.origTTL = uint32(d.Seconds())
|
||||
i.stored = time.Now().UTC()
|
||||
|
||||
i.Freq = new(freq.Freq)
|
||||
|
||||
return i
|
||||
}
|
||||
|
||||
// toMsg turns i into a message, it tailors the reply to m.
|
||||
// The Authoritative bit is always set to 0, because the answer is from the cache.
|
||||
func (i *item) toMsg(m *dns.Msg) *dns.Msg {
|
||||
m1 := new(dns.Msg)
|
||||
m1.SetReply(m)
|
||||
|
||||
m1.Authoritative = false
|
||||
m1.AuthenticatedData = i.AuthenticatedData
|
||||
m1.RecursionAvailable = i.RecursionAvailable
|
||||
m1.Rcode = i.Rcode
|
||||
m1.Compress = true
|
||||
|
||||
m1.Answer = make([]dns.RR, len(i.Answer))
|
||||
m1.Ns = make([]dns.RR, len(i.Ns))
|
||||
m1.Extra = make([]dns.RR, len(i.Extra))
|
||||
|
||||
ttl := uint32(i.ttl(time.Now()))
|
||||
if ttl < minTTL {
|
||||
ttl = minTTL
|
||||
}
|
||||
|
||||
for j, r := range i.Answer {
|
||||
m1.Answer[j] = dns.Copy(r)
|
||||
m1.Answer[j].Header().Ttl = ttl
|
||||
}
|
||||
for j, r := range i.Ns {
|
||||
m1.Ns[j] = dns.Copy(r)
|
||||
m1.Ns[j].Header().Ttl = ttl
|
||||
}
|
||||
for j, r := range i.Extra {
|
||||
m1.Extra[j] = dns.Copy(r)
|
||||
if m1.Extra[j].Header().Rrtype != dns.TypeOPT {
|
||||
m1.Extra[j].Header().Ttl = ttl
|
||||
}
|
||||
}
|
||||
return m1
|
||||
}
|
||||
|
||||
func (i *item) ttl(now time.Time) int {
|
||||
ttl := int(i.origTTL) - int(now.UTC().Sub(i.stored).Seconds())
|
||||
return ttl
|
||||
}
|
||||
|
||||
func minMsgTTL(m *dns.Msg, mt response.Type) time.Duration {
|
||||
if mt != response.NoError && mt != response.NameError && mt != response.NoData {
|
||||
return 0
|
||||
}
|
||||
|
||||
minTTL := maxTTL
|
||||
for _, r := range append(m.Answer, m.Ns...) {
|
||||
switch mt {
|
||||
case response.NameError, response.NoData:
|
||||
if r.Header().Rrtype == dns.TypeSOA {
|
||||
return time.Duration(r.(*dns.SOA).Minttl) * time.Second
|
||||
}
|
||||
case response.NoError, response.Delegation:
|
||||
if r.Header().Ttl < uint32(minTTL.Seconds()) {
|
||||
minTTL = time.Duration(r.Header().Ttl) * time.Second
|
||||
}
|
||||
}
|
||||
}
|
||||
return minTTL
|
||||
}
|
||||
54
plugin/cache/prefech_test.go
vendored
Normal file
54
plugin/cache/prefech_test.go
vendored
Normal file
@@ -0,0 +1,54 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/pkg/cache"
|
||||
"github.com/coredns/coredns/plugin/pkg/dnsrecorder"
|
||||
|
||||
"github.com/coredns/coredns/plugin/test"
|
||||
"github.com/miekg/dns"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var p = false
|
||||
|
||||
func TestPrefetch(t *testing.T) {
|
||||
c := &Cache{Zones: []string{"."}, pcap: defaultCap, ncap: defaultCap, pttl: maxTTL, nttl: maxTTL}
|
||||
c.pcache = cache.New(c.pcap)
|
||||
c.ncache = cache.New(c.ncap)
|
||||
c.prefetch = 1
|
||||
c.duration = 1 * time.Second
|
||||
c.Next = PrefetchHandler(t, dns.RcodeSuccess, nil)
|
||||
|
||||
ctx := context.TODO()
|
||||
|
||||
req := new(dns.Msg)
|
||||
req.SetQuestion("lowttl.example.org.", dns.TypeA)
|
||||
|
||||
rec := dnsrecorder.New(&test.ResponseWriter{})
|
||||
|
||||
c.ServeDNS(ctx, rec, req)
|
||||
p = true // prefetch should be true for the 2nd fetch
|
||||
c.ServeDNS(ctx, rec, req)
|
||||
}
|
||||
|
||||
func PrefetchHandler(t *testing.T, rcode int, err error) plugin.Handler {
|
||||
return plugin.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion("lowttl.example.org.", dns.TypeA)
|
||||
m.Response = true
|
||||
m.RecursionAvailable = true
|
||||
m.Answer = append(m.Answer, test.A("lowttl.example.org. 80 IN A 127.0.0.53"))
|
||||
if p != w.(*ResponseWriter).prefetch {
|
||||
err = fmt.Errorf("cache prefetch not equal to p: got %t, want %t", p, w.(*ResponseWriter).prefetch)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
w.WriteMsg(m)
|
||||
return rcode, err
|
||||
})
|
||||
}
|
||||
170
plugin/cache/setup.go
vendored
Normal file
170
plugin/cache/setup.go
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/coredns/coredns/core/dnsserver"
|
||||
"github.com/coredns/coredns/plugin"
|
||||
"github.com/coredns/coredns/plugin/pkg/cache"
|
||||
|
||||
"github.com/mholt/caddy"
|
||||
)
|
||||
|
||||
// init registers the plugin with caddy under the name "cache" for the
// "dns" server type.
func init() {
	caddy.RegisterPlugin("cache", caddy.Plugin{
		ServerType: "dns",
		Action:     setup,
	})
}
|
||||
|
||||
// setup parses the cache directive from the Corefile and inserts the
// resulting Cache into the plugin chain.
func setup(c *caddy.Controller) error {
	ca, err := cacheParse(c)
	if err != nil {
		return plugin.Error("cache", err)
	}
	dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
		ca.Next = next
		return ca
	})

	// Export the capacity for the metrics. This only happens once, because this is a re-load change only.
	cacheCapacity.WithLabelValues(Success).Set(float64(ca.pcap))
	cacheCapacity.WithLabelValues(Denial).Set(float64(ca.ncap))

	return nil
}
|
||||
|
||||
// cacheParse parses the cache directive:
//
//	cache [TTL] [ZONES...] {
//	    success CAPACITY [TTL]
//	    denial  CAPACITY [TTL]
//	    prefetch AMOUNT [[DURATION] [PERCENTAGE%]]
//	}
//
// It returns a configured *Cache. Note: only the first occurrence of the
// directive is consumed; the function returns from inside the c.Next()
// loop.
func cacheParse(c *caddy.Controller) (*Cache, error) {

	// Defaults: 10k entries per cache, 1h positive / 30m negative TTL,
	// prefetch disabled.
	ca := &Cache{pcap: defaultCap, ncap: defaultCap, pttl: maxTTL, nttl: maxNTTL, prefetch: 0, duration: 1 * time.Minute}

	for c.Next() {
		// cache [ttl] [zones..]
		origins := make([]string, len(c.ServerBlockKeys))
		copy(origins, c.ServerBlockKeys)
		args := c.RemainingArgs()

		if len(args) > 0 {
			// first args may be just a number, then it is the ttl, if not it is a zone
			ttl, err := strconv.Atoi(args[0])
			if err == nil {
				// Reserve 0 (and smaller for future things)
				if ttl <= 0 {
					return nil, fmt.Errorf("cache TTL can not be zero or negative: %d", ttl)
				}
				// A bare TTL applies to both the positive and negative cache.
				ca.pttl = time.Duration(ttl) * time.Second
				ca.nttl = time.Duration(ttl) * time.Second
				args = args[1:]
			}
			if len(args) > 0 {
				// Remaining args are zones, overriding the server block keys.
				copy(origins, args)
			}
		}

		// Refinements? In an extra block.
		for c.NextBlock() {
			switch c.Val() {
			// first number is cap, second is an new ttl
			case Success:
				args := c.RemainingArgs()
				if len(args) == 0 {
					return nil, c.ArgErr()
				}
				pcap, err := strconv.Atoi(args[0])
				if err != nil {
					return nil, err
				}
				ca.pcap = pcap
				if len(args) > 1 {
					pttl, err := strconv.Atoi(args[1])
					if err != nil {
						return nil, err
					}
					// Reserve 0 (and smaller for future things)
					if pttl <= 0 {
						return nil, fmt.Errorf("cache TTL can not be zero or negative: %d", pttl)
					}
					ca.pttl = time.Duration(pttl) * time.Second
				}
			case Denial:
				args := c.RemainingArgs()
				if len(args) == 0 {
					return nil, c.ArgErr()
				}
				ncap, err := strconv.Atoi(args[0])
				if err != nil {
					return nil, err
				}
				ca.ncap = ncap
				if len(args) > 1 {
					nttl, err := strconv.Atoi(args[1])
					if err != nil {
						return nil, err
					}
					// Reserve 0 (and smaller for future things)
					if nttl <= 0 {
						return nil, fmt.Errorf("cache TTL can not be zero or negative: %d", nttl)
					}
					ca.nttl = time.Duration(nttl) * time.Second
				}
			case "prefetch":
				args := c.RemainingArgs()
				if len(args) == 0 || len(args) > 3 {
					return nil, c.ArgErr()
				}
				amount, err := strconv.Atoi(args[0])
				if err != nil {
					return nil, err
				}
				if amount < 0 {
					return nil, fmt.Errorf("prefetch amount should be positive: %d", amount)
				}
				ca.prefetch = amount

				// Defaults per README: 1m window, 10%.
				ca.duration = 1 * time.Minute
				ca.percentage = 10
				if len(args) > 1 {
					dur, err := time.ParseDuration(args[1])
					if err != nil {
						return nil, err
					}
					ca.duration = dur
				}
				if len(args) > 2 {
					// The percent sign is mandatory; strip and bounds-check it.
					pct := args[2]
					if x := pct[len(pct)-1]; x != '%' {
						return nil, fmt.Errorf("last character of percentage should be `%%`, but is: %q", x)
					}
					pct = pct[:len(pct)-1]

					num, err := strconv.Atoi(pct)
					if err != nil {
						return nil, err
					}
					if num < 10 || num > 90 {
						return nil, fmt.Errorf("percentage should fall in range [10, 90]: %d", num)
					}
					ca.percentage = num
				}

			default:
				return nil, c.ArgErr()
			}
		}

		for i := range origins {
			origins[i] = plugin.Host(origins[i]).Normalize()
		}

		ca.Zones = origins

		ca.pcache = cache.New(ca.pcap)
		ca.ncache = cache.New(ca.ncap)

		// Only the first directive occurrence is honored.
		return ca, nil
	}

	return nil, nil
}
|
||||
94
plugin/cache/setup_test.go
vendored
Normal file
94
plugin/cache/setup_test.go
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/caddy"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
shouldErr bool
|
||||
expectedNcap int
|
||||
expectedPcap int
|
||||
expectedNttl time.Duration
|
||||
expectedPttl time.Duration
|
||||
expectedPrefetch int
|
||||
}{
|
||||
{`cache`, false, defaultCap, defaultCap, maxNTTL, maxTTL, 0},
|
||||
{`cache {}`, false, defaultCap, defaultCap, maxNTTL, maxTTL, 0},
|
||||
{`cache example.nl {
|
||||
success 10
|
||||
}`, false, defaultCap, 10, maxNTTL, maxTTL, 0},
|
||||
{`cache example.nl {
|
||||
success 10
|
||||
denial 10 15
|
||||
}`, false, 10, 10, 15 * time.Second, maxTTL, 0},
|
||||
{`cache 25 example.nl {
|
||||
success 10
|
||||
denial 10 15
|
||||
}`, false, 10, 10, 15 * time.Second, 25 * time.Second, 0},
|
||||
{`cache aaa example.nl`, false, defaultCap, defaultCap, maxNTTL, maxTTL, 0},
|
||||
{`cache {
|
||||
prefetch 10
|
||||
}`, false, defaultCap, defaultCap, maxNTTL, maxTTL, 10},
|
||||
|
||||
// fails
|
||||
{`cache example.nl {
|
||||
success
|
||||
denial 10 15
|
||||
}`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache example.nl {
|
||||
success 15
|
||||
denial aaa
|
||||
}`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache example.nl {
|
||||
positive 15
|
||||
negative aaa
|
||||
}`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache 0 example.nl`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache -1 example.nl`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache 1 example.nl {
|
||||
positive 0
|
||||
}`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache 1 example.nl {
|
||||
positive 0
|
||||
prefetch -1
|
||||
}`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
{`cache 1 example.nl {
|
||||
prefetch 0 blurp
|
||||
}`, true, defaultCap, defaultCap, maxTTL, maxTTL, 0},
|
||||
}
|
||||
for i, test := range tests {
|
||||
c := caddy.NewTestController("dns", test.input)
|
||||
ca, err := cacheParse(c)
|
||||
if test.shouldErr && err == nil {
|
||||
t.Errorf("Test %v: Expected error but found nil", i)
|
||||
continue
|
||||
} else if !test.shouldErr && err != nil {
|
||||
t.Errorf("Test %v: Expected no error but found error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if test.shouldErr && err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if ca.ncap != test.expectedNcap {
|
||||
t.Errorf("Test %v: Expected ncap %v but found: %v", i, test.expectedNcap, ca.ncap)
|
||||
}
|
||||
if ca.pcap != test.expectedPcap {
|
||||
t.Errorf("Test %v: Expected pcap %v but found: %v", i, test.expectedPcap, ca.pcap)
|
||||
}
|
||||
if ca.nttl != test.expectedNttl {
|
||||
t.Errorf("Test %v: Expected nttl %v but found: %v", i, test.expectedNttl, ca.nttl)
|
||||
}
|
||||
if ca.pttl != test.expectedPttl {
|
||||
t.Errorf("Test %v: Expected pttl %v but found: %v", i, test.expectedPttl, ca.pttl)
|
||||
}
|
||||
if ca.prefetch != test.expectedPrefetch {
|
||||
t.Errorf("Test %v: Expected prefetch %v but found: %v", i, test.expectedPrefetch, ca.prefetch)
|
||||
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user