plugin/dnssec: use entire RRset as key input (#4537)

* plugin/dnssec: use entire RRset as key input

This uses the entire rrset as input for the hash key; this is to detect
differences in the RRset and generate the correct signature.

As this would then lead to unbounded growth, we periodically (every 8h)
prune the cache of old entries. In theory we could rely on the random
eviction, but it seems nicer to do this in a maintenance loop so that we
remove the unused ones. This required adding a Walk function to the
plugin/pkg/cache.

Signed-off-by: Miek Gieben <miek@miek.nl>

* Update plugin/dnssec/cache.go

Co-authored-by: Chris O'Haver <cohaver@infoblox.com>

Co-authored-by: Chris O'Haver <cohaver@infoblox.com>
This commit is contained in:
Miek Gieben
2021-04-05 15:45:28 +02:00
committed by GitHub
parent 454bc9e0b9
commit 13cef2ee09
5 changed files with 98 additions and 15 deletions

View File

@@ -31,8 +31,11 @@ ZSK/KSK split. All signing operations are done online.
Authenticated denial of existence is implemented with NSEC black lies. Using ECDSA as an algorithm Authenticated denial of existence is implemented with NSEC black lies. Using ECDSA as an algorithm
is preferred as this leads to smaller signatures (compared to RSA). NSEC3 is *not* supported. is preferred as this leads to smaller signatures (compared to RSA). NSEC3 is *not* supported.
As the *dnssec* plugin can't see the original TTL of the RRSets it signs, it will always use 3600s
as the value.
If multiple *dnssec* plugins are specified in the same zone, the last one specified will be If multiple *dnssec* plugins are specified in the same zone, the last one specified will be
used (See [bugs](#bugs)). used.
* **ZONES** zones that should be signed. If empty, the zones from the configuration block * **ZONES** zones that should be signed. If empty, the zones from the configuration block
are used. are used.

View File

@@ -3,8 +3,9 @@ package dnssec
import ( import (
"hash/fnv" "hash/fnv"
"io" "io"
"strconv" "time"
"strings"
"github.com/coredns/coredns/plugin/pkg/cache"
"github.com/miekg/dns" "github.com/miekg/dns"
) )
@@ -12,16 +13,36 @@ import (
// hash serializes the RRset and returns a signature cache key. // hash serializes the RRset and returns a signature cache key.
func hash(rrs []dns.RR) uint64 { func hash(rrs []dns.RR) uint64 {
h := fnv.New64() h := fnv.New64()
// Only need this to be unique for ownername + qtype (+class), but we // we need to hash the entire RRset to pick the correct sig, if the rrset
// only care about IN. Its already an RRSet, so the ownername is the // changes for whatever reason we should resign.
// same as is the qtype. Take the first one and construct the hash // We could use wirefmt, or the string format, both create garbage when creating
// string that creates the key // the hash key. And of course is a uint64 big enough?
io.WriteString(h, strings.ToLower(rrs[0].Header().Name)) for _, rr := range rrs {
typ, ok := dns.TypeToString[rrs[0].Header().Rrtype] io.WriteString(h, rr.String())
if !ok { }
typ = "TYPE" + strconv.FormatUint(uint64(rrs[0].Header().Rrtype), 10) return h.Sum64()
}
func periodicClean(c *cache.Cache, stop <-chan struct{}) {
tick := time.NewTicker(8 * time.Hour)
defer tick.Stop()
for {
select {
case <-tick.C:
// we sign for 8 days, check if a signature in the cache reached 75% of that (i.e. 6), if found delete
// the signature
is75 := time.Now().UTC().Add(sixDays)
c.Walk(func(items map[uint64]interface{}, key uint64) bool {
sig := items[key].(*dns.RRSIG)
if !sig.ValidityPeriod(is75) {
delete(items, key)
}
return true
})
case <-stop:
return
}
} }
io.WriteString(h, typ)
i := h.Sum64()
return i
} }

View File

@@ -24,6 +24,17 @@ func setup(c *caddy.Controller) error {
} }
ca := cache.New(capacity) ca := cache.New(capacity)
stop := make(chan struct{})
c.OnShutdown(func() error {
close(stop)
return nil
})
c.OnStartup(func() error {
go periodicClean(ca, stop)
return nil
})
dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler { dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
return New(zones, keys, splitkeys, next, ca) return New(zones, keys, splitkeys, next, ca)
}) })

View File

@@ -72,6 +72,13 @@ func (c *Cache) Len() int {
return l return l
} }
// Walk walks each shard in the cache, applying f to every element.
// NOTE(review): f's boolean return only stops the walk within a single
// shard; the remaining shards are still visited — confirm callers expect
// this.
func (c *Cache) Walk(f func(map[uint64]interface{}, uint64) bool) {
	for i := range c.shards {
		c.shards[i].Walk(f)
	}
}
// newShard returns a new shard with size. // newShard returns a new shard with size.
func newShard(size int) *shard { return &shard{items: make(map[uint64]interface{}), size: size} } func newShard(size int) *shard { return &shard{items: make(map[uint64]interface{}), size: size} }
@@ -127,4 +134,24 @@ func (s *shard) Len() int {
return l return l
} }
// Walk walks the shard; for each element the function f is executed while
// holding the write lock, so f may safely delete entries from the items
// map. Keys are snapshotted under a read lock first. If f returns false
// the walk stops early.
func (s *shard) Walk(f func(map[uint64]interface{}, uint64) bool) {
	// Size AND fill the snapshot under the read lock. Sizing the slice
	// before taking the lock races with concurrent Adds: if the map grows
	// in between, the fill loop indexes past the end of the slice, and if
	// it shrinks, trailing zero keys are handed to f for absent entries.
	s.RLock()
	keys := make([]uint64, 0, len(s.items))
	for k := range s.items {
		keys = append(keys, k)
	}
	s.RUnlock()
	for _, k := range keys {
		s.Lock()
		if _, present := s.items[k]; !present {
			// Entry was removed (possibly by f itself) after the snapshot;
			// don't hand a stale key to f.
			s.Unlock()
			continue
		}
		ok := f(s.items, k)
		s.Unlock()
		if !ok {
			return
		}
	}
}
const shardSize = 256 const shardSize = 256

View File

@@ -1,6 +1,8 @@
package cache package cache
import "testing" import (
"testing"
)
func TestCacheAddAndGet(t *testing.T) { func TestCacheAddAndGet(t *testing.T) {
const N = shardSize * 4 const N = shardSize * 4
@@ -53,6 +55,25 @@ func TestCacheSharding(t *testing.T) {
} }
} }
func TestCacheWalk(t *testing.T) {
	const n = 10 * 2
	c := New(10)
	exp := make([]int, n)
	for i := 0; i < n; i++ {
		c.Add(uint64(i), 1)
		exp[i] = 1
	}
	got := make([]int, n)
	c.Walk(func(items map[uint64]interface{}, key uint64) bool {
		got[key] = items[key].(int)
		return true
	})
	for i, want := range exp {
		if got[i] != want {
			t.Errorf("Expected %d, got %d", want, got[i])
		}
	}
}
func BenchmarkCache(b *testing.B) { func BenchmarkCache(b *testing.B) {
b.ReportAllocs() b.ReportAllocs()