plugin/view: Advanced routing interface and new 'view' plugin (#5538)

* Introduce a new interface, "dnsserver.Viewer", that allows a plugin implementing it to decide whether a query should be routed into the plugin's server block.
* Add a new plugin, "view", that uses the new interface to let a user define expression-based conditions that a query must meet to be routed to that server block (see the sketch below).
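
To make the routing contract concrete, here is a minimal sketch of how a plugin might satisfy a Viewer-style interface. The method names (`Filter`, `ViewName`) and their signatures are assumptions for illustration only; the authoritative definition is the `dnsserver.Viewer` interface added in this commit.

```go
// Hypothetical sketch only: the method set below is assumed for illustration
// and is not copied from the commit.
package example

import (
	"context"
	"strings"

	"github.com/coredns/coredns/request"
)

// viewer captures the idea behind dnsserver.Viewer: before routing a query
// into a server block, the server asks the plugin whether the query belongs there.
type viewer interface {
	// Filter reports whether the query should be handled by this server block.
	Filter(ctx context.Context, state *request.Request) bool
	// ViewName names the view, e.g. for the new "view" metrics label.
	ViewName() string
}

// internalOnly is a toy implementation that only accepts queries for names
// under example.internal.
type internalOnly struct{}

func (internalOnly) Filter(_ context.Context, state *request.Request) bool {
	return strings.HasSuffix(state.Name(), ".example.internal.")
}

func (internalOnly) ViewName() string { return "internal" }

// Compile-time check that the toy type satisfies the sketched interface.
var _ viewer = internalOnly{}
```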

Signed-off-by: Chris O'Haver <cohaver@infoblox.com>
Author: Chris O'Haver
Date: 2022-09-08 14:56:27 -04:00
Committed by: GitHub
Parent: 1f0a41a665
Commit: b56b080a7c
36 changed files with 880 additions and 114 deletions


@@ -85,14 +85,14 @@ Entries with 0 TTL will remain in the cache until randomly evicted when the shar
 If monitoring is enabled (via the *prometheus* plugin) then the following metrics are exported:
-* `coredns_cache_entries{server, type, zones}` - Total elements in the cache by cache type.
-* `coredns_cache_hits_total{server, type, zones}` - Counter of cache hits by cache type.
-* `coredns_cache_misses_total{server, zones}` - Counter of cache misses. - Deprecated, derive misses from cache hits/requests counters.
-* `coredns_cache_requests_total{server, zones}` - Counter of cache requests.
-* `coredns_cache_prefetch_total{server, zones}` - Counter of times the cache has prefetched a cached item.
-* `coredns_cache_drops_total{server, zones}` - Counter of responses excluded from the cache due to request/response question name mismatch.
-* `coredns_cache_served_stale_total{server, zones}` - Counter of requests served from stale cache entries.
-* `coredns_cache_evictions_total{server, type, zones}` - Counter of cache evictions.
+* `coredns_cache_entries{server, type, zones, view}` - Total elements in the cache by cache type.
+* `coredns_cache_hits_total{server, type, zones, view}` - Counter of cache hits by cache type.
+* `coredns_cache_misses_total{server, zones, view}` - Counter of cache misses. - Deprecated, derive misses from cache hits/requests counters.
+* `coredns_cache_requests_total{server, zones, view}` - Counter of cache requests.
+* `coredns_cache_prefetch_total{server, zones, view}` - Counter of times the cache has prefetched a cached item.
+* `coredns_cache_drops_total{server, zones, view}` - Counter of responses excluded from the cache due to request/response question name mismatch.
+* `coredns_cache_served_stale_total{server, zones, view}` - Counter of requests served from stale cache entries.
+* `coredns_cache_evictions_total{server, type, zones, view}` - Counter of cache evictions.
 Cache types are either "denial" or "success". `Server` is the server handling the request, see the
 prometheus plugin for documentation.

plugin/cache/cache.go

@@ -22,6 +22,7 @@ type Cache struct {
 	Zones []string
 	zonesMetricLabel string
+	viewMetricLabel  string
 	ncache *cache.Cache
 	ncap   int
@@ -177,11 +178,11 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error {
 	if hasKey && duration > 0 {
 		if w.state.Match(res) {
 			w.set(res, key, mt, duration)
-			cacheSize.WithLabelValues(w.server, Success, w.zonesMetricLabel).Set(float64(w.pcache.Len()))
-			cacheSize.WithLabelValues(w.server, Denial, w.zonesMetricLabel).Set(float64(w.ncache.Len()))
+			cacheSize.WithLabelValues(w.server, Success, w.zonesMetricLabel, w.viewMetricLabel).Set(float64(w.pcache.Len()))
+			cacheSize.WithLabelValues(w.server, Denial, w.zonesMetricLabel, w.viewMetricLabel).Set(float64(w.ncache.Len()))
 		} else {
 			// Don't log it, but increment counter
-			cacheDrops.WithLabelValues(w.server, w.zonesMetricLabel).Inc()
+			cacheDrops.WithLabelValues(w.server, w.zonesMetricLabel, w.viewMetricLabel).Inc()
 		}
 	}
@@ -219,7 +220,7 @@ func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration
 			i.wildcard = w.wildcardFunc()
 		}
 		if w.pcache.Add(key, i) {
-			evictions.WithLabelValues(w.server, Success, w.zonesMetricLabel).Inc()
+			evictions.WithLabelValues(w.server, Success, w.zonesMetricLabel, w.viewMetricLabel).Inc()
 		}
 		// when pre-fetching, remove the negative cache entry if it exists
 		if w.prefetch {
@@ -236,7 +237,7 @@ func (w *ResponseWriter) set(m *dns.Msg, key uint64, mt response.Type, duration
 			i.wildcard = w.wildcardFunc()
 		}
 		if w.ncache.Add(key, i) {
-			evictions.WithLabelValues(w.server, Denial, w.zonesMetricLabel).Inc()
+			evictions.WithLabelValues(w.server, Denial, w.zonesMetricLabel, w.viewMetricLabel).Inc()
 		}
 	case response.OtherError:


@@ -60,7 +60,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg)
 			cw := newPrefetchResponseWriter(server, state, c)
 			go c.doPrefetch(ctx, state, cw, i, now)
 		}
-		servedStale.WithLabelValues(server, c.zonesMetricLabel).Inc()
+		servedStale.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
 	} else if c.shouldPrefetch(i, now) {
 		cw := newPrefetchResponseWriter(server, state, c)
 		go c.doPrefetch(ctx, state, cw, i, now)
@@ -89,7 +89,7 @@ func wildcardFunc(ctx context.Context) func() string {
 }
 func (c *Cache) doPrefetch(ctx context.Context, state request.Request, cw *ResponseWriter, i *item, now time.Time) {
-	cachePrefetches.WithLabelValues(cw.server, c.zonesMetricLabel).Inc()
+	cachePrefetches.WithLabelValues(cw.server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
 	c.doRefresh(ctx, state, cw)
 	// When prefetching we loose the item i, and with it the frequency
@@ -122,13 +122,13 @@ func (c *Cache) Name() string { return "cache" }
 // getIgnoreTTL unconditionally returns an item if it exists in the cache.
 func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item {
 	k := hash(state.Name(), state.QType())
-	cacheRequests.WithLabelValues(server, c.zonesMetricLabel).Inc()
+	cacheRequests.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
 	if i, ok := c.ncache.Get(k); ok {
 		itm := i.(*item)
 		ttl := itm.ttl(now)
 		if itm.matches(state) && (ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds()))) {
-			cacheHits.WithLabelValues(server, Denial, c.zonesMetricLabel).Inc()
+			cacheHits.WithLabelValues(server, Denial, c.zonesMetricLabel, c.viewMetricLabel).Inc()
 			return i.(*item)
 		}
 	}
@@ -136,11 +136,11 @@ func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string
 		itm := i.(*item)
 		ttl := itm.ttl(now)
 		if itm.matches(state) && (ttl > 0 || (c.staleUpTo > 0 && -ttl < int(c.staleUpTo.Seconds()))) {
-			cacheHits.WithLabelValues(server, Success, c.zonesMetricLabel).Inc()
+			cacheHits.WithLabelValues(server, Success, c.zonesMetricLabel, c.viewMetricLabel).Inc()
 			return i.(*item)
 		}
 	}
-	cacheMisses.WithLabelValues(server, c.zonesMetricLabel).Inc()
+	cacheMisses.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc()
 	return nil
 }


@@ -14,54 +14,54 @@ var (
 		Subsystem: "cache",
 		Name: "entries",
 		Help: "The number of elements in the cache.",
-	}, []string{"server", "type", "zones"})
+	}, []string{"server", "type", "zones", "view"})
 	// cacheRequests is a counter of all requests through the cache.
 	cacheRequests = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "requests_total",
 		Help: "The count of cache requests.",
-	}, []string{"server", "zones"})
+	}, []string{"server", "zones", "view"})
 	// cacheHits is counter of cache hits by cache type.
 	cacheHits = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "hits_total",
 		Help: "The count of cache hits.",
-	}, []string{"server", "type", "zones"})
+	}, []string{"server", "type", "zones", "view"})
 	// cacheMisses is the counter of cache misses. - Deprecated
 	cacheMisses = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "misses_total",
 		Help: "The count of cache misses. Deprecated, derive misses from cache hits/requests counters.",
-	}, []string{"server", "zones"})
+	}, []string{"server", "zones", "view"})
 	// cachePrefetches is the number of time the cache has prefetched a cached item.
 	cachePrefetches = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "prefetch_total",
 		Help: "The number of times the cache has prefetched a cached item.",
-	}, []string{"server", "zones"})
+	}, []string{"server", "zones", "view"})
 	// cacheDrops is the number responses that are not cached, because the reply is malformed.
 	cacheDrops = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "drops_total",
 		Help: "The number responses that are not cached, because the reply is malformed.",
-	}, []string{"server", "zones"})
+	}, []string{"server", "zones", "view"})
 	// servedStale is the number of requests served from stale cache entries.
 	servedStale = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "served_stale_total",
 		Help: "The number of requests served from stale cache entries.",
-	}, []string{"server", "zones"})
+	}, []string{"server", "zones", "view"})
 	// evictions is the counter of cache evictions.
 	evictions = promauto.NewCounterVec(prometheus.CounterOpts{
 		Namespace: plugin.Namespace,
 		Subsystem: "cache",
 		Name: "evictions_total",
 		Help: "The count of cache evictions.",
-	}, []string{"server", "type", "zones"})
+	}, []string{"server", "type", "zones", "view"})
 )
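
Every cache metric above gains a trailing `view` label, which is why each `WithLabelValues` call site in the hunks above now passes one extra value, in declaration order; for a server block without a view, the zero-valued `viewMetricLabel` means the label is simply the empty string. A small self-contained sketch of that pattern (plain Prometheus client code, not taken from CoreDNS):

```go
// Stand-alone sketch: appending a label to a metric vector means every
// WithLabelValues call must supply a value for it, in the declared order.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	hits := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "coredns",
		Subsystem: "cache",
		Name:      "hits_total",
		Help:      "The count of cache hits.",
	}, []string{"server", "type", "zones", "view"})

	// A server block without a view: the view label value is the empty string.
	hits.WithLabelValues("dns://:53", "success", ".", "").Inc()
	// A server block whose view is named "internal".
	hits.WithLabelValues("dns://:53", "success", ".", "internal").Inc()

	// Two distinct series now exist, differing only in the view label.
	fmt.Println(testutil.CollectAndCount(hits)) // 2
}
```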


@@ -23,6 +23,12 @@ func setup(c *caddy.Controller) error {
 	if err != nil {
 		return plugin.Error("cache", err)
 	}
+	c.OnStartup(func() error {
+		ca.viewMetricLabel = dnsserver.GetConfig(c).ViewName
+		return nil
+	})
 	dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
 		ca.Next = next
 		return ca
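
The hook above copies the server block's `ViewName` into the cache's metric label at startup rather than while parsing the Corefile. A plausible reading (not stated in the diff) is that the value is only populated once every plugin in the block, including *view*, has finished its own setup, so deferring the read to `OnStartup` avoids depending on plugin setup order. A minimal sketch of the same pattern for a hypothetical plugin named `example`:

```go
// Sketch of the OnStartup pattern used by the cache setup above; the plugin
// name "example" and its fields are hypothetical.
package example

import (
	"context"

	"github.com/coredns/caddy"
	"github.com/coredns/coredns/core/dnsserver"
	"github.com/coredns/coredns/plugin"
	"github.com/miekg/dns"
)

// Example is a toy handler that records the view name of its server block.
type Example struct {
	Next            plugin.Handler
	viewMetricLabel string
}

func (e *Example) Name() string { return "example" }

func (e *Example) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
	// Hand the query to the next plugin; a real plugin would use the stored
	// view label when recording metrics here.
	return plugin.NextOrFailure(e.Name(), e.Next, ctx, w, r)
}

func setup(c *caddy.Controller) error {
	e := &Example{}
	c.OnStartup(func() error {
		// By startup time all setups in this block have run, so ViewName is
		// populated when a view plugin is configured (empty string otherwise).
		e.viewMetricLabel = dnsserver.GetConfig(c).ViewName
		return nil
	})
	dnsserver.GetConfig(c).AddPlugin(func(next plugin.Handler) plugin.Handler {
		e.Next = next
		return e
	})
	return nil
}
```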