Merge branch 'traffic' of github.com:coredns/coredns into traffic

@@ -88,6 +88,8 @@ build:
 	mkdir -p build/windows/amd64 && $(MAKE) coredns BINARY=build/windows/amd64/$(NAME).exe SYSTEM="GOOS=windows GOARCH=amd64" CHECKS="" BUILDOPTS=""
 	@echo Building: linux/mips - $(VERSION)
 	mkdir -p build/linux/mips && $(MAKE) coredns BINARY=build/linux/mips/$(NAME) SYSTEM="GOOS=linux GOARCH=mips" CHECKS="" BUILDOPTS=""
+	@echo Building: linux/mips64le - $(VERSION)
+	mkdir -p build/linux/mips64le && $(MAKE) coredns BINARY=build/linux/mips64le/$(NAME) SYSTEM="GOOS=linux GOARCH=mips64le" CHECKS="" BUILDOPTS=""
 	@echo Building: linux/$(LINUX_ARCH) - $(VERSION) ;\
 	for arch in $(LINUX_ARCH); do \
 	    mkdir -p build/linux/$$arch && $(MAKE) coredns BINARY=build/linux/$$arch/$(NAME) SYSTEM="GOOS=linux GOARCH=$$arch" CHECKS="" BUILDOPTS="" ;\

@@ -100,6 +102,7 @@ tar:
 	tar -zcf release/$(NAME)_$(VERSION)_darwin_amd64.tgz -C build/darwin/amd64 $(NAME)
 	tar -zcf release/$(NAME)_$(VERSION)_windows_amd64.tgz -C build/windows/amd64 $(NAME).exe
 	tar -zcf release/$(NAME)_$(VERSION)_linux_mips.tgz -C build/linux/mips $(NAME)
+	tar -zcf release/$(NAME)_$(VERSION)_linux_mips64le.tgz -C build/linux/mips64le $(NAME)
 	for arch in $(LINUX_ARCH); do \
 	    tar -zcf release/$(NAME)_$(VERSION)_linux_$$arch.tgz -C build/linux/$$arch $(NAME) ;\
 	done
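
For readers unfamiliar with the Makefile conventions above: each target is a plain `go build` with GOOS/GOARCH pinned in the environment. A minimal sketch of what the new mips64le line amounts to, driven from Go tooling; the output path mirrors the build tree above and is illustrative, not part of the commit:

~~~ go
// Cross-compile coredns for linux/mips64le, as the Makefile line above does.
package main

import (
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("go", "build", "-o", "build/linux/mips64le/coredns")
	cmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=mips64le")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}
~~~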

README.md

@@ -77,25 +77,20 @@ The above command alone will have `coredns` binary generated.
 ## Examples

 When starting CoreDNS without any configuration, it loads the
-[*whoami*](https://coredns.io/plugins/whoami) plugin and starts listening on port 53 (override with
-`-dns.port`), it should show the following:
+[*whoami*](https://coredns.io/plugins/whoami) and [*log*](https://coredns.io/plugins/log) plugins
+and starts listening on port 53 (override with `-dns.port`), it should show the following:

 ~~~ txt
 .:53
-______ ____ _ _______
-/ ____/___ ________ / __ \/ | / / ___/ ~ CoreDNS-1.6.3
-/ / / __ \/ ___/ _ \/ / / / |/ /\__ \ ~ linux/amd64, go1.13,
-/ /___/ /_/ / / / __/ /_/ / /| /___/ /
-\____/\____/_/ \___/_____/_/ |_//____/
+CoreDNS-1.6.6
+linux/amd64, go1.13.5, aa8c32
 ~~~

 Any query sent to port 53 should return some information; your sending address, port and protocol
-used.
+used. The query should also be logged to standard output.

 If you have a Corefile without a port number specified it will, by default, use port 53, but you can
-override the port with the `-dns.port` flag:
-
-`./coredns -dns.port 1053`, runs the server on port 1053.
+override the port with the `-dns.port` flag: `coredns -dns.port 1053`, runs the server on port 1053.

 Start a simple proxy. You'll need to be root to start listening on port 53.

@@ -108,11 +103,11 @@ Start a simple proxy. You'll need to be root to start listening on port 53.
 }
 ~~~

-Just start CoreDNS: `./coredns`. Then just query on that port (53). The query should be forwarded
-to 8.8.8.8 and the response will be returned. Each query should also show up in the log which is
-printed on standard output.
+Start CoreDNS and then query on that port (53). The query should be forwarded to 8.8.8.8 and the
+response will be returned. Each query should also show up in the log which is printed on standard
+output.

-Serve the (NSEC) DNSSEC-signed `example.org` on port 1053, with errors and logging sent to standard
+To serve the (NSEC) DNSSEC-signed `example.org` on port 1053, with errors and logging sent to standard
 output. Allow zone transfers to everybody, but specifically mention 1 IP address so that CoreDNS can
 send notifies to it.
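
To exercise the forwarding example above from a client, any DNS tool works (`dig @127.0.0.1 example.org`); here is a minimal sketch using github.com/miekg/dns, the DNS library CoreDNS itself is built on — the query name is illustrative:

~~~ go
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeA)
	c := new(dns.Client)
	// Send the query to the locally running CoreDNS; the reply should be the
	// answer forwarded from 8.8.8.8, and the query should appear in the log.
	r, _, err := c.Exchange(m, "127.0.0.1:53")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(r)
}
~~~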

@@ -139,6 +134,7 @@ example.org:1053 {
     errors
     log
 }

 . {
+    any
     forward . 8.8.8.8:53

go.mod

@@ -7,10 +7,10 @@ require (
 	github.com/Azure/azure-sdk-for-go v32.6.0+incompatible
 	github.com/Azure/go-autorest/autorest v0.9.3
 	github.com/Azure/go-autorest/autorest/azure/auth v0.4.2
-	github.com/DataDog/datadog-go v2.2.0+incompatible // indirect
+	github.com/DataDog/datadog-go v3.3.1+incompatible // indirect
 	github.com/Shopify/sarama v1.21.0 // indirect
 	github.com/apache/thrift v0.13.0 // indirect
-	github.com/aws/aws-sdk-go v1.27.0
+	github.com/aws/aws-sdk-go v1.28.0
 	github.com/caddyserver/caddy v1.0.4
 	github.com/coredns/federation v0.0.0-20190818181423-e032b096babe
 	github.com/coreos/go-systemd v0.0.0-20190212144455-93d5ec2c7f76 // indirect

@@ -42,14 +42,9 @@ require (
 	golang.org/x/sys v0.0.0-20191220142924-d4481acd189f
 	google.golang.org/api v0.15.0
 	google.golang.org/grpc v1.26.0
-	gopkg.in/DataDog/dd-trace-go.v1 v1.19.0
+	gopkg.in/DataDog/dd-trace-go.v1 v1.20.0
 	k8s.io/api v0.17.0
 	k8s.io/apimachinery v0.17.0
 	k8s.io/client-go v0.17.0
 	k8s.io/klog v1.0.0
 )
-
-replace (
-	github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.0.0+incompatible
-	github.com/miekg/dns v1.1.3 => github.com/miekg/dns v1.1.22
-)

go.sum

@@ -41,8 +41,8 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY
 github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
-github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/DataDog/datadog-go v3.3.1+incompatible h1:NT/ghvYzqIzTJGiqvc3n4t9cZy8waO+I2O3I8Cok6/k=
+github.com/DataDog/datadog-go v3.3.1+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14=
 github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=

@@ -66,8 +66,8 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb
 github.com/apache/thrift v0.13.0 h1:5hryIiq9gtn+MiLVn0wP37kb/uTeRZgN08WoCsAhIhI=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.27.0 h1:0xphMHGMLBrPMfxR2AmVjZKcMEESEgWF8Kru94BNByk=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.28.0 h1:NkmnHFVEMTRYTleRLm5xUaL1mHKKkYQl4rCd+jzD58c=
+github.com/aws/aws-sdk-go v1.28.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=

@@ -388,6 +388,7 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=

@@ -566,8 +567,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
 google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
 google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-gopkg.in/DataDog/dd-trace-go.v1 v1.19.0 h1:aFSFd6oDMdvPYiToGqTv7/ERA6QrPhGaXSuueRCaM88=
-gopkg.in/DataDog/dd-trace-go.v1 v1.19.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
+gopkg.in/DataDog/dd-trace-go.v1 v1.20.0 h1:OUvLkkEtg2HpDS9g+GeNKDnJtx9zVbqCh2hGH7jHHfg=
+gopkg.in/DataDog/dd-trace-go.v1 v1.20.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-ACL" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-ACL" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .PP
 \fIacl\fP - enforces access control policies on source ip and prevents unauthorized access to DNS servers.

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-AUTOPATH" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-AUTOPATH" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-BIND" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-BIND" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-BUFSIZE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-BUFSIZE" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-CACHE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-CACHE" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-CLOUDDNS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-CLOUDDNS" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-DNSSEC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-DNSSEC" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-FILE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-FILE" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-FORWARD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-FORWARD" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-GRPC" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-GRPC" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-HEALTH" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-HEALTH" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-HOSTS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-HOSTS" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-IMPORT" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-IMPORT" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-KUBERNETES" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-KUBERNETES" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-RELOAD" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-RELOAD" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-REWRITE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-REWRITE" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-ROUTE53" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-ROUTE53" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-SECONDARY" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-SECONDARY" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-SIGN" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-SIGN" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -43,7 +43,7 @@ the signature only has 14 days left before expiring.
 Both these dates are only checked on the SOA's signature(s).
 .IP \(bu 4
 Create RRSIGs that have an inception of -3 hours (minus a jitter between 0 and 18 hours)
-and a expiration of +32 days for every given DNSKEY.
+and a expiration of +32 (plus a jitter between 0 and 5 days) days for every given DNSKEY.
 .IP \(bu 4
 Add NSEC records for all names in the zone. The TTL for these is the negative cache TTL from the
 SOA record.

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-TEMPLATE" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-TEMPLATE" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-TLS" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-TLS" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -1,5 +1,5 @@
 .\" Generated by Mmark Markdown Processer - mmark.miek.nl
-.TH "COREDNS-TRANSFER" 7 "December 2019" "CoreDNS" "CoreDNS Plugins"
+.TH "COREDNS-TRANSFER" 7 "January 2020" "CoreDNS" "CoreDNS Plugins"

 .SH "NAME"
 .PP

@@ -12,8 +12,8 @@ import (
 	"github.com/coredns/coredns/plugin"
 	"github.com/coredns/coredns/plugin/debug"
-	"github.com/coredns/coredns/plugin/pkg/policy"
 	clog "github.com/coredns/coredns/plugin/pkg/log"
+	"github.com/coredns/coredns/plugin/pkg/policy"
 	"github.com/coredns/coredns/request"

 	"github.com/miekg/dns"

@@ -8,8 +8,8 @@ import (
 	"github.com/coredns/coredns/core/dnsserver"
 	"github.com/coredns/coredns/plugin"
 	"github.com/coredns/coredns/plugin/metrics"
-	"github.com/coredns/coredns/plugin/pkg/policy"
 	"github.com/coredns/coredns/plugin/pkg/parse"
+	"github.com/coredns/coredns/plugin/pkg/policy"
 	pkgtls "github.com/coredns/coredns/plugin/pkg/tls"
 	"github.com/coredns/coredns/plugin/pkg/transport"

@@ -7,10 +7,10 @@
 ## Description

 By just using *log* you dump all queries (and parts for the reply) on standard output. Options exist
-to tweak the output a little. The date/time prefix on log lines is RFC3339 formatted with
-milliseconds.
+to tweak the output a little. Note that for busy servers logging will incur a performance hit.

-Note that for busy servers logging will incur a performance hit.
+Enabling or disabling the *log* plugin only affects the query logging, any other logging from
+CoreDNS will show up regardless.

 ## Syntax

@@ -18,8 +18,7 @@ Note that for busy servers logging will incur a performance hit.
 log
 ~~~

-* With no arguments, a query log entry is written to *stdout* in the common log format for all requests
-
+With no arguments, a query log entry is written to *stdout* in the common log format for all requests.
 Or if you want/need slightly more control:

 ~~~ txt

@@ -51,7 +50,7 @@ The classes of responses have the following meaning:
 * `all`: the default - nothing is specified. Using of this class means that all messages will be
   logged whatever we mix together with "all".

-If no class is specified, it defaults to *all*.
+If no class is specified, it defaults to `all`.

 ## Log Format

@@ -4,12 +4,23 @@ import (
 	"fmt"
 	"net"
 	"os"
+	"strings"

 	"github.com/coredns/coredns/plugin/pkg/transport"

 	"github.com/miekg/dns"
 )

+// Strips the zone, but preserves any port that comes after the zone
+func stripZone(host string) string {
+	if strings.Contains(host, "%") {
+		lastPercent := strings.LastIndex(host, "%")
+		newHost := host[:lastPercent]
+		return newHost
+	}
+	return host
+}
+
 // HostPortOrFile parses the strings in s, each string can either be a
 // address, [scheme://]address:port or a filename. The address part is checked
 // and in case of filename a resolv.conf like file is (assumed) and parsed and

@@ -21,10 +32,11 @@ func HostPortOrFile(s ...string) ([]string, error) {
 		trans, host := Transport(h)

 		addr, _, err := net.SplitHostPort(host)

 		if err != nil {
 			// Parse didn't work, it is not a addr:port combo
-			if net.ParseIP(host) == nil {
+			hostNoZone := stripZone(host)
+			if net.ParseIP(hostNoZone) == nil {
 				// Not an IP address.
 				ss, err := tryFile(host)
 				if err == nil {
 					servers = append(servers, ss...)

@@ -47,8 +59,7 @@ func HostPortOrFile(s ...string) ([]string, error) {
 			continue
 		}

-		if net.ParseIP(addr) == nil {
-			// Not an IP address.
+		if net.ParseIP(stripZone(addr)) == nil {
 			ss, err := tryFile(host)
 			if err == nil {
 				servers = append(servers, ss...)

@@ -34,6 +34,26 @@ func TestHostPortOrFile(t *testing.T) {
 			"127.0.0.1:53",
 			false,
 		},
+		{
+			"fe80::1",
+			"[fe80::1]:53",
+			false,
+		},
+		{
+			"fe80::1%ens3",
+			"[fe80::1%ens3]:53",
+			false,
+		},
+		{
+			"[fd01::1]:153",
+			"[fd01::1]:153",
+			false,
+		},
+		{
+			"[fd01::1%ens3]:153",
+			"[fd01::1%ens3]:153",
+			false,
+		},
 	}

 	err := ioutil.WriteFile("resolv.conf", []byte("nameserver 127.0.0.1\n"), 0600)
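
The new test cases above pin down the zone-identifier behaviour; a short usage sketch of the patched function (expected outputs in the comments follow the test table):

~~~ go
package main

import (
	"fmt"

	"github.com/coredns/coredns/plugin/pkg/parse"
)

func main() {
	// A link-local address with a zone identifier gets the default port 53...
	s, _ := parse.HostPortOrFile("fe80::1%ens3")
	fmt.Println(s) // [[fe80::1%ens3]:53]

	// ...while an explicit port is kept as-is.
	s, _ = parse.HostPortOrFile("[fd01::1%ens3]:153")
	fmt.Println(s) // [[fd01::1%ens3]:153]
}
~~~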

@@ -11,8 +11,8 @@ import (
 	"sync"

 	clog "github.com/coredns/coredns/plugin/pkg/log"
-	"github.com/coredns/coredns/plugin/pkg/uniq"
 	"github.com/coredns/coredns/plugin/pkg/reuseport"
+	"github.com/coredns/coredns/plugin/pkg/uniq"
 )

 var (

@@ -32,7 +32,7 @@ it do key or algorithm rollovers - it just signs.
 Both these dates are only checked on the SOA's signature(s).

 * Create RRSIGs that have an inception of -3 hours (minus a jitter between 0 and 18 hours)
-  and a expiration of +32 days for every given DNSKEY.
+  and a expiration of +32 (plus a jitter between 0 and 5 days) days for every given DNSKEY.

 * Add NSEC records for all names in the zone. The TTL for these is the negative cache TTL from the
   SOA record.

@@ -23,7 +23,7 @@ func setup(c *caddy.Controller) error {
 	c.OnStartup(sign.OnStartup)
 	c.OnStartup(func() error {
 		for _, signer := range sign.signers {
-			go signer.refresh(DurationRefreshHours)
+			go signer.refresh(durationRefreshHours)
 		}
 		return nil
 	})

@@ -66,7 +66,8 @@ func parse(c *caddy.Controller) (*Sign, error) {
 		signers[i] = &Signer{
 			dbfile:      dbfile,
 			origin:      plugin.Host(origins[i]).Normalize(),
-			jitter:      time.Duration(float32(DurationJitter) * rand.Float32()),
+			jitterIncep: time.Duration(float32(durationInceptionJitter) * rand.Float32()),
+			jitterExpir: time.Duration(float32(durationExpirationDayJitter) * rand.Float32()),
 			directory:   "/var/lib/coredns",
 			stop:        make(chan struct{}),
 			signedfile:  fmt.Sprintf("db.%ssigned", origins[i]), // origins[i] is a fqdn, so it ends with a dot, hence %ssigned.

@@ -26,12 +26,13 @@ func (s *Sign) OnStartup() error {

 // Various duration constants for signing of the zones.
 const (
-	DurationExpireDays              = 7 * 24 * time.Hour  // max time allowed before expiration
-	DurationResignDays              = 6 * 24 * time.Hour  // if the last sign happenend this long ago, sign again
-	DurationSignatureExpireDays     = 32 * 24 * time.Hour // sign for 32 days
-	DurationRefreshHours            = 5 * time.Hour       // check zones every 5 hours
-	DurationJitter                  = -18 * time.Hour     // default max jitter
-	DurationSignatureInceptionHours = -3 * time.Hour      // -(2+1) hours, be sure to catch daylight saving time and such, jitter is subtracted
+	durationExpireDays              = 7 * 24 * time.Hour  // max time allowed before expiration
+	durationResignDays              = 6 * 24 * time.Hour  // if the last sign happenend this long ago, sign again
+	durationSignatureExpireDays     = 32 * 24 * time.Hour // sign for 32 days
+	durationRefreshHours            = 5 * time.Hour       // check zones every 5 hours
+	durationInceptionJitter         = -18 * time.Hour     // default max jitter for the inception
+	durationExpirationDayJitter     = 5 * 24 * time.Hour  // default max jitter for the expiration
+	durationSignatureInceptionHours = -3 * time.Hour      // -(2+1) hours, be sure to catch daylight saving time and such, jitter is subtracted
 )

 const timeFmt = "2006-01-02T15:04:05.000Z07:00"

@@ -22,7 +22,8 @@ type Signer struct {
 	origin      string
 	dbfile      string
 	directory   string
-	jitter      time.Duration
+	jitterIncep time.Duration
+	jitterExpir time.Duration

 	signedfile string
 	stop       chan struct{}

@@ -42,7 +43,7 @@ func (s *Signer) Sign(now time.Time) (*file.Zone, error) {

 	mttl := z.Apex.SOA.Minttl
 	ttl := z.Apex.SOA.Header().Ttl
-	inception, expiration := lifetime(now, s.jitter)
+	inception, expiration := lifetime(now, s.jitterIncep, s.jitterExpir)
 	z.Apex.SOA.Serial = uint32(now.Unix())

 	for _, pair := range s.keys {

@@ -143,8 +144,8 @@ func resign(rd io.Reader, now time.Time) (why error) {
 			}
 			incep, _ := time.Parse("20060102150405", dns.TimeToString(x.Inception))
 			// If too long ago, resign.
-			if now.Sub(incep) >= 0 && now.Sub(incep) > DurationResignDays {
-				return fmt.Errorf("inception %q was more than: %s ago from %s: %s", incep.Format(timeFmt), DurationResignDays, now.Format(timeFmt), now.Sub(incep))
+			if now.Sub(incep) >= 0 && now.Sub(incep) > durationResignDays {
+				return fmt.Errorf("inception %q was more than: %s ago from %s: %s", incep.Format(timeFmt), durationResignDays, now.Format(timeFmt), now.Sub(incep))
 			}
 			// Inception hasn't even start yet.
 			if now.Sub(incep) < 0 {

@@ -152,8 +153,8 @@ func resign(rd io.Reader, now time.Time) (why error) {
 			}

 			expire, _ := time.Parse("20060102150405", dns.TimeToString(x.Expiration))
-			if expire.Sub(now) < DurationExpireDays {
-				return fmt.Errorf("expiration %q is less than: %s away from %s: %s", expire.Format(timeFmt), DurationExpireDays, now.Format(timeFmt), expire.Sub(now))
+			if expire.Sub(now) < durationExpireDays {
+				return fmt.Errorf("expiration %q is less than: %s away from %s: %s", expire.Format(timeFmt), durationExpireDays, now.Format(timeFmt), expire.Sub(now))
 			}
 		}
 		i++

@@ -173,7 +174,7 @@ func signAndLog(s *Signer, why error) {
 	z, err := s.Sign(now)
 	log.Infof("Signing %q because %s", s.origin, why)
 	if err != nil {
-		log.Warningf("Error signing %q with key tags %q in %s: %s, next: %s", s.origin, keyTag(s.keys), time.Since(now), err, now.Add(DurationRefreshHours).Format(timeFmt))
+		log.Warningf("Error signing %q with key tags %q in %s: %s, next: %s", s.origin, keyTag(s.keys), time.Since(now), err, now.Add(durationRefreshHours).Format(timeFmt))
 		return
 	}

@@ -181,7 +182,7 @@ func signAndLog(s *Signer, why error) {
 		log.Warningf("Error signing %q: failed to move zone file into place: %s", s.origin, err)
 		return
 	}
-	log.Infof("Successfully signed zone %q in %q with key tags %q and %d SOA serial, elapsed %f, next: %s", s.origin, filepath.Join(s.directory, s.signedfile), keyTag(s.keys), z.Apex.SOA.Serial, time.Since(now).Seconds(), now.Add(DurationRefreshHours).Format(timeFmt))
+	log.Infof("Successfully signed zone %q in %q with key tags %q and %d SOA serial, elapsed %f, next: %s", s.origin, filepath.Join(s.directory, s.signedfile), keyTag(s.keys), z.Apex.SOA.Serial, time.Since(now).Seconds(), now.Add(durationRefreshHours).Format(timeFmt))
 }

 // refresh checks every val if some zones need to be resigned.

@@ -202,8 +203,8 @@ func (s *Signer) refresh(val time.Duration) {
 		}
 	}

-func lifetime(now time.Time, jitter time.Duration) (uint32, uint32) {
-	incep := uint32(now.Add(DurationSignatureInceptionHours).Add(jitter).Unix())
-	expir := uint32(now.Add(DurationSignatureExpireDays).Unix())
+func lifetime(now time.Time, jitterInception, jitterExpiration time.Duration) (uint32, uint32) {
+	incep := uint32(now.Add(durationSignatureInceptionHours).Add(jitterInception).Unix())
+	expir := uint32(now.Add(durationSignatureExpireDays).Add(jitterExpiration).Unix())
 	return incep, expir
 }
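
Putting the jitter change in one place: with the new constants the inception lands in [now-21h, now-3h] and the expiration in [now+32d, now+37d]. A self-contained sketch restating the constants and lifetime computation from the hunks above:

~~~ go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	durationSignatureExpireDays     = 32 * 24 * time.Hour // sign for 32 days
	durationInceptionJitter         = -18 * time.Hour     // max jitter for the inception
	durationExpirationDayJitter     = 5 * 24 * time.Hour  // max jitter for the expiration
	durationSignatureInceptionHours = -3 * time.Hour      // jitter is subtracted from this
)

func lifetime(now time.Time, jitterInception, jitterExpiration time.Duration) (uint32, uint32) {
	incep := uint32(now.Add(durationSignatureInceptionHours).Add(jitterInception).Unix())
	expir := uint32(now.Add(durationSignatureExpireDays).Add(jitterExpiration).Unix())
	return incep, expir
}

func main() {
	// Per-signer jitter, drawn once, the same way parse() above does it.
	jitterIncep := time.Duration(float32(durationInceptionJitter) * rand.Float32())
	jitterExpir := time.Duration(float32(durationExpirationDayJitter) * rand.Float32())
	incep, expir := lifetime(time.Now().UTC(), jitterIncep, jitterExpir)
	fmt.Println(incep, expir)
}
~~~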

@@ -1,4 +1,4 @@
-Hacking on *traffic*
+# Hacking on *traffic*

 Repos used:

@@ -12,7 +12,8 @@ I found these website useful while working on this.

 * https://github.com/envoyproxy/envoy/blob/master/api/API_OVERVIEW.md
 * https://github.com/envoyproxy/learnenvoy/blob/master/_articles/service-discovery.md
-* This was *really* helpful: https://www.envoyproxy.io/docs/envoy/v1.11.2/api-docs/xds_protocol
+* This was *really* helpful: https://www.envoyproxy.io/docs/envoy/v1.11.2/api-docs/xds_protocol to
+  show the flow of the protocol.

 # Testing

@@ -42,16 +43,8 @@ example.org {

 Start CoreDNS (`coredns -conf Corefile -dns.port=1053`), and see logging/debugging flow by; the
 test binary should also spew out a bunch of things. CoreDNS willl build up a list of cluster and
-endpoints. Next you can query it:
-
-~~~ sh
-% dig @localhost -p 1053 cluster-v0-0.example.org A
-;; QUESTION SECTION:
-;cluster-v0-0.example.org. IN A
-
-;; ANSWER SECTION:
-cluster-v0-0.example.org. 5 IN A 127.0.0.1
-~~~
+endpoints. Next you can query it. Note none of the endpoints are HEALTHY so you'll mostly get NODATA
+responses, instead of actual records.

 Note: the xds/test binary is a go-control-plane binary with added debugging that I'm using for
 testing.

@@ -20,10 +20,13 @@ be upgraded, so all traffic to it is drained. Or the entire Kubernetes needs to
 endpoints need to be drained from it.

 *Traffic* discovers the endpoints via Envoy's xDS protocol. Endpoints and clusters are discovered
-every 10 seconds. The plugin hands out responses that adhere to these assignments. Each DNS response
-contains a single IP address that's considered the best one. *Traffic* will load balance A and AAAA
-queries. The TTL on these answer is set to 5s. It will only return successful responses either with
-an answer or otherwise a NODATA response. Queries for non-existent clusters get a NXDOMAIN.
+every 10 seconds. The plugin hands out responses that adhere to these assignments. Only endpoints
+that are *healthy* are handed out.
+
+Each DNS response contains a single IP address that's considered the best one. *Traffic* will load
+balance A and AAAA queries. The TTL on these answer is set to 5s. It will only return successful
+responses either with an answer or otherwise a NODATA response. Queries for non-existent clusters
+get a NXDOMAIN.

 The *traffic* plugin has no notion of draining, drop overload and anything that advanced, *it just
 acts upon assignments*. This is means that if a endpoint goes down and *traffic* has not seen a new
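
The response policy in that paragraph, condensed into a sketch — the two-value shape mirrors the client's Select shown later in this diff (address, cluster-exists); this is illustrative, not the plugin's actual code:

~~~ go
package traffic

import (
	"net"

	"github.com/miekg/dns"
)

// rcodeFor sketches the policy described above: NXDOMAIN for unknown
// clusters, otherwise success -- a NODATA reply (empty answer section) when
// ip is nil because no endpoint is healthy, or a single A/AAAA with TTL 5s.
func rcodeFor(ip net.IP, clusterExists bool) (rcode int, hasAnswer bool) {
	if !clusterExists {
		return dns.RcodeNameError, false // NXDOMAIN
	}
	return dns.RcodeSuccess, ip != nil // NODATA when ip == nil
}
~~~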

@@ -74,12 +77,11 @@ and "cluster-v0" is one of the load balanced cluster, *traffic* will respond to

 ## Metrics

-What metrics should we do?
+What metrics should we do? If any? Number of clusters? Number of endpoints and health?

 ## Ready

-Should this plugin implement readyness?
-
+Should this plugin implement readiness?

 ## Examples

@@ -108,8 +110,7 @@ The following documents provide some background on Envoy's control plane.

 ## Bugs

-Priority from ClusterLoadAssignments is not used. Locality is also not used. Health status of the
-endpoints is ignore (for now).
+Priority and locality information from ClusterLoadAssignments is not used.

 Load reporting via xDS is not supported; this can be implemented, but there are some things that
 make this difficult. A single (DNS) query is done by a resolver. Behind this resolver there may be

@@ -121,9 +122,8 @@ Multiple **TO** addresses is not implemented.

 ## TODO

-* acking responses
+* correctly tracking versions and pruning old clusters.
 * metrics?
-* how to exactly deal with health status from the endpoints.
-* testing
+* more and better testing
 * credentials (other than TLS) - how/what?
 * is the protocol correctly implemented? Should we not have a 10s tick, but wait for responses from
   the control plane?

@@ -20,12 +20,19 @@ func TestParseTraffic(t *testing.T) {
 		input     string
 		shouldErr bool
 	}{
+		// ok
+		{`traffic grpc://127.0.0.1:18000 {
+			id test-id
+		}`, false},
+
+		// fail
 		{`traffic`, true},
+		{`traffic tls://1.1.1.1`, true},
 		{`traffic {
 			id bla bla
 		}`, true},
 		{`traffic {
 			node bla bla
 			node
 		}`, true},
 	}
 	for i, test := range tests {
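
For context on how such table-driven setup tests run: each Corefile fragment is fed to a test controller and the parse result is checked. A sketch under stated assumptions — `parseTraffic` is a hypothetical name standing in for the plugin's real (unexported) parse function:

~~~ go
package traffic

import (
	"testing"

	"github.com/caddyserver/caddy"
)

// TestParseTrafficSketch shows the usual CoreDNS pattern: build a test
// controller from a Corefile fragment, then parse it. parseTraffic is a
// hypothetical stand-in for the plugin's actual parse function.
func TestParseTrafficSketch(t *testing.T) {
	c := caddy.NewTestController("dns", `traffic grpc://127.0.0.1:18000 {
		id test-id
	}`)
	if _, err := parseTraffic(c); err != nil {
		t.Fatal(err)
	}
}
~~~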

@@ -28,17 +28,12 @@ func (t *Traffic) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg

 	cluster := ""
 	for _, o := range t.origins {
-		println(o, state.Name())
 		if strings.HasSuffix(state.Name(), o) {
 			cluster, _ = dnsutil.TrimZone(state.Name(), o)
 			state.Zone = o
 			break
 		}
 	}
-	if cluster == "" {
-		return plugin.NextOrFailure(t.Name(), t.Next, ctx, w, r)
-	}

 	m := new(dns.Msg)
 	m.SetReply(r)
 	m.Authoritative = true
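
For context on the loop above: the query name is suffix-matched against the configured origins and whatever remains is the cluster name. A simplified, self-contained sketch (TrimZone reduced to a plain suffix strip):

~~~ go
package main

import (
	"fmt"
	"strings"
)

// clusterFor mimics the matching above: find the origin the query name falls
// under and strip it to get the cluster name.
func clusterFor(qname string, origins []string) string {
	for _, o := range origins {
		if strings.HasSuffix(qname, o) {
			return strings.TrimSuffix(strings.TrimSuffix(qname, o), ".")
		}
	}
	return "" // no origin matched
}

func main() {
	fmt.Println(clusterFor("web.lb.example.org.", []string{"lb.example.org."})) // web
}
~~~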

@@ -1,123 +1,144 @@
 package traffic

+/*
-func TestTraffic(t *testing.T) {
-	rm := Traffic{Next: handler()}
+import (
+	"context"
+	"testing"
+
+	"github.com/coredns/coredns/plugin/pkg/dnstest"
+	"github.com/coredns/coredns/plugin/pkg/dnsutil"
+	"github.com/coredns/coredns/plugin/test"
+	"github.com/coredns/coredns/plugin/traffic/xds"
+
+	xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
+	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
+	endpointpb "github.com/envoyproxy/go-control-plane/envoy/api/v2/endpoint"
+	"github.com/miekg/dns"
+	"google.golang.org/grpc"
+)
+
+func TestTraffic(t *testing.T) {
+	c, err := xds.New("127.0.0.1:0", "test-id", grpc.WithInsecure())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tr := &Traffic{c: c, origins: []string{"lb.example.org."}}

-	// the first X records must be cnames after this test
 	tests := []struct {
-		answer        []dns.RR
-		extra         []dns.RR
-		cnameAnswer   int
-		cnameExtra    int
-		addressAnswer int
-		addressExtra  int
-		mxAnswer      int
-		mxExtra       int
+		cla     *xdspb.ClusterLoadAssignment
+		cluster string
+		qtype   uint16
+		rcode   int
+		answer  string // address value of the A/AAAA record.
+		ns      bool   // should there be a ns section.
 	}{
 		{
-			answer: []dns.RR{
-				test.CNAME("cname1.region2.skydns.test. 300 IN CNAME cname2.region2.skydns.test."),
-				test.CNAME("cname2.region2.skydns.test. 300 IN CNAME cname3.region2.skydns.test."),
-				test.CNAME("cname5.region2.skydns.test. 300 IN CNAME cname6.region2.skydns.test."),
-				test.CNAME("cname6.region2.skydns.test. 300 IN CNAME endpoint.region2.skydns.test."),
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.1"),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx1.region2.skydns.test."),
-				test.MX("mx.region2.skydns.test. 300 IN MX 2 mx2.region2.skydns.test."),
-				test.MX("mx.region2.skydns.test. 300 IN MX 3 mx3.region2.skydns.test."),
-			},
-			cnameAnswer:   4,
-			addressAnswer: 1,
-			mxAnswer:      3,
+			cla:     &xdspb.ClusterLoadAssignment{},
+			cluster: "web", qtype: dns.TypeA, rcode: dns.RcodeSuccess, ns: true,
 		},
 		{
-			answer: []dns.RR{
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.1"),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx1.region2.skydns.test."),
-				test.CNAME("cname.region2.skydns.test. 300 IN CNAME endpoint.region2.skydns.test."),
-			},
-			cnameAnswer:   1,
-			addressAnswer: 1,
-			mxAnswer:      1,
+			cla:     &xdspb.ClusterLoadAssignment{},
+			cluster: "web", qtype: dns.TypeSRV, rcode: dns.RcodeSuccess, ns: true,
 		},
 		{
-			answer: []dns.RR{
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx1.region2.skydns.test."),
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.1"),
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.2"),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx2.region2.skydns.test."),
-				test.CNAME("cname2.region2.skydns.test. 300 IN CNAME cname3.region2.skydns.test."),
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.3"),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx3.region2.skydns.test."),
+			cla:     &xdspb.ClusterLoadAssignment{},
+			cluster: "does-not-exist", qtype: dns.TypeA, rcode: dns.RcodeNameError, ns: true,
 		},
-			extra: []dns.RR{
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.1"),
-				test.AAAA("endpoint.region2.skydns.test. 300 IN AAAA ::1"),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx1.region2.skydns.test."),
-				test.CNAME("cname2.region2.skydns.test. 300 IN CNAME cname3.region2.skydns.test."),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx2.region2.skydns.test."),
-				test.A("endpoint.region2.skydns.test. 300 IN A 10.240.0.3"),
-				test.AAAA("endpoint.region2.skydns.test. 300 IN AAAA ::2"),
-				test.MX("mx.region2.skydns.test. 300 IN MX 1 mx3.region2.skydns.test."),
+		// healthy backend
 		{
+			cla: &xdspb.ClusterLoadAssignment{
+				ClusterName: "web",
+				Endpoints:   endpoints([]EndpointHealth{{"127.0.0.1", corepb.HealthStatus_HEALTHY}}),
+			},
-			cnameAnswer:   1,
-			cnameExtra:    1,
-			addressAnswer: 3,
-			addressExtra:  4,
-			mxAnswer:      3,
-			mxExtra:       3,
+			cluster: "web", qtype: dns.TypeA, rcode: dns.RcodeSuccess, answer: "127.0.0.1",
 		},
+		// unknown backend
 		{
+			cla: &xdspb.ClusterLoadAssignment{
+				ClusterName: "web",
+				Endpoints:   endpoints([]EndpointHealth{{"127.0.0.1", corepb.HealthStatus_UNKNOWN}}),
+			},
+			cluster: "web", qtype: dns.TypeA, rcode: dns.RcodeSuccess, ns: true,
 		},
+		// unknown backend and healthy backend
+		{
+			cla: &xdspb.ClusterLoadAssignment{
+				ClusterName: "web",
+				Endpoints: endpoints([]EndpointHealth{
+					{"127.0.0.1", corepb.HealthStatus_UNKNOWN},
+					{"127.0.0.2", corepb.HealthStatus_HEALTHY},
+				}),
+			},
+			cluster: "web", qtype: dns.TypeA, rcode: dns.RcodeSuccess, answer: "127.0.0.2",
+		},
 	}

+	ctx := context.TODO()
+
+	for i, tc := range tests {
+		a := xds.NewAssignment()
+		a.SetClusterLoadAssignment("web", tc.cla) // web is our cluster
+		c.SetAssignments(a)
+
+		m := new(dns.Msg)
+		cl := dnsutil.Join(tc.cluster, tr.origins[0])
+		m.SetQuestion(cl, tc.qtype)
+
 		rec := dnstest.NewRecorder(&test.ResponseWriter{})

-	for i, test := range tests {
-		req := new(dns.Msg)
-		req.SetQuestion("region2.skydns.test.", dns.TypeSRV)
-		req.Answer = test.answer
-		req.Extra = test.extra
-
-		_, err := rm.ServeDNS(context.TODO(), rec, req)
+		_, err := tr.ServeDNS(ctx, rec, m)
 		if err != nil {
-			t.Errorf("Test %d: Expected no error, but got %s", i, err)
-			continue
+			t.Errorf("Test %d: Expected no error, but got %q", i, err)
 		}
+		if rec.Msg.Rcode != tc.rcode {
+			t.Errorf("Test %d: Expected no rcode %d, but got %d", i, tc.rcode, rec.Msg.Rcode)
+		}
+		if tc.ns && len(rec.Msg.Ns) == 0 {
+			t.Errorf("Test %d: Expected authority section, but got none", i)
+		}
+		if tc.answer != "" && len(rec.Msg.Answer) == 0 {
+			t.Fatalf("Test %d: Expected answer section, but got none", i)
+		}
+		if tc.answer != "" {
+			record := rec.Msg.Answer[0]
+			addr := ""
+			switch x := record.(type) {
+			case *dns.A:
+				addr = x.A.String()
+			case *dns.AAAA:
+				addr = x.AAAA.String()
+			}
+			if tc.answer != addr {
+				t.Errorf("Test %d: Expected answer %s, but got %s", i, tc.answer, addr)
+			}
+
+		}

-		cname, address, mx, sorted := countRecords(rec.Msg.Answer)
-		if !sorted {
-			t.Errorf("Test %d: Expected CNAMEs, then AAAAs, then MX in Answer, but got mixed", i)
-		}
-		if cname != test.cnameAnswer {
-			t.Errorf("Test %d: Expected %d CNAMEs in Answer, but got %d", i, test.cnameAnswer, cname)
-		}
-		if address != test.addressAnswer {
-			t.Errorf("Test %d: Expected %d A/AAAAs in Answer, but got %d", i, test.addressAnswer, address)
-		}
-		if mx != test.mxAnswer {
-			t.Errorf("Test %d: Expected %d MXs in Answer, but got %d", i, test.mxAnswer, mx)
-		}
-
-		cname, address, mx, sorted = countRecords(rec.Msg.Extra)
-		if !sorted {
-			t.Errorf("Test %d: Expected CNAMEs, then AAAAs, then MX in Extra, but got mixed", i)
-		}
-		if cname != test.cnameExtra {
-			t.Errorf("Test %d: Expected %d CNAMEs in Extra, but got %d", i, test.cnameAnswer, cname)
-		}
-		if address != test.addressExtra {
-			t.Errorf("Test %d: Expected %d A/AAAAs in Extra, but got %d", i, test.addressAnswer, address)
-		}
-		if mx != test.mxExtra {
-			t.Errorf("Test %d: Expected %d MXs in Extra, but got %d", i, test.mxAnswer, mx)
-		}
 	}
 }

-func handler() plugin.Handler {
-	return plugin.HandlerFunc(func(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
-		w.WriteMsg(r)
-		return dns.RcodeSuccess, nil
-	})
+type EndpointHealth struct {
+	Address string
+	Health  corepb.HealthStatus
 }

+func endpoints(e []EndpointHealth) []*endpointpb.LocalityLbEndpoints {
+	ep := make([]*endpointpb.LocalityLbEndpoints, len(e))
+	for i := range e {
+		ep[i] = &endpointpb.LocalityLbEndpoints{
+			LbEndpoints: []*endpointpb.LbEndpoint{{
+				HostIdentifier: &endpointpb.LbEndpoint_Endpoint{
+					Endpoint: &endpointpb.Endpoint{
+						Address: &corepb.Address{
+							Address: &corepb.Address_SocketAddress{
+								SocketAddress: &corepb.SocketAddress{
+									Address: e[i].Address,
+								},
+							},
+						},
+					},
+				},
+				HealthStatus: e[i].Health,
+			}},
+		}
+	}
+	return ep
+}
+*/

@@ -6,15 +6,21 @@ import (
 	"sync"

 	xdspb "github.com/envoyproxy/go-control-plane/envoy/api/v2"
+	corepb "github.com/envoyproxy/go-control-plane/envoy/api/v2/core"
 )

 type assignment struct {
 	mu      sync.RWMutex
 	cla     map[string]*xdspb.ClusterLoadAssignment
 	version int // not sure what do with and if we should discard all clusters.
 }

-func (a *assignment) setClusterLoadAssignment(cluster string, cla *xdspb.ClusterLoadAssignment) {
+// NewAssignment returns a pointer to an assignment.
+func NewAssignment() *assignment {
+	return &assignment{cla: make(map[string]*xdspb.ClusterLoadAssignment)}
+}
+
+// SetClusterLoadAssignment sets the assignment for the cluster to cla.
+func (a *assignment) SetClusterLoadAssignment(cluster string, cla *xdspb.ClusterLoadAssignment) {
 	// If cla is nil we just found a cluster, check if we already know about it, or if we need to make a new entry.
 	a.mu.Lock()
 	defer a.mu.Unlock()

@@ -30,7 +36,8 @@ func (a *assignment) setClusterLoadAssignment(cluster string, cla *xdspb.Cluster

 }

-func (a *assignment) clusterLoadAssignment(cluster string) *xdspb.ClusterLoadAssignment {
+// ClusterLoadAssignment returns the assignment for the cluster or nil if there is none.
+func (a *assignment) ClusterLoadAssignment(cluster string) *xdspb.ClusterLoadAssignment {
 	a.mu.RLock()
 	cla, ok := a.cla[cluster]
 	a.mu.RUnlock()
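
A short usage sketch of the now-exported API (written from inside the xds package, since the assignment type itself stays unexported):

~~~ go
package xds

// exampleAssignment sketches the flow the exported methods above enable: CDS
// registers a cluster (with a nil assignment until EDS fills it in), and
// lookups go through ClusterLoadAssignment.
func exampleAssignment() bool {
	a := NewAssignment()
	a.SetClusterLoadAssignment("web", nil) // cluster seen via CDS, endpoints not yet known
	return a.ClusterLoadAssignment("web") != nil
}
~~~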

@@ -52,55 +59,58 @@ func (a *assignment) clusters() []string {
 	return clusters
 }

-// Select selects a backend from cla, using weighted random selection. It only selects
+// Select selects a backend from cluster load assignments, using weighted random selection. It only selects
 // backends that are reporting healthy.
 func (a *assignment) Select(cluster string) (net.IP, bool) {
-	cla := a.clusterLoadAssignment(cluster)
+	cla := a.ClusterLoadAssignment(cluster)
 	if cla == nil {
 		return nil, false
 	}

 	total := 0
-	i := 0
+	healthy := 0
 	for _, ep := range cla.Endpoints {
 		for _, lb := range ep.GetLbEndpoints() {
-			// if lb.GetHealthStatus() != corepb.HealthStatus_HEALTHY {
-			// 	continue
-			// }
+			if lb.GetHealthStatus() != corepb.HealthStatus_HEALTHY {
+				continue
+			}
 			total += int(lb.GetLoadBalancingWeight().GetValue())
-			i++
+			healthy++
 		}
 	}
+	if healthy == 0 {
+		return nil, true
+	}

 	if total == 0 {
 		// all weights are 0, randomly select one of the endpoints.
-		r := rand.Intn(i)
+		r := rand.Intn(healthy)
+		i := 0
 		for _, ep := range cla.Endpoints {
 			for _, lb := range ep.GetLbEndpoints() {
-				// if lb.GetHealthStatus() != corepb.HealthStatus_HEALTHY {
-				// 	continue
-				// }
+				if lb.GetHealthStatus() != corepb.HealthStatus_HEALTHY {
+					continue
+				}
 				if r == i {
 					return net.ParseIP(lb.GetEndpoint().GetAddress().GetSocketAddress().GetAddress()), true
 				}
 				i++
 			}
 		}
-		return nil
+		return nil, true
 	}

 	r := rand.Intn(total) + 1

 	for _, ep := range cla.Endpoints {
 		for _, lb := range ep.GetLbEndpoints() {
-			// if lb.GetHealthStatus() != corepb.HealthStatus_HEALTHY {
-			// 	continue
-			// }
+			if lb.GetHealthStatus() != corepb.HealthStatus_HEALTHY {
+				continue
+			}
 			r -= int(lb.GetLoadBalancingWeight().GetValue())
 			if r <= 0 {
 				return net.ParseIP(lb.GetEndpoint().GetAddress().GetSocketAddress().GetAddress()), true
 			}
 		}
 	}
-	return nil, false
+	return nil, true
 }
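
The selection logic above in isolation — a sketch of weighted random choice over healthy endpoints, with a plain struct standing in for the xDS protobuf types (names here are illustrative):

~~~ go
package main

import (
	"fmt"
	"math/rand"
)

type endpoint struct {
	addr    string
	weight  int
	healthy bool
}

// pick returns a healthy endpoint, weighted by weight; if every weight is
// zero it picks uniformly among the healthy ones. "" means none are healthy.
func pick(eps []endpoint) string {
	total, healthy := 0, 0
	for _, e := range eps {
		if !e.healthy {
			continue
		}
		total += e.weight
		healthy++
	}
	if healthy == 0 {
		return ""
	}
	if total == 0 {
		r := rand.Intn(healthy)
		for _, e := range eps {
			if !e.healthy {
				continue
			}
			if r == 0 {
				return e.addr
			}
			r--
		}
		return ""
	}
	r := rand.Intn(total) + 1
	for _, e := range eps {
		if !e.healthy {
			continue
		}
		r -= e.weight
		if r <= 0 {
			return e.addr
		}
	}
	return ""
}

func main() {
	eps := []endpoint{
		{"127.0.0.1", 1, false}, // never returned: unhealthy
		{"127.0.0.2", 3, true},
		{"127.0.0.3", 1, true},
	}
	fmt.Println(pick(eps)) // 127.0.0.2 roughly 3 times out of 4
}
~~~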

@@ -52,12 +52,14 @@ type adsStream adsgrpc.AggregatedDiscoveryService_StreamAggregatedResourcesClien
 type Client struct {
 	cc          *grpc.ClientConn
 	ctx         context.Context
-	assignments *assignment
+	assignments *assignment // assignments contains the current clusters and endpoints.
 	node        *corepb.Node
 	cancel      context.CancelFunc
 	stop        chan struct{}
 	mu          sync.RWMutex
-	nonce       string
+
+	version map[string]string
+	nonce   map[string]string
 }

 // New returns a new client that's dialed to addr using node as the local identifier.

@@ -79,6 +81,7 @@ func New(addr, node string, opts ...grpc.DialOption) (*Client, error) {
 		},
 	}
 	c.assignments = &assignment{cla: make(map[string]*xdspb.ClusterLoadAssignment)}
+	c.version, c.nonce = make(map[string]string), make(map[string]string)
 	c.ctx, c.cancel = context.WithCancel(context.Background())

 	return c, nil

@@ -106,13 +109,15 @@ func (c *Client) Run() {

 	done := make(chan struct{})
 	go func() {
-		tick := time.NewTicker(1 * time.Second)
+		if err := c.clusterDiscovery(stream, c.Version(cdsURL), c.Nonce(cdsURL), []string{}); err != nil {
+			log.Debug(err)
+		}
+		tick := time.NewTicker(10 * time.Second)
 		for {
 			select {
 			case <-tick.C:
-				// send empty list for cluster discovery again and again
-				log.Debugf("Requesting cluster list, nonce %q:", c.Nonce())
-				if err := c.clusterDiscovery(stream, "", c.Nonce(), []string{}); err != nil {
+				// send empty list for cluster discovery every 10 seconds
+				if err := c.clusterDiscovery(stream, c.Version(cdsURL), c.Nonce(cdsURL), []string{}); err != nil {
 					log.Debug(err)
 				}
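
Why the loop above now issues one clusterDiscovery call before the ticker: a time.Ticker delivers its first tick only after a full interval, so without the explicit call the first CDS request would lag ten seconds. A tiny demonstration of that stdlib behaviour:

~~~ go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	tick := time.NewTicker(100 * time.Millisecond)
	defer tick.Stop()
	fmt.Println("first request at", time.Since(start)) // immediately
	for i := 0; i < 2; i++ {
		<-tick.C
		fmt.Println("request at", time.Since(start)) // ~100ms, then ~200ms
	}
}
~~~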

@@ -124,7 +129,7 @@ func (c *Client) Run() {
 	}()

 	if err := c.Receive(stream); err != nil {
-		log.Debug(err)
+		log.Warning(err)
 	}
 	close(done)
 }

@@ -164,7 +169,7 @@ func (c *Client) Receive(stream adsStream) error {

 	switch resp.GetTypeUrl() {
 	case cdsURL:
-		a := &assignment{cla: make(map[string]*xdspb.ClusterLoadAssignment)}
+		a := NewAssignment()
 		for _, r := range resp.GetResources() {
 			var any ptypes.DynamicAny
 			if err := ptypes.UnmarshalAny(r, &any); err != nil {

@@ -175,24 +180,18 @@ func (c *Client) Receive(stream adsStream) error {
 			if !ok {
 				continue
 			}
-			a.setClusterLoadAssignment(cluster.GetName(), nil)
+			a.SetClusterLoadAssignment(cluster.GetName(), nil)
 		}
-		log.Debugf("Cluster discovery processed with %d resources", len(resp.GetResources()))
-
-		// ack the CDS proto, with we we've got. (empty version would be NACK)
-		if err := c.clusterDiscovery(stream, resp.GetVersionInfo(), resp.GetNonce(), a.clusters()); err != nil {
-			log.Debug(err)
-			continue
-		}
-		// need to figure out how to handle the versions and nounces exactly.
-
-		c.SetNonce(resp.GetNonce())
+		log.Debugf("Cluster discovery processed with %d resources, version %q and nonce %q, clusters: %v", len(resp.GetResources()), c.Version(cdsURL), c.Nonce(cdsURL), a.clusters())
+		// set our local administration and ack the reply. Empty version would signal NACK.
+		c.SetNonce(cdsURL, resp.GetNonce())
+		c.SetVersion(cdsURL, resp.GetVersionInfo())
+		c.SetAssignments(a)
+		c.clusterDiscovery(stream, resp.GetVersionInfo(), resp.GetNonce(), a.clusters())

 		// now kick off discovery for endpoints
-		if err := c.endpointDiscovery(stream, "", resp.GetNonce(), a.clusters()); err != nil {
+		if err := c.endpointDiscovery(stream, c.Version(edsURL), c.Nonce(edsURL), a.clusters()); err != nil {
 			log.Debug(err)
 			continue
 		}
 	case edsURL:
 		for _, r := range resp.GetResources() {

@@ -205,10 +204,12 @@ func (c *Client) Receive(stream adsStream) error {
 			if !ok {
 				continue
 			}
-			c.assignments.setClusterLoadAssignment(cla.GetClusterName(), cla)
-			// ack the bloody thing
+			c.assignments.SetClusterLoadAssignment(cla.GetClusterName(), cla)
 		}
-		log.Debugf("Endpoint discovery processed with %d resources", len(resp.GetResources()))
+		log.Debugf("Endpoint discovery processed with %d resources, version %q and nonce %q, clusters: %v", len(resp.GetResources()), c.Version(edsURL), c.Nonce(edsURL), c.assignments.clusters())
+		// set our local administration and ack the reply. Empty version would signal NACK.
+		c.SetNonce(edsURL, resp.GetNonce())
+		c.SetVersion(edsURL, resp.GetVersionInfo())

 	default:
 		return fmt.Errorf("unknown response URL for discovery: %q", resp.GetTypeUrl())

@@ -218,4 +219,9 @@ func (c *Client) Receive(stream adsStream) error {

 // Select returns an address that is deemed to be the correct one for this cluster. The returned
 // boolean indicates if the cluster exists.
-func (c *Client) Select(cluster string) (net.IP, bool) { return c.assignments.Select(cluster) }
+func (c *Client) Select(cluster string) (net.IP, bool) {
+	if cluster == "" {
+		return nil, false
+	}
+	return c.assignments.Select(cluster)
+}

@@ -1,17 +1,5 @@
 package xds

-func (c *Client) Nonce() string {
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-	return c.nonce
-}
-
-func (c *Client) SetNonce(n string) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.nonce = n
-}
-
 func (c *Client) Assignments() *assignment {
 	c.mu.RLock()
 	defer c.mu.RUnlock()

@@ -23,3 +11,27 @@ func (c *Client) SetAssignments(a *assignment) {
 	defer c.mu.Unlock()
 	c.assignments = a
 }
+
+func (c *Client) Version(typeURL string) string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.version[typeURL]
+}
+
+func (c *Client) SetVersion(typeURL, a string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.version[typeURL] = a
+}
+
+func (c *Client) Nonce(typeURL string) string {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.nonce[typeURL]
+}
+
+func (c *Client) SetNonce(typeURL, n string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.nonce[typeURL] = n
+}
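
For context on what these per-type accessors feed: every xDS response is acknowledged by echoing its version and nonce for that type URL, and an empty or stale version amounts to a NACK. A sketch with a local stand-in struct — the real DiscoveryRequest type lives in go-control-plane:

~~~ go
package main

import "fmt"

// discoveryRequest is a stand-in for the handful of xDS DiscoveryRequest
// fields that matter when acking; the real type comes from go-control-plane.
type discoveryRequest struct {
	typeURL       string
	versionInfo   string
	responseNonce string
}

// ack builds the request acknowledging the last response seen for typeURL,
// using per-type version/nonce maps like the Client above keeps.
func ack(version, nonce map[string]string, typeURL string) discoveryRequest {
	return discoveryRequest{
		typeURL:       typeURL,
		versionInfo:   version[typeURL], // an empty version would signal a NACK
		responseNonce: nonce[typeURL],
	}
}

func main() {
	const cdsURL = "type.googleapis.com/envoy.api.v2.Cluster"
	version := map[string]string{cdsURL: "1"}
	nonce := map[string]string{cdsURL: "n1"}
	fmt.Printf("%+v\n", ack(version, nonce, cdsURL))
}
~~~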