package proxy
// Functions other plugins might want to use to do lookups in the same style as the proxy.
import (
"fmt"
"net"
"sync/atomic"
"time"
"github.com/coredns/coredns/plugin/pkg/healthcheck"
"github.com/coredns/coredns/request"
"github.com/miekg/dns"
"golang.org/x/net/context"
)
// NewLookup creates a new proxy with the hosts in hosts and a Random policy.
func NewLookup(hosts []string) Proxy { return NewLookupWithOption(hosts, Options{}) }
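
// A minimal construction sketch; the upstream addresses below are
// illustrative assumptions, not defaults of this package:
//
//	p := NewLookup([]string{"8.8.8.8:53", "8.8.4.4:53"})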
// NewLookupWithOption creates a simple round-robin forwarder with a potentially forced protocol for the upstream.
func NewLookupWithOption(hosts []string, opts Options) Proxy {
p := Proxy{Next: nil}
// TODO(miek): this needs to be unified with upstream.go's NewStaticUpstreams, caddy uses NewHost
// we should copy/make something similar.
upstream := &staticUpstream{
from: ".",
HealthCheck: healthcheck.HealthCheck{
FailTimeout: 5 * time.Second,
MaxFails: 3,
},
ex: newDNSExWithOption(opts),
}
upstream.Hosts = make([]*healthcheck.UpstreamHost, len(hosts))
for i, host := range hosts {
uh := &healthcheck.UpstreamHost{
Name: host,
FailTimeout: upstream.FailTimeout,
CheckDown: checkDownFunc(upstream),
}
upstream.Hosts[i] = uh
}
p.Upstreams = &[]Upstream{upstream}
return p
}
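
// A sketch of forcing TCP for upstream exchanges; this assumes the Options
// struct passed to newDNSExWithOption exposes a ForceTCP field:
//
//	p := NewLookupWithOption([]string{"8.8.8.8:53"}, Options{ForceTCP: true})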
// Lookup uses name and type to forge a new message and sends it upstream. It
// sets any EDNS0 options correctly so that downstream will be able to process the reply.
func (p Proxy) Lookup(state request.Request, name string, typ uint16) (*dns.Msg, error) {
req := new(dns.Msg)
req.SetQuestion(name, typ)
state.SizeAndDo(req)
state2 := request.Request{W: state.W, Req: req}
return p.lookup(state2)
}
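
// A usage sketch for Lookup; it assumes state is a request.Request obtained
// from the caller, e.g. inside a plugin's ServeDNS:
//
//	reply, err := p.Lookup(state, "example.org.", dns.TypeMX)
//	if err != nil {
//		// no upstream matched or all backends were unreachable
//	}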
// Forward forwards the request in state as-is, unlike Lookup which adds an EDNS0 record to the message.
func (p Proxy) Forward(state request.Request) (*dns.Msg, error) {
return p.lookup(state)
}
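
// In contrast to Lookup, Forward sends the message in state unmodified; a
// minimal sketch, assuming state already carries the client's query:
//
//	reply, err := p.Forward(state)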
func (p Proxy) lookup(state request.Request) (*dns.Msg, error) {
upstream := p.match(state)
if upstream == nil {
return nil, errInvalidDomain
}
for {
start := time.Now()
reply := new(dns.Msg)
var backendErr error
// Since Select() should give us "up" hosts, keep retrying
// hosts until timeout (or until we get a nil host).
for time.Since(start) < tryDuration {
host := upstream.Select()
if host == nil {
return nil, fmt.Errorf("%s: %s", errUnreachable, "no upstream host")
}
			// Duplicated from proxy.go, but with a twist: we don't write the
			// reply back to the client; we return it, and there is no monitoring to update here.
atomic.AddInt64(&host.Conns, 1)
reply, backendErr = upstream.Exchanger().Exchange(context.TODO(), host.Name, state)
atomic.AddInt64(&host.Conns, -1)
if backendErr == nil {
return reply, nil
}
if oe, ok := backendErr.(*net.OpError); ok {
if oe.Timeout() { // see proxy.go for docs.
continue
}
}
timeout := host.FailTimeout
if timeout == 0 {
timeout = defaultFailTimeout
}
atomic.AddInt32(&host.Fails, 1)
fails := atomic.LoadInt32(&host.Fails)
go func(host *healthcheck.UpstreamHost, timeout time.Duration) {
time.Sleep(timeout)
atomic.AddInt32(&host.Fails, -1)
				if fails%failureCheck == 0 { // Kick off healthcheck on every third failure.
host.HealthCheckURL()
}
}(host, timeout)
}
return nil, fmt.Errorf("%s: %s", errUnreachable, backendErr)
}
}