Adding wildcard support (#190)

* Commenting out unused functions. TODO: remove when they are no longer needed

* Update README with namespace and template example

* Adding note about changing the record name format via a template

* Adding test scripts to automate k8s startup

* Automating k8s namespace creation

* Adding automation to start 4 k8s services

* Updating documentation for k8s tests

* Avoid downloading kubectl if already exists

* Adding debug statement when namespace is not exposed.

* Adding basic kubernetes integration tests

* Makefile now contains a "testk8s" target. This target requires k8s to
  be running.
* Adding test/kubernetes_test.go file with a couple of basic A record
  tests.

* Updating the k8s integration test target to run only the k8s integration tests

* Adding support for namespace wildcards

* Refactoring to move filtering logic to kubernetes.go file

* go fmt fixes

* Adding wildcard support for namespaces and service names

* Kubernetes integration tests updated for A records.
* Expanded record name assembly for the answer section is not yet implemented.
* Refactoring to focus k8sclient code just on accessing the k8s API.
  Filtering is now handled in kubernetes.go

* Adding wildcard test cases

* Adding skydns startup script. (To allow side-by-side testing of wildcards.)
* Commenting out record name assembly based on NameTemplate. Need to improve the template before this makes sense.

* Adding basic SRV integration tests

* Need to add verification for additional answer section

* Fixing comments and formatting

* Moving wildcard constants to vars

* Travis test execution appears to be failing on access to these constants

* Fixing access to util package

* Trying to work around Travis test bug

* Reverting to access kubernetes/util as "util"

Travis breakage is due to "Infoblox-CTO" in src path
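
For context on what the new wildcard support allows: with the `kubernetes` middleware running, "*" or "any" can stand in for the namespace (or service) label of a query. Below is a minimal sketch of issuing such a query with the miekg/dns client; the 127.0.0.1:2053 address is an assumption mirroring the port used by the integration test added in this commit.

~~~
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	m := new(dns.Msg)
	// "*" (or "any") in the namespace position matches every exposed namespace.
	m.SetQuestion("mynginx.*.coredns.local.", dns.TypeA)
	in, _, err := c.Exchange(m, "127.0.0.1:2053") // assumed CoreDNS test address
	if err != nil {
		fmt.Println("query failed:", err)
		return
	}
	for _, rr := range in.Answer {
		fmt.Println(rr)
	}
}
~~~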
This commit is contained in:
Michael Richmond
2016-07-14 14:50:14 -07:00
committed by Miek Gieben
parent 319d30697a
commit 3f4ec783d2
17 changed files with 527 additions and 67 deletions

1
.gitignore vendored
View File

@@ -2,3 +2,4 @@ query.log
Corefile
*.swp
coredns
kubectl

View File

@@ -2,7 +2,7 @@
BUILD_VERBOSE := -v
TEST_VERBOSE :=
#TEST_VERBOSE := -v
TEST_VERBOSE := -v
all:
go build $(BUILD_VERBOSE)
@@ -20,6 +20,11 @@ deps:
test:
go test $(TEST_VERBOSE) ./...
.PHONY: testk8s
testk8s:
# go test $(TEST_VERBOSE) -tags=k8sIntegration ./...
go test $(TEST_VERBOSE) -tags=k8sIntegration -run 'TestK8sIntegration' ./test
.PHONY: clean
clean:
go clean
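
The testk8s target only picks up tests guarded by the k8sIntegration build tag, so the plain `test` target keeps skipping them. A minimal sketch of such a gated test file follows; the file contents and test name are hypothetical.

~~~
// +build k8sIntegration

package test

import "testing"

// This file is only compiled when `go test` runs with -tags=k8sIntegration,
// which is exactly what the testk8s target passes; `make test` never sees it.
func TestK8sIntegrationExample(t *testing.T) {
	t.Log("runs only under the k8sIntegration build tag")
}
~~~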

View File

@@ -88,9 +88,9 @@ func TestSaveAndLoadECCPrivateKey(t *testing.T) {
}
// verify loaded key is correct
if !PrivateKeysSame(privateKey, loadedKey) {
t.Error("Expected key bytes to be the same, but they weren't")
}
if !PrivateKeysSame(privateKey, loadedKey) {
t.Error("Expected key bytes to be the same, but they weren't")
}
}
// PrivateKeysSame compares the bytes of a and b and returns true if they are the same.

View File

@@ -7,6 +7,7 @@ are constructed as "myservice.mynamespace.coredns.local" where:
* "mynamespace" is the k8s namespace for the service, and
* "coredns.local" is the zone configured for `kubernetes`.
The record name format can be changed by specifying a name template in the Corefile.
## Syntax
@@ -36,6 +37,10 @@ This is the default kubernetes setup, with everything specified in full:
kubernetes coredns.local {
# Use url for k8s API endpoint
endpoint http://localhost:8080
# Assemble k8s record names with the template
template {service}.{namespace}.{zone}
# Only expose the k8s namespace "demo"
namespaces demo
}
# cache 160 coredns.local
}
@@ -247,7 +252,7 @@ return the IP addresses for all services with "nginx" in the service name.
TBD:
* How does this relate to the k8s load-balancer configuration?
* Do wildcards search across namespaces?
* Do wildcards search across namespaces? (Yes)
* Initial implementation assumes that a namespace maps to the first DNS label
below the zone managed by the kubernetes middleware. This assumption may
need to be revised.
@@ -344,4 +349,5 @@ TBD:
and A-record queries. Automate testing with cache in place.
* Automate CoreDNS performance tests. Initially for zone files, and for
pre-loaded k8s API cache.
* Automate integration testing with kubernetes.
* Automate integration testing with kubernetes. (k8s launch and service start-up
automation is in middleware/kubernetes/tests)
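
The `template` directive shown above is conceptually a substitution of the `{service}`, `{namespace}` and `{zone}` labels into the configured pattern. A rough illustration of that substitution, not the middleware's actual nametemplate API:

~~~
package main

import (
	"fmt"
	"strings"
)

// expand is an illustrative stand-in for the name template logic:
// each {label} in the template is replaced by the corresponding value.
func expand(template, service, namespace, zone string) string {
	return strings.NewReplacer(
		"{service}", service,
		"{namespace}", namespace,
		"{zone}", zone,
	).Replace(template)
}

func main() {
	// With the default template this prints "mynginx.demo.coredns.local".
	fmt.Println(expand("{service}.{namespace}.{zone}", "mynginx", "demo", "coredns.local"))
}
~~~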

View File

@@ -10,7 +10,6 @@ import (
)
func (k Kubernetes) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) (int, error) {
fmt.Printf("[debug] here entering ServeDNS: ctx:%v dnsmsg:%v\n", ctx, r)
state := middleware.State{W: w, Req: r}

View File

@@ -117,30 +117,6 @@ func (c *K8sConnector) GetServicesByNamespace() (map[string][]ServiceItem, error
return items, nil
}
// GetServiceItemsInNamespace returns the ServiceItems that match
// servicename in the namespace
func (c *K8sConnector) GetServiceItemsInNamespace(namespace string, servicename string) ([]*ServiceItem, error) {
itemMap, err := c.GetServicesByNamespace()
if err != nil {
fmt.Printf("[ERROR] Getting service list produced error: %v", err)
return nil, err
}
// TODO: Handle case where namespace == nil
var serviceItems []*ServiceItem
for _, x := range itemMap[namespace] {
if x.Metadata.Name == servicename {
serviceItems = append(serviceItems, &x)
}
}
return serviceItems, nil
}
func NewK8sConnector(baseURL string) *K8sConnector {
k := new(K8sConnector)

View File

@@ -62,7 +62,7 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
typeName string
)
fmt.Println("enter Records('", name, "', ", exact, ")")
fmt.Println("[debug] enter Records('", name, "', ", exact, ")")
zone, serviceSegments := g.getZoneForName(name)
/*
@@ -83,6 +83,18 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
serviceName = g.NameTemplate.GetServiceFromSegmentArray(serviceSegments)
typeName = g.NameTemplate.GetTypeFromSegmentArray(serviceSegments)
if namespace == "" {
err := errors.New("Parsing query string did not produce a namespace value. Assuming wildcard namespace.")
fmt.Printf("[WARN] %v\n", err)
namespace = util.WildcardStar
}
if serviceName == "" {
err := errors.New("Parsing query string did not produce a serviceName value. Assuming wildcard serviceName.")
fmt.Printf("[WARN] %v\n", err)
serviceName = util.WildcardStar
}
fmt.Println("[debug] exact: ", exact)
fmt.Println("[debug] zone: ", zone)
fmt.Println("[debug] servicename: ", serviceName)
@@ -90,21 +102,18 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
fmt.Println("[debug] typeName: ", typeName)
fmt.Println("[debug] APIconn: ", g.APIConn)
// TODO: Implement wildcard support to allow blank namespace value
if namespace == "" {
err := errors.New("Parsing query string did not produce a namespace value")
fmt.Printf("[ERROR] %v\n", err)
return nil, err
}
nsWildcard := util.SymbolContainsWildcard(namespace)
serviceWildcard := util.SymbolContainsWildcard(serviceName)
// Abort if the namespace is not published per CoreFile
if g.Namespaces != nil && !util.StringInSlice(namespace, *g.Namespaces) {
// Abort if the namespace does not contain a wildcard, and namespace is not published per CoreFile
// Case where namespace contains a wildcard is handled in Get(...) method.
if (!nsWildcard) && (g.Namespaces != nil && !util.StringInSlice(namespace, *g.Namespaces)) {
fmt.Printf("[debug] Namespace '%v' is not published by Corefile\n", namespace)
return nil, nil
}
k8sItems, err := g.APIConn.GetServiceItemsInNamespace(namespace, serviceName)
k8sItems, err := g.Get(namespace, nsWildcard, serviceName, serviceWildcard)
fmt.Println("[debug] k8s items:", k8sItems)
if err != nil {
fmt.Printf("[ERROR] Got error while looking up ServiceItems. Error is: %v\n", err)
return nil, err
@@ -114,29 +123,27 @@ func (g Kubernetes) Records(name string, exact bool) ([]msg.Service, error) {
return nil, nil
}
// test := g.NameTemplate.GetRecordNameFromNameValues(nametemplate.NameValues{ServiceName: serviceName, TypeName: typeName, Namespace: namespace, Zone: zone})
// fmt.Printf("[debug] got recordname %v\n", test)
records := g.getRecordsForServiceItems(k8sItems, name)
records := g.getRecordsForServiceItems(k8sItems, nametemplate.NameValues{TypeName: typeName, ServiceName: serviceName, Namespace: namespace, Zone: zone})
return records, nil
}
// TODO: assemble name from parts found in k8s data based on name template rather than reusing query string
func (g Kubernetes) getRecordsForServiceItems(serviceItems []*k8sc.ServiceItem, name string) []msg.Service {
func (g Kubernetes) getRecordsForServiceItems(serviceItems []k8sc.ServiceItem, values nametemplate.NameValues) []msg.Service {
var records []msg.Service
for _, item := range serviceItems {
fmt.Println("[debug] clusterIP:", item.Spec.ClusterIP)
clusterIP := item.Spec.ClusterIP
fmt.Println("[debug] clusterIP:", clusterIP)
// Create records by constructing record name from template...
//values.Namespace = item.Metadata.Namespace
//values.ServiceName = item.Metadata.Name
//s := msg.Service{Host: g.NameTemplate.GetRecordNameFromNameValues(values)}
//records = append(records, s)
// Create records for each exposed port...
for _, p := range item.Spec.Ports {
fmt.Println("[debug] port:", p.Port)
}
clusterIP := item.Spec.ClusterIP
s := msg.Service{Host: name}
records = append(records, s)
for _, p := range item.Spec.Ports {
s := msg.Service{Host: clusterIP, Port: p.Port}
records = append(records, s)
}
@@ -146,17 +153,50 @@ func (g Kubernetes) getRecordsForServiceItems(serviceItems []*k8sc.ServiceItem,
return records
}
/*
// Get performs the call to the Kubernetes http API.
func (g Kubernetes) Get(path string, recursive bool) (bool, error) {
func (g Kubernetes) Get(namespace string, nsWildcard bool, servicename string, serviceWildcard bool) ([]k8sc.ServiceItem, error) {
serviceList, err := g.APIConn.GetServiceList()
fmt.Println("[debug] in Get path: ", path)
fmt.Println("[debug] in Get recursive: ", recursive)
if err != nil {
fmt.Printf("[ERROR] Getting service list produced error: %v", err)
return nil, err
}
return false, nil
var resultItems []k8sc.ServiceItem
for _, item := range serviceList.Items {
if symbolMatches(namespace, item.Metadata.Namespace, nsWildcard) && symbolMatches(servicename, item.Metadata.Name, serviceWildcard) {
// If namespace has a wildcard, filter results against Corefile namespace list.
// (Namespaces without a wildcard were filtered before the call to this function.)
if nsWildcard && (g.Namespaces != nil && !util.StringInSlice(item.Metadata.Namespace, *g.Namespaces)) {
fmt.Printf("[debug] Namespace '%v' is not published by Corefile\n", item.Metadata.Namespace)
continue
}
resultItems = append(resultItems, item)
}
}
return resultItems, nil
}
*/
func symbolMatches(queryString string, candidateString string, wildcard bool) bool {
result := false
switch {
case !wildcard:
result = (queryString == candidateString)
case queryString == util.WildcardStar:
result = true
case queryString == util.WildcardAny:
result = true
}
return result
}
// TODO: Remove these unused functions. One is related to Ttl calculation
// Implement Ttl and priority calculation based on service count before
// removing this code.
/*
// splitDNSName separates the name into DNS segments and reverses the segments.
func (g Kubernetes) splitDNSName(name string) []string {
l := dns.SplitDomainName(name)
@@ -166,16 +206,15 @@ func (g Kubernetes) splitDNSName(name string) []string {
return l
}
*/
// skydns/local/skydns/east/staging/web
// skydns/local/skydns/west/production/web
//
// skydns/local/skydns/*/*/web
// skydns/local/skydns/*/web
/*
// loopNodes recursively loops through the nodes and returns all the values. The nodes' keyname
// will be match against any wildcards when star is true.
/*
func (g Kubernetes) loopNodes(ns []*etcdc.Node, nameParts []string, star bool, bx map[msg.Service]bool) (sx []msg.Service, err error) {
if bx == nil {
bx = make(map[msg.Service]bool)

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Based on instructions at: http://kubernetes.io/docs/getting-started-guides/docker/
#K8S_VERSION=$(curl -sS https://storage.googleapis.com/kubernetes-release/release/latest.txt)
K8S_VERSION="v1.2.4"
ARCH="amd64"
export K8S_VERSION
export ARCH
#RUN_SKYDNS="yes"
RUN_SKYDNS="no"
if [ "${RUN_SKYDNS}" = "yes" ]; then
DNS_ARGUMENTS="--cluster-dns=10.0.0.10 --cluster-domain=cluster.local"
else
DNS_ARGUMENTS=""
fi
docker run -d \
--volume=/:/rootfs:ro \
--volume=/sys:/sys:ro \
--volume=/var/lib/docker/:/var/lib/docker:rw \
--volume=/var/lib/kubelet/:/var/lib/kubelet:rw \
--volume=/var/run:/var/run:rw \
--net=host \
--pid=host \
--privileged \
gcr.io/google_containers/hyperkube-${ARCH}:${K8S_VERSION} \
/hyperkube kubelet \
--containerized \
--hostname-override=127.0.0.1 \
--api-servers=http://localhost:8080 \
--config=/etc/kubernetes/manifests \
${DNS_ARGUMENTS} \
--allow-privileged --v=2

View File

@@ -0,0 +1,18 @@
#!/bin/bash
PWD=`pwd`
BASEDIR=`realpath $(dirname ${0})`
cd ${BASEDIR}
if [ ! -e kubectl ]; then
curl -O http://storage.googleapis.com/kubernetes-release/release/v1.2.4/bin/linux/amd64/kubectl
chmod u+x kubectl
fi
${BASEDIR}/kubectl config set-cluster test-doc --server=http://localhost:8080
${BASEDIR}/kubectl config set-context test-doc --cluster=test-doc
${BASEDIR}/kubectl config use-context test-doc
cd ${PWD}
alias kubctl="${BASEDIR}/kubectl"

View File

@@ -0,0 +1,39 @@
#!/bin/bash
# Running skydns based on instructions at: https://testdatamanagement.wordpress.com/2015/09/01/running-kubernetes-in-docker-with-dns-on-a-single-node/
KUBECTL='./kubectl'
#RUN_SKYDNS="yes"
RUN_SKYDNS="no"
wait_until_k8s_ready() {
# Wait until kubernetes is up and fully responsive
while :
do
${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
if [ "${?}" = "0" ]; then
break
else
echo "sleeping for 5 seconds"
sleep 5
fi
done
echo "kubernetes nodes:"
${KUBECTL} get nodes
}
if [ "${RUN_SKYDNS}" = "yes" ]; then
wait_until_k8s_ready
echo "Launch kube2sky..."
docker run -d --net=host gcr.io/google_containers/kube2sky:1.11 --kube_master_url=http://127.0.0.1:8080 --domain=cluster.local
echo ""
echo "Launch SkyDNS..."
docker run -d --net=host gcr.io/google_containers/skydns:2015-03-11-001 --machines=http://localhost:4001 --addr=0.0.0.0:53 --domain=cluster.local
else
true
fi

View File

@@ -0,0 +1,80 @@
#!/bin/bash
KUBECTL='./kubectl'
wait_until_k8s_ready() {
# Wait until kubernetes is up and fully responsive
while :
do
${KUBECTL} get nodes 2>/dev/null | grep -q '127.0.0.1'
if [ "${?}" = "0" ]; then
break
else
echo "sleeping for 5 seconds"
sleep 5
fi
done
echo "kubernetes nodes:"
${KUBECTL} get nodes
}
create_namespaces() {
for n in ${NAMESPACES};
do
echo "Creating namespace: ${n}"
${KUBECTL} get namespaces --no-headers 2>/dev/null | grep -q ${n}
if [ "${?}" != "0" ]; then
${KUBECTL} create namespace ${n}
fi
done
echo "kubernetes namespaces:"
${KUBECTL} get namespaces
}
# run_and_expose_service <servicename> <namespace> <image> <port>
run_and_expose_service() {
if [ "${#}" != "4" ]; then
return -1
fi
service="${1}"
namespace="${2}"
image="${3}"
port="${4}"
echo " starting service '${service}' in namespace '${namespace}"
${KUBECTL} get deployment --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${service}
if [ "${?}" != "0" ]; then
${KUBECTL} run ${service} --namespace=${namespace} --image=${image}
else
echo "warn: service '${service}' already running in namespace '${namespace}'"
fi
${KUBECTL} get service --namespace=${namespace} --no-headers 2>/dev/null | grep -q ${service}
if [ "${?}" != "0" ]; then
${KUBECTL} expose deployment ${service} --namespace=${namespace} --port=${port}
else
echo "warn: service '${service}' already exposed in namespace '${namespace}'"
fi
}
wait_until_k8s_ready
NAMESPACES="demo test"
create_namespaces
echo ""
echo "Starting services:"
run_and_expose_service mynginx demo nginx 80
run_and_expose_service webserver demo nginx 80
run_and_expose_service mynginx test nginx 80
run_and_expose_service webserver test nginx 80
echo ""
echo "Services exposed:"
${KUBECTL} get services --all-namespaces

View File

@@ -0,0 +1,35 @@
## Test scripts to automate kubernetes startup
Requirements:
docker
curl
The scripts in this directory start up kubernetes with docker as the container runtime.
After starting kubernetes, a couple of kubernetes services are started to allow automatic
testing of CoreDNS with kubernetes.
To use, run the scripts as:
~~~
$ ./00_run_k8s.sh && ./10_setup_kubectl.sh && ./20_setup_k8s_services.sh
~~~
After running the above scripts, kubernetes will be running on the localhost with the following services
exposed:
~~~
NAMESPACE NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes 10.0.0.1 <none> 443/TCP 48m
demo mynginx 10.0.0.168 <none> 80/TCP 9m
demo webserver 10.0.0.28 <none> 80/TCP 2m
test mynginx 10.0.0.4 <none> 80/TCP 2m
test webserver 10.0.0.39 <none> 80/TCP 2m
~~~
Kubernetes and all running containers can be unceremoniously stopped by
running the `kill_all_containers.sh` script.
~~~
$ ./kill_all_containers.sh
~~~

View File

@@ -0,0 +1,5 @@
#!/bin/bash
docker rm -f $(docker ps -a -q)
sleep 1
docker rm -f $(docker ps -a -q)

View File

@@ -1,6 +1,10 @@
// Package kubernetes/util provides helper functions for the kubernetes middleware
package util
import (
"strings"
)
// StringInSlice check whether string a is a member of slice.
func StringInSlice(a string, slice []string) bool {
for _, b := range slice {
@@ -10,3 +14,13 @@ func StringInSlice(a string, slice []string) bool {
}
return false
}
// SymbolContainsWildcard checks whether symbol contains a wildcard value
func SymbolContainsWildcard(symbol string) bool {
return (strings.Contains(symbol, WildcardStar) || (symbol == WildcardAny))
}
const (
WildcardStar = "*"
WildcardAny = "any"
)

View File

@@ -31,3 +31,25 @@ func TestStringInSlice(t *testing.T) {
}
}
}
// Test data for TestSymbolContainsWildcard cases.
var testdataSymbolContainsWildcard = []struct {
Symbol string
ExpectedResult bool
}{
{"mynamespace", false},
{"*", true},
{"any", true},
{"my*space", true},
{"*space", true},
{"myname*", true},
}
func TestSymbolContainsWildcard(t *testing.T) {
for _, example := range testdataSymbolContainsWildcard {
actualResult := SymbolContainsWildcard(example.Symbol)
if actualResult != example.ExpectedResult {
t.Errorf("Expected SymbolContainsWildcard result '%v' for example string='%v'. Instead got result '%v'.", example.ExpectedResult, example.Symbol, actualResult)
}
}
}

182
test/kubernetes_test.go Normal file
View File

@@ -0,0 +1,182 @@
// +build k8sIntegration
package test
import (
"fmt"
"io/ioutil"
"log"
"net/http"
"testing"
"github.com/miekg/dns"
)
// Test data for A records
var testdataLookupA = []struct {
Query string
TotalAnswerCount int
ARecordCount int
}{
// Matching queries
{"mynginx.demo.coredns.local.", 1, 1}, // One A record, should exist
// Failure queries
{"mynginx.test.coredns.local.", 0, 0}, // One A record, is not exposed
{"someservicethatdoesnotexist.demo.coredns.local.", 0, 0}, // Record does not exist
// Namespace wildcards
{"mynginx.*.coredns.local.", 1, 1}, // One A record, via wildcard namespace
{"mynginx.any.coredns.local.", 1, 1}, // One A record, via wildcard namespace
{"someservicethatdoesnotexist.*.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
{"someservicethatdoesnotexist.any.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
{"*.demo.coredns.local.", 2, 2}, // Two A records, via wildcard
{"any.demo.coredns.local.", 2, 2}, // Two A records, via wildcard
{"*.test.coredns.local.", 0, 0}, // Two A record, via wildcard that is not exposed
{"any.test.coredns.local.", 0, 0}, // Two A record, via wildcard that is not exposed
{"*.*.coredns.local.", 2, 2}, // Two A records, via namespace and service wildcard
}
// Test data for SRV records
var testdataLookupSRV = []struct {
Query string
TotalAnswerCount int
// ARecordCount int
SRVRecordCount int
}{
// Matching queries
{"mynginx.demo.coredns.local.", 1, 1}, // One SRV record, should exist
// Failure queries
{"mynginx.test.coredns.local.", 0, 0}, // One SRV record, is not exposed
{"someservicethatdoesnotexist.demo.coredns.local.", 0, 0}, // Record does not exist
// Namespace wildcards
{"mynginx.*.coredns.local.", 1, 1}, // One SRV record, via wildcard namespace
{"mynginx.any.coredns.local.", 1, 1}, // One SRV record, via wildcard namespace
{"someservicethatdoesnotexist.*.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
{"someservicethatdoesnotexist.any.coredns.local.", 0, 0}, // Record does not exist with wildcard for namespace
{"*.demo.coredns.local.", 1, 1}, // One SRV record, via wildcard
{"any.demo.coredns.local.", 1, 1}, // One SRV record, via wildcard
{"*.test.coredns.local.", 0, 0}, // One SRV record, via wildcard that is not exposed
{"any.test.coredns.local.", 0, 0}, // One SRV record, via wildcard that is not exposed
{"*.*.coredns.local.", 1, 1}, // One SRV record, via namespace and service wildcard
}
// checkKubernetesRunning performs a basic check that the kubernetes API is reachable on localhost:8080
func checkKubernetesRunning() bool {
_, err := http.Get("http://localhost:8080/api/v1")
return err == nil
}
func TestK8sIntegration(t *testing.T) {
t.Log(" === RUN testLookupA")
testLookupA(t)
t.Log(" === RUN testLookupSRV")
testLookupSRV(t)
}
func testLookupA(t *testing.T) {
if !checkKubernetesRunning() {
t.Skip("Skipping Kubernetes Integration tests. Kubernetes is not running")
}
// Note: Use different port to avoid conflict with servers used in other tests.
coreFile :=
`.:2053 {
kubernetes coredns.local {
endpoint http://localhost:8080
namespaces demo
}
`
server, _, udp, err := Server(t, coreFile)
if err != nil {
t.Fatal("Could not get server: %s", err)
}
defer server.Stop()
log.SetOutput(ioutil.Discard)
for _, testData := range testdataLookupA {
t.Logf("[log] Testing query string: '%v'\n", testData.Query)
dnsClient := new(dns.Client)
dnsMessage := new(dns.Msg)
dnsMessage.SetQuestion(testData.Query, dns.TypeA)
dnsMessage.SetEdns0(4096, true)
res, _, err := dnsClient.Exchange(dnsMessage, udp)
if err != nil {
t.Fatal("Could not send query: %s", err)
}
// Count A records in the answer section
ARecordCount := 0
for _, a := range res.Answer {
if a.Header().Rrtype == dns.TypeA {
ARecordCount++
}
}
if ARecordCount != testData.ARecordCount {
t.Errorf("Expected '%v' A records in response. Instead got '%v' A records. Test query string: '%v'", testData.ARecordCount, ARecordCount, testData.Query)
}
if len(res.Answer) != testData.TotalAnswerCount {
t.Errorf("Expected '%v' records in answer section. Instead got '%v' records in answer section. Test query string: '%v'", testData.TotalAnswerCount, len(res.Answer), testData.Query)
}
}
}
func testLookupSRV(t *testing.T) {
if !checkKubernetesRunning() {
t.Skip("Skipping Kubernetes Integration tests. Kubernetes is not running")
}
// Note: Use different port to avoid conflict with servers used in other tests.
coreFile :=
`.:2054 {
kubernetes coredns.local {
endpoint http://localhost:8080
namespaces demo
}
`
server, _, udp, err := Server(t, coreFile)
if err != nil {
t.Fatal("Could not get server: %s", err)
}
defer server.Stop()
log.SetOutput(ioutil.Discard)
// TODO: Add checks for A records in additional section
for _, testData := range testdataLookupSRV {
t.Logf("[log] Testing query string: '%v'\n", testData.Query)
dnsClient := new(dns.Client)
dnsMessage := new(dns.Msg)
dnsMessage.SetQuestion(testData.Query, dns.TypeSRV)
dnsMessage.SetEdns0(4096, true)
res, _, err := dnsClient.Exchange(dnsMessage, udp)
if err != nil {
t.Fatal("Could not send query: %s", err)
}
// Count SRV records in the answer section
srvRecordCount := 0
for _, a := range res.Answer {
fmt.Printf("RR: %v\n", a)
if a.Header().Rrtype == dns.TypeSRV {
srvRecordCount++
}
}
if srvRecordCount != testData.SRVRecordCount {
t.Errorf("Expected '%v' SRV records in response. Instead got '%v' SRV records. Test query string: '%v'", testData.SRVRecordCount, srvRecordCount, testData.Query)
}
if len(res.Answer) != testData.TotalAnswerCount {
t.Errorf("Expected '%v' records in answer section. Instead got '%v' records in answer section. Test query string: '%v'", testData.TotalAnswerCount, len(res.Answer), testData.Query)
}
}
}