Manually Building a Kubernetes 1.8 High-Availability Cluster (7): dnsmasq

Continuing from the previous post, this part deploys dnsmasq as the cluster's DNS cache and forwarder.

I. Preparation

1. Required images

andyshinn/dnsmasq:2.72
gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
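Pre-pulling both images on each node lets imagePullPolicy: IfNotPresent find them locally; a minimal sketch, assuming Docker is the container runtime:

docker pull andyshinn/dnsmasq:2.72
docker pull gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1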

2. Create directories

/etc/dnsmasq.d
/etc/dnsmasq.d-available
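These two paths are mounted into the dnsmasq Pod as hostPath volumes by the Deployment in II.4, so create them on every node that may schedule the Pod:

mkdir -p /etc/dnsmasq.d /etc/dnsmasq.d-available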

II. Configuration files

1. /etc/dnsmasq.d-available/01-kube-dns.conf

The file /etc/dnsmasq.d/01-kube-dns.conf has identical content; rather than maintaining two copies, link one to the other, as shown below.
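For example, with a symbolic link:

ln -s /etc/dnsmasq.d-available/01-kube-dns.conf /etc/dnsmasq.d/01-kube-dns.conf

The configuration itself: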

# Listen on all addresses
bind-interfaces
listen-address=0.0.0.0

# Also answer names from the host's /etc/hosts
addn-hosts=/etc/hosts

# Query upstream servers strictly in the order listed
strict-order

# Forward k8s domain to kube-dns
server=/cluster.local/10.233.0.3
# Reply NXDOMAIN to bogus domain requests like com.cluster.local.cluster.local
local=/cluster.local.default.svc.cluster.local./default.svc.cluster.local.default.svc.cluster.local./com.default.svc.cluster.local./cluster.local.svc.cluster.local./svc.cluster.local.svc.cluster.local./com.svc.cluster.local./

# Set upstream DNS servers
server=192.168.1.1

# Ignore /etc/resolv.conf; use only the server= entries above
no-resolv
# Answer reverse lookups for private IP ranges with NXDOMAIN instead of forwarding them upstream
bogus-priv
# Do not cache negative (NXDOMAIN) answers
no-negcache
cache-size=1000
dns-forward-max=150
# Keep TTLs short so Service changes propagate quickly
max-cache-ttl=10
max-ttl=20
# Log to stderr so output is visible via kubectl logs
log-facility=-
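Before deploying, the file can be syntax-checked with dnsmasq's test mode, which exits non-zero on errors (assuming the dnsmasq binary is available on the host):

dnsmasq --test -C /etc/dnsmasq.d/01-kube-dns.conf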

2. /etc/kubernetes/dnsmasq-clusterrolebinding.yml

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dnsmasq
  namespace: "kube-system"
subjects:
  - kind: ServiceAccount
    name: dnsmasq
    namespace: "kube-system"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io

3. /etc/kubernetes/dnsmasq-serviceaccount.yml

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dnsmasq
  namespace: "kube-system"
  labels:
    kubernetes.io/cluster-service: "true"

4. /etc/kubernetes/dnsmasq-deploy.yml

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: dnsmasq
  namespace: "kube-system"
  labels:
    k8s-app: dnsmasq
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: dnsmasq
  strategy:
    type: "Recreate"
  template:
    metadata:
      labels:
        k8s-app: dnsmasq
        kubernetes.io/cluster-service: "true"
        kubespray/dnsmasq-checksum: "37a3d39ad780e599d0ba2405abfb43cd8d6139a3"
    spec:
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: dnsmasq
          image: "andyshinn/dnsmasq:2.72"
          imagePullPolicy: IfNotPresent
          command:
            - dnsmasq
          args:
            - -k
            - -C
            - /etc/dnsmasq.d/01-kube-dns.conf
          securityContext:
            capabilities:
              add:
                - NET_ADMIN
          resources:
            limits:
              cpu: 100m
              memory: 170Mi
            requests:
              cpu: 40m
              memory: 50Mi
          ports:
            - name: dns
              containerPort: 53
              protocol: UDP
            - name: dns-tcp
              containerPort: 53
              protocol: TCP
          volumeMounts:
            - name: etcdnsmasqd
              mountPath: /etc/dnsmasq.d
            - name: etcdnsmasqdavailable
              mountPath: /etc/dnsmasq.d-available
      volumes:
        - name: etcdnsmasqd
          hostPath:
            path: /etc/dnsmasq.d
        - name: etcdnsmasqdavailable
          hostPath:
            path: /etc/dnsmasq.d-available
      dnsPolicy: Default  # Don't use cluster DNS.

5. /etc/kubernetes/dnsmasq-svc.yml

---
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: 'true'
    k8s-app: dnsmasq
  name: dnsmasq
  namespace: kube-system
spec:
  ports:
    - port: 53
      name: dns-tcp
      targetPort: 53
      protocol: TCP
    - port: 53
      name: dns
      targetPort: 53
      protocol: UDP
  type: ClusterIP
  clusterIP: 10.233.0.2
  selector:
    k8s-app: dnsmasq
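This Service pins the well-known VIP 10.233.0.2 in front of the dnsmasq Pods; dnsmasq in turn forwards cluster.local queries to kube-dns at 10.233.0.3 (see the config in II.1). For Pods to actually use it, each kubelet should advertise this VIP as the cluster DNS server; assuming the kubelet flag style used earlier in this series, something like:

--cluster-dns=10.233.0.2 --cluster-domain=cluster.local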

6. /etc/kubernetes/dnsmasq-autoscaler.yml

---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: dnsmasq-autoscaler
  namespace: kube-system
  labels:
    k8s-app: dnsmasq-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        k8s-app: dnsmasq-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      serviceAccountName: dnsmasq
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: autoscaler
          image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
          resources:
            requests:
              cpu: "20m"
              memory: "10Mi"
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=dnsmasq-autoscaler
            - --target=Deployment/dnsmasq
            # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
            # If using small nodes, "nodesPerReplica" should dominate.
            - --default-params={"linear":{"nodesPerReplica":10,"preventSinglePointFailure":true}}
            - --logtostderr=true
            - --v=2

III. Create

1. apply

kubectl apply -f /etc/kubernetes/dnsmasq-clusterrolebinding.yml
kubectl apply -f /etc/kubernetes/dnsmasq-serviceaccount.yml

2. create

kubectl create -f /etc/kubernetes/dnsmasq-deploy.yml
kubectl create -f /etc/kubernetes/dnsmasq-svc.yml
kubectl create -f /etc/kubernetes/dnsmasq-autoscaler.yml
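Optionally wait for both Deployments to finish rolling out before moving on to verification:

kubectl rollout status deployment/dnsmasq -n kube-system
kubectl rollout status deployment/dnsmasq-autoscaler -n kube-system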

IV. Verify

1. kubectl get po -o wide -n kube-system

[root@node1 ~]# kubectl get po -o wide -n kube-system
NAME                                  READY     STATUS    RESTARTS   AGE       IP              NODE
calico-node-5d56t                     1/1       Running   0          2h        192.168.1.122   node2
calico-node-t8z9l                     1/1       Running   0          2h        192.168.1.126   node3
calico-node-z7nr5                     1/1       Running   0          2h        192.168.1.121   node1
dnsmasq-775767cfd7-654vs              1/1       Running   0          43m       10.233.75.3     node2
dnsmasq-775767cfd7-m5hfl              1/1       Running   0          38m       10.233.71.6     node3
dnsmasq-autoscaler-856b5c899b-tvzkl   1/1       Running   0          43m       10.233.71.2     node3
kube-apiserver-node1                  1/1       Running   0          5h        192.168.1.121   node1
kube-apiserver-node2                  1/1       Running   0          5h        192.168.1.122   node2
kube-controller-manager-node1         1/1       Running   0          5h        192.168.1.121   node1
kube-controller-manager-node2         1/1       Running   0          5h        192.168.1.122   node2
kube-proxy-node1                      1/1       Running   0          5h        192.168.1.121   node1
kube-proxy-node2                      1/1       Running   0          5h        192.168.1.122   node2
kube-proxy-node3                      1/1       Running   0          5h        192.168.1.126   node3
kube-scheduler-node1                  1/1       Running   0          5h        192.168.1.121   node1
kube-scheduler-node2                  1/1       Running   0          5h        192.168.1.122   node2
nginx-proxy-node3                     1/1       Running   0          5h        192.168.1.126   node3
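2. Test resolution

A quick end-to-end check against the Service VIP from any node (this assumes dig from bind-utils/dnsutils is installed). External names should already resolve through the 192.168.1.1 upstream; cluster.local names will only answer once the kube-dns backend at 10.233.0.3 is running:

dig @10.233.0.2 www.baidu.com +short
dig @10.233.0.2 kubernetes.default.svc.cluster.local +short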