
k8s Kubernetes high-availability HTTPS and HTTP cluster in practice (HA)

Deploying a highly available Kubernetes cluster

#[email protected] 20170509

This article is in two parts:

1. HA with HTTP access to kube-apiserver (no certificates), Kubernetes 1.5.x with all components installed via yum.

2. HA with HTTPS access to kube-apiserver (certificate mode), based on Kubernetes 1.6.x with all components installed from binaries; see the second half of this article for the certificate setup.

#1. OS: CentOS 7. All nodes run Kubernetes 1.5 installed via yum; this approach also applies to binary installs of Kubernetes 1.5.x/1.6.x, bare-metal clusters, and so on.

#Prerequisite: the cluster is already running correctly with no issues.

#Design reference

Same idea as kubeadm and kargo: three masters provide HA, every node runs haproxy, and haproxy load-balances/reverse-proxies the three kube-apiservers on port 8080; kube-apiserver is a stateless service.

#Note: I previously used nginx as the reverse proxy for the three apiservers and creating/deleting pods became very slow (3-5 minutes), probably a bug; haproxy is recommended instead and runs smoothly.

#Note: set SELINUX=disabled on all machines  # disabled

#controller-manager and scheduler are stateful services: only one instance is active at a time; the three masters run a leader election and one of them takes the leader role.

Architecture 1 (diagram); this document is based on architecture 1.


 

Architecture 2 (diagram)


#The nodes are laid out as follows

cat /etc/hosts

#master

192.168.1.61 master1.txg.com   #512M

192.168.1.62 master2.txg.com   #512M

192.168.1.63 master3.txg.com   #512M

#Master packages

[[email protected] kubernetes]# rpm -qa|grep kube

kubernetes-client-1.5.2-0.2.gitc55cf2b.el7.x86_64

kubernetes-master-1.5.2-0.2.gitc55cf2b.el7.x86_64

flannel-0.7.0-1.el7.x86_64

#etcd-server

192.168.1.65 etcd1.txg.com   #512M

192.168.1.66 etcd2.txg.com   #512M

192.168.1.67 etcd3.txg.com   #512M

#Node hosts

192.168.1.68 node1.txg.com   #4G

192.168.1.69 node2.txg.com   #4G

192.168.2.68 node3.txg.com   #4G

192.168.2.69 node4.txg.com   #4G

#Node packages

[[email protected] ~]# rpm -qa|egrep 'kube|docker'

kubernetes-client-1.5.2-0.5.gita552679.el7.x86_64

docker-common-1.12.6-11.el7.centos.x86_64

docker-1.12.6-11.el7.centos.x86_64

kubernetes-node-1.5.2-0.5.gita552679.el7.x86_64

docker-client-1.12.6-11.el7.centos.x86_64

flannel-0.7.0-1.el7.x86_64

[[email protected] ~]# uname -a

Linux node4.txg.com 3.10.0-514.6.2.el7.x86_64 #1 SMP Thu Feb 23 03:04:39 UTC 2017 x86_64 x86_64 x86_64 GNU/Linux

Edit the configuration files on the master servers; mine are under /etc/kubernetes/.

[[email protected] kubernetes]# pwd

/etc/kubernetes

[[email protected] kubernetes]# ls

apiserver  config  controller-manager  scheduler  ssl  sslbk

1. Edit the controller-manager and scheduler config files: add --address=127.0.0.1 --leader-elect=true inside KUBE_CONTROLLER_MANAGER_ARGS="  "

KUBE_CONTROLLER_MANAGER_ARGS="  --address=127.0.0.1 --leader-elect=true --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem --root-ca-file=/etc/kubernetes/ssl/ca.pem"

For the scheduler, set KUBE_SCHEDULER_ARGS=" --address=127.0.0.1 --leader-elect=true"

This gives the masters the ability to elect a leader; the master configuration is done.

Sync the configuration files from master1 to the master2 and master3 nodes, as sketched below.
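A minimal sketch of the sync, assuming root SSH access from master1 to the other masters (the master services are restarted in one go in a later step):

for h in master2.txg.com master3.txg.com; do
  # copy the shared config plus the controller-manager/scheduler files edited above;
  # adjust any host-specific values (e.g. addresses in the apiserver file) per machine
  scp /etc/kubernetes/{config,controller-manager,scheduler} root@${h}:/etc/kubernetes/
done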

2. Install haproxy on all nodes: yum install haproxy (an ansible sketch follows).
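Since ansible is used later for the restarts anyway, the install can be batched the same way; a sketch, assuming the 'nodes' inventory group used in step 5 already exists:

ansible -m shell -a 'yum install -y haproxy' 'nodes'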

#Configure haproxy.cfg to listen on port 5002 and reverse-proxy kube-apiserver:8080

[[email protected] ~]# cat /etc/haproxy/haproxy.cfg 

#---------------------------------------------------------------------

# Example configuration for a possible web application.  See the

# full configuration options online.

#

#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt

#

#---------------------------------------------------------------------

#---------------------------------------------------------------------

# Global settings

#---------------------------------------------------------------------

global

    # to have these messages end up in /var/log/haproxy.log you will

    # need to:

    #

    # 1) configure syslog to accept network log events.  This is done

    #    by adding the '-r' option to the SYSLOGD_OPTIONS in

    #    /etc/sysconfig/syslog

    #

    # 2) configure local2 events to go to the /var/log/haproxy.log

    #   file. A line like the following can be added to

    #   /etc/sysconfig/syslog

    #

    #

    log         127.0.0.1 local3

    #      local2.*                 /var/log/haproxy.log

    chroot      /var/lib/haproxy

    pidfile     /var/run/haproxy.pid

    maxconn     4000

    user        haproxy

    group       haproxy

    daemon

    # turn on stats unix socket

    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------

# common defaults that all the 'listen' and 'backend' sections will

# use if not designated in their block

#---------------------------------------------------------------------

defaults

    mode                    http

    log                     global

    option                  httplog

    option                  dontlognull

    option http-server-close

    option forwardfor       except 127.0.0.0/8

    option                  redispatch

    retries                 3

    timeout http-request    10s

    timeout queue           1m

    timeout connect         10s

    timeout client          1m

    timeout server          1m

    timeout http-keep-alive 10s

    timeout check           10s

    maxconn                 3000

#---------------------------------------------------------------------

# main frontend which proxys to the backends

#---------------------------------------------------------------------

frontend  main *:5002

stats uri /haproxy

 #   acl url_static       path_beg       -i /static /images /javascript /stylesheets

 #   acl url_static       path_end       -i .jpg .gif .png .css .js

  #  use_backend static          if url_static

    default_backend             app

#---------------------------------------------------------------------

# static backend for serving up images, stylesheets and such

#---------------------------------------------------------------------

#backend static

#    balance     roundrobin

#    server      static 127.0.0.1:4331 check

#---------------------------------------------------------------------

# round robin balancing between the various backends

#---------------------------------------------------------------------

backend app

    mode http

    balance     roundrobin

    server  app1 192.168.1.61:8080 check

    server  app2 192.168.1.62:8080 check

    server  app3 192.168.1.63:8080 check

#In the server section, just list your own three apiservers.

3. Configure rsyslog to collect the haproxy logs

[[email protected] ~]# echo -e '$ModLoad imudp \n $UDPServerRun 514 \n local3.* /var/log/haproxy.log' >> /etc/rsyslog.conf
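rsyslog has to be restarted before haproxy traffic shows up in the log file (the ansible command in step 5 below does that); a quick manual check on one node, assuming the lines above were appended, might look like:

systemctl restart rsyslog
logger -p local3.info "haproxy log test"   # send a test message to the local3 facility
tail -n 1 /var/log/haproxy.log             # the test message should appear here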

4. Configure the nodes

#In the config file, set KUBE_MASTER="--master=http://127.0.0.1:5002" so it points at haproxy's port 5002

[[email protected] kubernetes]# pwd

/etc/kubernetes

[[email protected] kubernetes]# ls

config  kubelet  proxy

[[email protected] kubernetes]# cat config 

###

# kubernetes system config

#

# The following values are used to configure various aspects of all

# kubernetes services, including

#

#   kube-apiserver.service

#   kube-controller-manager.service

#   kube-scheduler.service

#   kubelet.service

#   kube-proxy.service

# logging to stderr means we get it in the systemd journal

KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug

KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers

KUBE_ALLOW_PRIV="--allow-privileged=true"

# How the controller-manager, scheduler, and proxy find the apiserver

KUBE_MASTER="--master=http://127.0.0.1:5002"

#In the kubelet file, set KUBELET_API_SERVER="--api-servers=http://127.0.0.1:5002"

[[email protected] kubernetes]# cat kubelet 

# kubernetes kubelet (minion) config

# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)

KUBELET_ADDRESS="--address=0.0.0.0"

# The port for the info server to serve on

#KUBELET_PORT="--port=10250"

# You may leave this blank to use the actual hostname

KUBELET_HOSTNAME="--hostname-override=192.168.2.69"

# location of the api-server

KUBELET_API_SERVER="--api-servers=http://127.0.0.1:5002"

# pod infrastructure container

#KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

# Add your own!

KUBELET_ARGS="--cluster_dns=172.1.0.2 --cluster_domain=cluster.local"

#Configure all nodes this way; a quick connectivity check follows.
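Once haproxy is running (step 5 restarts it), it is worth confirming from any node that the local proxy really reaches the apiservers; a minimal check, assuming the 5002 frontend configured above:

curl -s http://127.0.0.1:5002/healthz    # should print: ok
curl -s http://127.0.0.1:5002/version    # should return the apiserver version JSON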

5. Restart the services on all nodes. I use ansible here (look up ansible yourself if needed); batching with ansible is much faster.

#If ansible is not installed, restart the services manually.

[[email protected] ~]# ansible -m shell -a ' systemctl restart rsyslog.service ;service haproxy restart ;systemctl restart kubelet.service;systemctl restart kube-proxy.service' 'nodes'

node3.txg.com | SUCCESS | rc=0 >>

Redirecting to /bin/systemctl restart  haproxy.service

node4.txg.com | SUCCESS | rc=0 >>

Redirecting to /bin/systemctl restart  haproxy.service

node2.txg.com | SUCCESS | rc=0 >>

Redirecting to /bin/systemctl restart  haproxy.service

node1.txg.com | SUCCESS | rc=0 >>

Redirecting to /bin/systemctl restart  haproxy.service

#Check the haproxy log on all nodes; HTTP 200 means healthy

[[email protected] kubernetes]# tail -f /var/log/haproxy.log 

2017-05-09T11:23:12+08:00 localhost haproxy[18278]: 127.0.0.1:42970 [09/May/2017:11:23:11.992] main app/app1 52/0/0/186/238 200 2507 - - ---- 6/6/5/2/0 0/0 "PUT /api/v1/nodes/192.168.2.69/status HTTP/1.1"

2017-05-09T11:23:22+08:00 localhost haproxy[18278]: 127.0.0.1:42970 [09/May/2017:11:23:12.229] main app/app2 10000/0/1/1/10002 200 2519 - - ---- 6/6/5/1/0 0/0 "GET /api/v1/nodes?fieldSelector=metadata.name%3D192.168.2.69&resourceVersion=0 HTTP/1.1"

2017-05-09T11:23:22+08:00 localhost haproxy[18278]: 127.0.0.1:42970 [09/May/2017:11:23:22.232] main app/app3 60/0/0/123/183 200 2507 - - ---- 6/6/5/2/0 0/0 "PUT /api/v1/nodes/192.168.2.69/status HTTP/1.1"

2017-05-09T11:23:28+08:00 localhost haproxy[18278]: 127.0.0.1:42722 [09/May/2017:11:22:21.385] main app/app1 7384/0/1/0/67387 200 167 - - sD-- 5/5/4/1/0 0/0 "GET /api/v1/watch/pods?fieldSelector=spec.nodeName%3D192.168.2.69&resourceVersion=2348326&timeoutSeconds=424 HTTP/1.1"

2017-05-09T11:23:32+08:00 localhost haproxy[18278]: 127.0.0.1:43096 [09/May/2017:11:23:32.416] main app/app2 0/0/0/1/1 200 2519 - - ---- 6/6/5/1/0 0/0 "GET /api/v1/nodes?fieldSelector=metadata.name%3D192.168.2.69&resourceVersion=0 HTTP/1.1"

2017-05-09T11:23:32+08:00 localhost haproxy[18278]: 127.0.0.1:43096 [09/May/2017:11:23:32.418] main app/app3 53/0/0/92/145 200 2507 - - ---- 6/6/5/2/0 0/0 "PUT /api/v1/nodes/192.168.2.69/status HTTP/1.1"

2017-05-09T11:23:35+08:00 localhost haproxy[18278]: 127.0.0.1:43096 [09/May/2017:11:23:32.564] main app/app1 2459/0/1/1/2461 200 2507 - - ---- 6/6/5/3/0 0/0 "GET /api/v1/namespaces/kube-system/secrets/default-token-p5l8p HTTP/1.1"

2017-05-09T11:23:42+08:00 localhost haproxy[18278]: 127.0.0.1:38410 [09/May/2017:11:14:38.515] main app/app3 0/0/1/1/544002 200 254800 - - ---- 6/6/4/1/0 0/0 "GET /api/v1/watch/endpoints?resourceVersion=2347840&timeoutSeconds=544 HTTP/1.1"

2017-05-09T11:23:42+08:00 localhost haproxy[18278]: 127.0.0.1:43096 [09/May/2017:11:23:35.024] main app/app3 7540/0/0/1/7541 200 2519 - - ---- 6/6/5/1/0 0/0 "GET /api/v1/nodes?fieldSelector=metadata.name%3D192.168.2.69&resourceVersion=0 HTTP/1.1"

2017-05-09T11:23:42+08:00 localhost haproxy[18278]: 127.0.0.1:43096 [09/May/2017:11:23:42.566] main app/app1 51/0/1/111/163 200 2507 - - ---- 6/6/5/2/0 0/0 "PUT /api/v1/nodes/192.168.2.69/status HTTP/1.1"

#Restart the services on all master nodes

ansible -m shell -a 'systemctl  restart kube-apiserver.service;systemctl restart kube-controller-manager.service ;systemctl restart kube-scheduler.service ' 'masters'

6. Check which node holds each leader

[[email protected] ~]# tail -f /var/log/messages

May  9 11:09:43 master1 kube-scheduler: I0509 11:09:43.354272    4636 leaderelection.go:247] lock is held by master3.txg.com and has not yet expired

May  9 11:09:43 master1 kube-controller-manager: I0509 11:09:43.887592    4532 leaderelection.go:247] lock is held by master2.txg.com and has not yet expired

#At this point the kube-scheduler leader is on master3 and the kube-controller-manager leader is on master2

[[email protected] ~]# kubectl -n kube-system get ep kube-controller-manager -o yaml

apiVersion: v1

kind: Endpoints

metadata:

  annotations:

    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master2.txg.com","leaseDurationSeconds":15,"acquireTime":"2017-05-08T10:41:07Z","renewTime":"2017-05-09T03:14:02Z","leaderTransitions":0}'

  creationTimestamp: 2017-05-08T10:41:07Z

  name: kube-controller-manager

  namespace: kube-system

  resourceVersion: "2347791"

  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-controller-manager

  uid: d7dae24f-33da-11e7-9a51-525400c2bc59

subsets: []

[[email protected] ~]# kubectl -n kube-system get ep kube-scheduler -o yaml

apiVersion: v1

kind: Endpoints

metadata:

  annotations:

    control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"master3.txg.com","leaseDurationSeconds":15,"acquireTime":"2017-05-08T10:41:08Z","renewTime":"2017-05-09T03:14:27Z","leaderTransitions":0}'

  creationTimestamp: 2017-05-08T10:41:08Z

  name: kube-scheduler

  namespace: kube-system

  resourceVersion: "2347830"

  selfLink: /api/v1/namespaces/kube-system/endpoints/kube-scheduler

  uid: d87a235a-33da-11e7-9eb5-52540081c06a

subsets: []

7. Now power off one master and test creating resources from a yaml to see what happens.

[[email protected] ~]# poweroff

Connection to master2.txg.com closed by remote host.

Connection to master2.txg.com closed.

#You can see that controller-manager and scheduler leadership have moved to master3

[[email protected] ~]# tail -f /var/log/messages

May  9 14:16:26 master1 kube-controller-manager: I0509 14:16:26.550307    4532 leaderelection.go:247] lock is held by master3.txg.com and has not yet expired

May  9 14:16:26 master1 kube-scheduler: I0509 14:16:26.574123    4636 leaderelection.go:247] lock is held by master3.txg.com and has not yet expired

May  9 14:16:27 master1 kube-apiserver: W0509 14:16:27.593709    4322 controller.go:392] Resetting endpoints for master service "kubernetes" to &TypeMeta{Kind:,APIVersion:,}

May  9 14:16:29 master1 kube-controller-manager: I0509 14:16:29.708160    4532 leaderelection.go:247] lock is held by master3.txg.com and has not yet expired

#Check the haproxy status: open the stats page on any node.

#master2 has been kicked out of the load-balancer pool.


The cluster status is normal.

Log in to the UI on any node; it can be accessed normally.

kubectl should also work normally:

[[email protected] ~]# kubectl get cs

NAME                 STATUS    MESSAGE              ERROR

scheduler            Healthy   ok                   

controller-manager   Healthy   ok                   

etcd-1               Healthy   {"health": "true"}   

etcd-2               Healthy   {"health": "true"}   

etcd-0               Healthy   {"health": "true"}   

[[email protected] ~]#  kubectl get all --all-namespaces

NAMESPACE     NAME                          DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

kube-system   deploy/default-http-backend   1         1         1            1           6d

kube-system   deploy/heapster-v1.2.0.1      1         1         1            1           19d

kube-system   deploy/kube-dns               1         1         1            1           19d

kube-system   deploy/kubernetes-dashboard   1         1         1            1           20d

NAMESPACE   NAME                  DESIRED   CURRENT   READY     AGE

default     rc/nginx-controller   1         1         1         3h

NAMESPACE     NAME                         CLUSTER-IP      EXTERNAL-IP   PORT(S)             AGE

default       svc/kubernetes               172.1.0.1       <none>        443/TCP             20d

default       svc/nginx-service-nodeport   172.1.64.195    <nodes>       80:30097/TCP        2d

kube-system   svc/default-http-backend     172.1.222.13    <none>        80/TCP              6d

kube-system   svc/heapster                 172.1.10.20     <none>        80/TCP              19d

kube-system   svc/kube-dns                 172.1.0.2       <none>        53/UDP,53/TCP       19d

kube-system   svc/kubernetes-dashboard     172.1.118.216   <nodes>       80:31574/TCP        20d

kube-system   svc/monitoring-grafana       172.1.220.49    <none>        80/TCP              19d

kube-system   svc/monitoring-influxdb      172.1.111.159   <none>        8083/TCP,8086/TCP   19d

kube-system   svc/traefik-web-ui           172.1.41.72     <none>        80/TCP              5d

NAMESPACE     NAME                                DESIRED   CURRENT   READY     AGE

kube-system   rs/default-http-backend-673928963   1         1         1         6d

kube-system   rs/heapster-v1.2.0.1-3604517863     1         1         1         19d

kube-system   rs/kube-dns-167683681               1         1         1         19d

kube-system   rs/kubernetes-dashboard-744911765   1         1         1         20d

NAMESPACE     NAME                                      READY     STATUS    RESTARTS   AGE

default       po/busybox                                1/1       Running   2          2h

default       po/nginx-controller-sb24q                 1/1       Running   0          3h

kube-system   po/default-http-backend-673928963-m2txz   1/1       Running   0          15h

kube-system   po/heapster-v1.2.0.1-3604517863-l25wx     1/1       Running   0          15h

kube-system   po/kube-dns-167683681-nzb98               4/4       Running   0          15h

kube-system   po/kubernetes-dashboard-744911765-1l0gv   1/1       Running   1          2d

kube-system   po/traefik-ingress-controller-2wnnz       1/1       Running   1          5d

kube-system   po/traefik-ingress-controller-2xrf8       1/1       Running   1          5d

kube-system   po/traefik-ingress-controller-4xx72       1/1       Running   1          5d

kube-system   po/traefik-ingress-controller-cv0bt       1/1       Running   1          5d

[[email protected] ~]# 
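As a final check for the test in step 7, creating a workload from a yaml file should still succeed with master2 powered off; a minimal sketch (the deployment name and image below are only illustrative):

cat <<EOF > ha-test-nginx.yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: ha-test-nginx
spec:
  replicas: 2
  template:
    metadata:
      labels:
        app: ha-test-nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.11
        ports:
        - containerPort: 80
EOF
kubectl create -f ha-test-nginx.yaml
kubectl get pods -l app=ha-test-nginx -o wide   # pods should be scheduled and reach Running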

#This completes the high-availability cluster configuration for the plain-HTTP (non-certificate) mode.

2. Now that the HTTP mode is deployed, let's deploy the HTTPS cluster mode.

This part is based on a Kubernetes 1.6.x cluster that uses HTTPS certificates to connect to the apiserver. Prerequisites: the certificates have been distributed to all nodes, all nodes work normally, and the following files exist.

For how to create the certificates, see 宋靜超 (rootsongjc)'s article:

https://github.com/rootsongjc/kubernetes-handbook/blob/master/practice/create-tls-and-secret-key.md

[[email protected] kubernetes]# cd /etc/kubernetes
[[email protected] kubernetes]# ls
bootstrap.kubeconfig  kubelet.kubeconfig  kube-proxy.kubeconfig  ssl  token.csv
[[email protected] kubernetes]# pwd
/etc/kubernetes
[[email protected] kubernetes]# 

First we configure haproxy to pass the HTTPS traffic straight through in TCP mode, so haproxy itself does not need any certificates. Note: this is not the HTTP mode used above.

    mode tcp

cat     /etc/haproxy/haproxy.cfg

#---------------------------------------------------------------------
# Example configuration for a possible web application.  See the
# full configuration options online.
#
#   http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------


#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #
    log         127.0.0.1 local3


    #      local2.*                 /var/log/haproxy.log
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon


    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats


#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000


#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
#frontend  main *:6443
#stats uri /haproxy
 #   acl url_static       path_beg       -i /static /images /javascript /stylesheets
 #   acl url_static       path_end       -i .jpg .gif .png .css .js


  #  use_backend static          if url_static
#    default_backend             k8s-apiserver


#---------------------------------------------------------------------
# static backend for serving up images, stylesheets and such
#---------------------------------------------------------------------
#backend static
#    balance     roundrobin
#    server      static 127.0.0.1:4331 check


#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
#backend k8s-apiserver
listen k8s-apiserver
bind 0.0.0.0:6443
    mode tcp   # TCP mode
    balance     roundrobin
    server  master1.txg.com 192.168.1.61:6443 check  # round-robin across the three apiservers
    server  master2.txg.com 192.168.1.62:6443 check
    server  master3.txg.com 192.168.1.63:6443 check


#--------------------------------------------------------------------


listen localhost 0.0.0.0:8090
    mode http
    transparent
    stats refresh 30s
    stats uri /haproxy-stats    # stats/monitoring page

#OK, once that is done

#Modify the server address in the three kubeconfig files

cd /etc/kubernetes/

bootstrap.kubeconfig  kubelet.kubeconfig  kube-proxy.kubeconfig 

server: https://192.168.1.63:6443 is the apiserver address that was used when the certificates were created.

Change server: https://192.168.1.63:6443 to https://127.0.0.1:6443, as sketched below.
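A sketch of making that change in place with sed, assuming all three kubeconfigs currently point at https://192.168.1.63:6443:

cd /etc/kubernetes
# rewrite the server line so all clients go through the local haproxy
sed -i 's#server: https://192.168.1.63:6443#server: https://127.0.0.1:6443#' \
    bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig
grep 'server:' bootstrap.kubeconfig kubelet.kubeconfig kube-proxy.kubeconfig   # verify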

Restart the related services: systemctl restart haproxy.service kubelet.service kube-proxy.service

Push the certificate files out to all nodes; an ansible sketch follows.
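A sketch of distributing them with ansible, assuming the certificates live under /etc/kubernetes/ssl on the current machine and the 'nodes' inventory group from part 1:

ansible -m copy -a 'src=/etc/kubernetes/ssl/ dest=/etc/kubernetes/ssl/' 'nodes'
# the edited kubeconfig files can be distributed the same way if they are not generated per node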

#Check the cluster status from any master

[[email protected] ~]# kubectl get node -o wide
NAME            STATUS    AGE       VERSION   EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION
jenkins-2       Ready     11d       v1.6.4    <none>        CentOS Linux 7 (Core)   4.4.71-1.el7.elrepo.x86_64
node1.txg.com   Ready     11d       v1.6.4    <none>        CentOS Linux 7 (Core)   4.4.71-1.el7.elrepo.x86_64
node2.txg.com   Ready     11d       v1.6.4    <none>        CentOS Linux 7 (Core)   4.4.71-1.el7.elrepo.x86_64
node3.txg.com   Ready     11d       v1.6.4    <none>        CentOS Linux 7 (Core)   4.4.71-1.el7.elrepo.x86_64
node4.txg.com   Ready     11d       v1.6.4    <none>        CentOS Linux 7 (Core)   3.10.0-514.6.2.el7.x86_64

#All cluster nodes should be OK. At this point, shutting down any one apiserver should leave the cluster working normally; haproxy will kick the unreachable apiserver out of the pool.
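A quick way to exercise that failover, hostnames as above (stop one apiserver, confirm kubectl still answers, then bring it back):

ssh root@master3.txg.com 'systemctl stop kube-apiserver.service'
kubectl get nodes                                                  # still served by the remaining apiservers
ssh root@master3.txg.com 'systemctl start kube-apiserver.service'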

#Visit the haproxy stats page on any node:

http://192.168.2.68:8090/haproxy-stats

All the L4 backends should show OK.

This completes the HTTPS high-availability setup.