
Installing Kubernetes 1.14.1 from Binaries


Kubernetes installation
192.168.1.101 k8s-node02
192.168.1.73 k8s-node01
192.168.1.23 k8s-master01
Download link: https://pan.baidu.com/s/1dN51XMMNw8GbZ246YubVPQ  extraction code: d3ca


1: Configure TLS certificates

Component        Certificates required
 etcd            ca.pem server.pem server-key.pem
 kube-apiserver  ca.pem server.pem server-key.pem
 kubelet         ca.pem ca-key.pem
 kube-proxy      ca.pem kube-proxy.pem kube-proxy-key.pem
 kubectl         ca.pem admin.pem admin-key.pem
Install the certificate generation tools:
[root@k8s-master01 ~]#    wget  http://pkg.cfssl.org/R1.2/cfssl_linux-amd64
[root@k8s-master01 ~]#    wget  http://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
[root@k8s-master01 ~]#    wget  http://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
[root@k8s-master01 ~]#    chmod +x cfssl*
[root@k8s-master01 ~]#    mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
[root@k8s-master01 ~]#    mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
[root@k8s-master01 ~]#    mv cfssl_linux-amd64 /usr/local/bin/cfssl
[root@k8s-master01 ~]#    mkdir /root/ssl
[root@k8s-master01 ~]#    cd /root/ssl
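A quick sanity check that the tools landed on PATH (cfssl prints its version and exits):

[root@k8s-master01 ssl]# cfssl version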

Generate the CA certificate

[root@k8s-master01 ssl]# cat ca-config.json 
   {
     "signing": {
       "default": {
         "expiry": "87600h"
       },
       "profiles": {
         "kubernetes": {
            "expiry": "87600h",
            "usages": [
               "signing",
               "key encipherment",
               "server auth",
               "client auth"
           ]
         }
       }
     }
   }
[root@k8s-master01 ssl]# cat ca-csr.json 
   {
       "CN": "kubernetes",
       "key": {
           "algo": "rsa",
           "size": 2048
       },
       "names": [
           {
               "C": "CN",
               "L": "Zhengzhou",
               "ST": "Zhengzhou",
               "O": "k8s",
               "OU": "System"
           }
       ]
   }

cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
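This writes the CA key pair into the current directory; the file names follow from the -bare ca prefix, so a listing should show:

[root@k8s-master01 ssl]# ls ca*
ca-config.json  ca.csr  ca-csr.json  ca-key.pem  ca.pem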

Generate the server certificate

[root@k8s-master01 ssl]# cat server-csr.json 
    {
     "CN": "kubernetes",
     "hosts": [
     "127.0.0.1",
     "192.168.1.23",
     "192.168.1.73",
     "192.168.1.101",
     "kubernetes",
     "k8s-node01",
     "k8s-master01",
     "k8s-node02",
     "kubernetes.default",
     "kubernetes.default.svc",
     "kubernetes.default.svc.cluster",
     "kubernetes.default.svc.cluster.local"
     ],
     "key": {
         "algo": "rsa",
         "size": 2048
     },
     "names": [
         {
             "C": "CN",
             "L": "Zhengzhou",
             "ST": "Zhengzhou",
             "O": "k8s",
             "OU": "System"
         }
      ]
     }
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  server-csr.json | cfssljson -bare server
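Since etcd and kube-apiserver both present this certificate, it is worth checking that the SAN list really covers every node IP and service name; cfssl-certinfo (installed earlier) decodes the certificate:

[root@k8s-master01 ssl]# cfssl-certinfo -cert server.pem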

Generate the admin certificate

[root@k8s-master01 ssl]# cat admin-csr.json 
{
    "CN": "admin",
    "hosts": [],
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "L": "Zhengzhou",
            "ST": "Zhengzhou",
            "O": "System:masters",
            "OU": "System"
        }
    ]
}

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  admin-csr.json  | cfssljson -bare admin

Generate the kube-proxy certificate

[root@k8s-master01 ssl]# cat kube-proxy-csr.json 
    {
        "CN": "system:kube-proxy",
        "hosts": [],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "Zhengzhou",
                "ST": "Zhengzhou",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes  kube-proxy-csr.json  | cfssljson -bare kube-proxy
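At this point /root/ssl should hold four key pairs; the names follow from the cfssljson -bare prefixes used above:

[root@k8s-master01 ssl]# ls *.pem
admin-key.pem  admin.pem  ca-key.pem  ca.pem  kube-proxy-key.pem  kube-proxy.pem  server-key.pem  server.pem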
    Note: the generated certificates must be synced to every server
    [root@k8s-master01 ssl]# scp -r  /root/ssl k8s-node01:/root/
    [root@k8s-master01 ssl]# scp -r  /root/ssl k8s-node02:/root/

Deploy the etcd storage cluster

[root@k8s-master01 ~]# wget https://github.com/etcd-io/etcd/releases/download/v3.3.11/etcd-v3.3.11-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar xf etcd-v3.3.11-linux-amd64.tar.gz
[root@k8s-master01 ~]# mkdir /k8s/etcd/{bin,cfg} -p
[root@k8s-master01 ~]# mv etcd-v3.3.11-linux-amd64/etcd* /k8s/etcd/bin
[root@k8s-master01 ~]# vim /k8s/etcd/cfg/etcd
[root@k8s-master01 etcd-v3.3.11-linux-amd64]# cat /k8s/etcd/cfg/etcd 
      #[Member]
      ETCD_NAME="etcd01"
      ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
      ETCD_LISTEN_PEER_URLS="https://192.168.1.23:2380"
      ETCD_LISTEN_CLIENT_URLS="https://192.168.1.23:2379"

      #[Clustering]
      ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.23:2380"
      ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.23:2379"
      ETCD_INITIAL_CLUSTER="etcd01=https://192.168.1.23:2380,etcd02=https://192.168.1.73:2380,etcd03=https://192.168.1.101:2380"
      ETCD_INITIAL_CLUSTER_TOKEN="etcd-clusters"
      ETCD_INITIAL_CLUSTER_STATE="new"
[root@k8s-master01 etcd-v3.3.11-linux-amd64]# cat /usr/lib/systemd/system/etcd.service
      [Unit]
      Description=Etcd Server
      After=network.target
      After=network-online.target
      Wants=network-online.target

      [Service]
      Type=notify
      EnvironmentFile=/k8s/etcd/cfg/etcd
      ExecStart=/k8s/etcd/bin/etcd \
        --name=${ETCD_NAME} \
        --data-dir=${ETCD_DATA_DIR} \
        --listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
        --listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
        --advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
        --initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
        --initial-cluster=${ETCD_INITIAL_CLUSTER} \
        --initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
        --initial-cluster-state=new \
        --cert-file=/root/ssl/server.pem \
        --key-file=/root/ssl/server-key.pem \
        --peer-cert-file=/root/ssl/server.pem \
        --peer-key-file=/root/ssl/server-key.pem \
        --trusted-ca-file=/root/ssl/ca.pem \
        --peer-trusted-ca-file=/root/ssl/ca.pem
      Restart=on-failure
      LimitNOFILE=65536

      [Install]
      WantedBy=multi-user.target
[root@k8s-master01 etcd-v3.3.11-linux-amd64]# systemctl  daemon-reload
[root@k8s-master01 etcd-v3.3.11-linux-amd64]# systemctl  restart etcd
    Copy to the other nodes:
[root@k8s-master01 ~]# scp /usr/lib/systemd/system/etcd.service k8s-node01:/usr/lib/systemd/system/etcd.service
[root@k8s-master01 ~]# scp /usr/lib/systemd/system/etcd.service k8s-node02:/usr/lib/systemd/system/etcd.service
[root@k8s-master01 ~]# scp -r /k8s/etcd k8s-node01:/k8s/
[root@k8s-master01 ~]# scp -r /k8s/etcd k8s-node02:/k8s/
Note the required changes on each node:
[root@k8s-node01 k8s]# cat /k8s/etcd/cfg/etcd 
     #[Member]
     ETCD_NAME="etcd01" # set to this server's name (etcd01/etcd02/etcd03) as listed in ETCD_INITIAL_CLUSTER
     ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
     ETCD_LISTEN_PEER_URLS="https://192.168.1.23:2380"  # change to this server's IP
     ETCD_LISTEN_CLIENT_URLS="https://192.168.1.23:2379" # change to this server's IP

     #[Clustering]
     ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.1.23:2380" # change to this server's IP
     ETCD_ADVERTISE_CLIENT_URLS="https://192.168.1.23:2379" # change to this server's IP
     ETCD_INITIAL_CLUSTER="etcd01=https://192.168.1.23:2380,etcd02=https://192.168.1.73:2380,etcd03=https://192.168.1.101:2380"
     ETCD_INITIAL_CLUSTER_TOKEN="etcd-clusters"
     ETCD_INITIAL_CLUSTER_STATE="new"
Run on all three servers: systemctl daemon-reload && systemctl enable etcd && systemctl restart etcd && ps -ef|grep etcd
Check the cluster health:
[root@k8s-master01 ~]# etcdctl --ca-file=/root/ssl/ca.pem --cert-file=/root/ssl/server.pem --key-file=/root/ssl/server-key.pem --endpoints="https://192.168.1.23:2379,https://192.168.1.73:2379,https://192.168.1.101:2379" cluster-health
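etcdctl can also list the members and show which one is currently the leader; a sketch using the same TLS flags (etcd v2 API, as elsewhere in this document):

[root@k8s-master01 ~]# etcdctl --ca-file=/root/ssl/ca.pem --cert-file=/root/ssl/server.pem --key-file=/root/ssl/server-key.pem --endpoints="https://192.168.1.23:2379" member list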


Deploy the flannel network

Flannel is one kind of overlay network: it encapsulates the original packet inside another network for routing, forwarding, and communication. It currently supports UDP, VXLAN, AWS VPC, GCE routes, and other forwarding backends.
Other mainstream options for multi-host container networking: tunnel-based solutions (Weave, Open vSwitch) and routing-based solutions (Calico).
[root@k8s-master01 ~]# wget  https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master01 ~]# tar xf flannel-v0.11.0-linux-amd64.tar.gz
[root@k8s-master01 ~]# mkdir /k8s/flanneld/{bin,cfg}
[root@k8s-master01 ~]# cd  flannel-v0.11.0-linux-amd64
[root@k8s-master01 ~]# mv flanneld  mk-docker-opts.sh /k8s/flanneld/bin
[root@k8s-master01 ~]# cat /etc/profile
   export PATH=/k8s/etcd/bin:/k8s/flanneld/bin:$PATH
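Reload the profile in the current shell so the etcd and flannel binaries resolve without logging in again:

[root@k8s-master01 ~]# source /etc/profile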

Write the cluster Pod network range into etcd

[root@k8s-master01 ~]# etcdctl --ca-file=/root/ssl/ca.pem --cert-file=/root/ssl/server.pem --key-file=/root/ssl/server-key.pem --endpoints="https://192.168.1.23:2379,https://192.168.1.73:2379,https://192.168.1.101:2379" set /coreos.com/network/config '{ "Network": "172.17.0.0/16", "Backend": {"Type": "vxlan"}}'
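Read the key back to confirm the write (same TLS flags, etcd v2 API):

[root@k8s-master01 ~]# etcdctl --ca-file=/root/ssl/ca.pem --cert-file=/root/ssl/server.pem --key-file=/root/ssl/server-key.pem --endpoints="https://192.168.1.23:2379" get /coreos.com/network/config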

Create the flanneld configuration file and systemd unit

[root@k8s-master01 flanneld]# vim /k8s/flanneld/cfg/flanneld 

  FLANNEL_OPTIONS="--etcd-endpoints=https://192.168.1.23:2379,https://192.168.1.73:2379,https://192.168.1.101:2379  -etcd-cafile=/root/ssl/ca.pem -etcd-certfile=/root/ssl/server.pem -etcd-keyfile=/root/ssl/server-key.pem"   
[root@k8s-master01 flanneld]# vim /usr/lib/systemd/system/flanneld.service 
   [Unit]
   Description=Flanneld overlay address etcd agent
   After=network-online.target network.target
   Before=docker.service

   [Service]
   Type=notify
   EnvironmentFile=/k8s/flanneld/cfg/flanneld
   ExecStart=/k8s/flanneld/bin/flanneld --ip-masq $FLANNEL_OPTIONS
   ExecStartPost=/k8s/flanneld/bin/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/subnet.env
   Restart=on-failure

   [Install]
   WantedBy=multi-user.target
[root@k8s-master01 flanneld]# systemctl  daemon-reload
[root@k8s-master01 flanneld]# systemctl  enable flanneld
[root@k8s-master01 flanneld]# systemctl  start  flanneld
Verify startup: check the flannel.1 interface with ifconfig
   flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
    inet 172.17.39.0  netmask 255.255.255.255  broadcast 0.0.0.0
    inet6 fe80::ec16:16ff:fe4b:cd1  prefixlen 64  scopeid 0x20<link>
    ether ee:16:16:4b:0c:d1  txqueuelen 0  (Ethernet)
    RX packets 0  bytes 0 (0.0 B)
    RX errors 0  dropped 0  overruns 0  frame 0
    TX packets 0  bytes 0 (0.0 B)
    TX errors 0  dropped 12 overruns 0  carrier 0  collisions 0
Inspect the subnet file flannel generated:
  [root@k8s-master01 flanneld]# vim /run/flannel/subnet.env 
     DOCKER_OPT_BIP="--bip=172.17.39.1/24"
     DOCKER_OPT_IPMASQ="--ip-masq=false"
     DOCKER_OPT_MTU="--mtu=1450"
     DOCKER_NETWORK_OPTIONS=" --bip=172.17.39.1/24 --ip-masq=false --mtu=1450"

Configure Docker to start on the flanneld subnet

[root@k8s-master01 flanneld]# mv /usr/lib/systemd/system/docker.service /usr/lib/systemd/system/docker.service_back
[root@k8s-master01 flanneld]# cat /usr/lib/systemd/system/docker.service
    [Unit]
    Description=Docker Application Container Engine
    Documentation=https://docs.docker.com
    After=network-online.target firewalld.service
    Wants=network-online.target

    [Service]
    Type=notify
    EnvironmentFile=/run/flannel/subnet.env
    ExecStart=/usr/bin/dockerd $DOCKER_NETWORK_OPTIONS
    ExecReload=/bin/kill -s HUP $MAINPID
    LimitNOFILE=infinity
    LimitNPROC=infinity
    LimitCORE=infinity
    TimeoutStartSec=0
    Delegate=yes
    KillMode=process
    Restart=on-failure
    StartLimitBurst=3
    StartLimitInterval=60s

    [Install]
    WantedBy=multi-user.target
[root@k8s-master01 flanneld]# systemctl  daemon-reload
[root@k8s-master01 flanneld]# systemctl  restart docker
Then run ifconfig to check whether docker0 obtained its address from flanneld:
[root@k8s-master01 flanneld]# ifconfig
   docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
    inet 172.17.39.1  netmask 255.255.255.0  broadcast 172.17.39.255
    ether 02:42:f0:f7:a0:74  txqueuelen 0  (Ethernet)
    RX packets 0  bytes 0 (0.0 B)
    RX errors 0  dropped 0  overruns 0  frame 0
    TX packets 0  bytes 0 (0.0 B)
    TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

   flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
    inet 172.17.39.0  netmask 255.255.255.255  broadcast 0.0.0.0
    inet6 fe80::ec16:16ff:fe4b:cd1  prefixlen 64  scopeid 0x20<link>
    ether ee:16:16:4b:0c:d1  txqueuelen 0  (Ethernet)
    RX packets 0  bytes 0 (0.0 B)
    RX errors 0  dropped 0  overruns 0  frame 0
    TX packets 0  bytes 0 (0.0 B)
    TX errors 0  dropped 13 overruns 0  carrier 0  collisions 0
    Copy the configuration to the other nodes:
    [root@k8s-master01 ~]# cd /k8s/
    [root@k8s-master01 k8s]# scp -r flanneld k8s-node01:/k8s/
    [root@k8s-master01 k8s]# scp -r flanneld k8s-node02:/k8s/
    [root@k8s-master01 k8s]# scp /usr/lib/systemd/system/docker.service k8s-node01:/usr/lib/systemd/system/docker.service
    [root@k8s-master01 k8s]# scp /usr/lib/systemd/system/docker.service k8s-node02:/usr/lib/systemd/system/docker.service
    [root@k8s-master01 k8s]# scp /usr/lib/systemd/system/flanneld.service  k8s-node01:/usr/lib/systemd/system/flanneld.service
    [root@k8s-master01 k8s]# scp /usr/lib/systemd/system/flanneld.service  k8s-node02:/usr/lib/systemd/system/flanneld.service
    Run on node01:
  [root@k8s-node01 cfg]# systemctl daemon-reload
  [root@k8s-node01 cfg]# systemctl enable docker
  [root@k8s-node01 cfg]# systemctl enable flanneld
     Created symlink from /etc/systemd/system/multi-user.target.wants/flanneld.service to /usr/lib/systemd/system/flanneld.service.
  [root@k8s-node01 cfg]# systemctl start  flanneld
  [root@k8s-node01 cfg]# systemctl start  docker    
    Run on node02:
  [root@k8s-node02 flanneld]# systemctl daemon-reload
  [root@k8s-node02 flanneld]# systemctl enable docker
  [root@k8s-node02 flanneld]# systemctl enable flanneld
  [root@k8s-node02 flanneld]# systemctl restart  flanneld
  [root@k8s-node02 flanneld]# systemctl restart  docker
        Result: flanneld on each server gets a different subnet, and docker0 takes the .1 address of the flannel subnet generated on that host.

# Check cross-host connectivity
Ping the docker0 address of the other hosts, as sketched below.
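For example, from master01 (docker0 is 172.17.39.1 here) ping the docker0 address of each node. Which node owns 172.17.89.0/24 versus 172.17.44.0/24 is an assumption; read the real mapping from the etcd listing below:

[root@k8s-master01 ~]# ping -c 3 172.17.89.1
[root@k8s-master01 ~]# ping -c 3 172.17.44.1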

# List the subnets registered in etcd
[root@k8s-master01 k8s]# etcdctl --ca-file=/root/ssl/ca.pem --cert-file=/root/ssl/server.pem --key-file=/root/ssl/server-key.pem --endpoints="https://192.168.1.23:2379,https://192.168.1.73:2379,https://192.168.1.101:2379" ls /coreos.com/network/subnets
/coreos.com/network/subnets/172.17.89.0-24
/coreos.com/network/subnets/172.17.44.0-24
/coreos.com/network/subnets/172.17.39.0-24

    [root@k8s-master01 k8s]# etcdctl --ca-file=/root/ssl/ca.pem  --cert-file=/root/ssl/server.pem  --key-file=/root/ssl/server-key.pem --endpoints="https://192.168.1.23:2379,https://192.168.1.73:2379,https://192.168.1.101:2379" get /coreos.com/network/subnets/172.17.39.0-24
         {"PublicIP":"192.168.1.23","BackendType":"vxlan","BackendData":{"VtepMAC":"ee:16:16:4b:0c:d1"}}
    PublicIP: the node's IP address
    BackendType: the backend type
    VtepMAC: the virtual MAC of the VTEP
Also check the routing table on each host (ip route): there should be a route to each remote flannel subnet via flannel.1.


Create the node kubeconfig files on the master

Create the TLS bootstrapping token and generate token.csv

    head -c 16 /dev/urandom | od -An -t x | tr -d ' ' > /k8s/kubenerets/token.csv
    (then edit the file to append the user, uid, and group fields as shown)
 [root@k8s-master01 kubenerets]# cat token.csv 
   454b513c7148ab3a0d2579e8f0c4e884,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
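As a sketch, the token generation and the CSV line can also be written in one step; BOOTSTRAP_TOKEN is a shell variable introduced here, and the field order is token,user,uid,"group":

BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
cat > /k8s/kubenerets/token.csv <<EOF
${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF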

Set the apiserver access parameters

 [root@k8s-master01 kubenerets]# export KUBE_APISERVER="https://192.168.1.23:6443"
Create the kubelet bootstrapping kubeconfig
 BOOTSTRAP_TOKEN=454b513c7148ab3a0d2579e8f0c4e884
 KUBE_APISERVER="https://192.168.1.23:6443"
Set the cluster parameters
 kubectl config set-cluster kubernetes    --certificate-authority=/root/ssl/ca.pem   --embed-certs=true    --server=${KUBE_APISERVER}    --kubeconfig=bootstrap.kubeconfig
Set the client authentication parameters
 kubectl config set-credentials kubelet-bootstrap    --token=${BOOTSTRAP_TOKEN}    --kubeconfig=bootstrap.kubeconfig
Set the context parameters
 kubectl config set-context default    --cluster=kubernetes    --user=kubelet-bootstrap    --kubeconfig=bootstrap.kubeconfig
Use the default context
 kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

Create the kube-proxy kubeconfig file

 kubectl config set-cluster kubernetes    --certificate-authority=/root/ssl/ca.pem    --embed-certs=true    --server=${KUBE_APISERVER}    --kubeconfig=kube-proxy.kubeconfig

 kubectl config set-credentials kube-proxy    --client-certificate=/root/ssl/kube-proxy.pem    --client-key=/root/ssl/kube-proxy-key.pem    --embed-certs=true    --kubeconfig=kube-proxy.kubeconfig

 kubectl config set-context default    --cluster=kubernetes    --user=kube-proxy    --kubeconfig=kube-proxy.kubeconfig

 kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
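Before distributing the two kubeconfig files, they can be inspected with kubectl (the embedded certificate data is redacted in the output):

 kubectl config view --kubeconfig=bootstrap.kubeconfig
 kubectl config view --kubeconfig=kube-proxy.kubeconfig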

Deploy the apiserver, kube-scheduler, and kube-controller-manager

Create the apiserver configuration file

     [root@k8s-master01 cfg]# cat /k8s/kubenerets/cfg/kube-apiserver 
    KUBE_APISERVER_OPTS="--logtostderr=true --v=4 --etcd-servers=https://192.168.1.23:2379,https://192.168.1.73:2379,https://192.168.1.101:2379 --insecure-bind-address=0.0.0.0 --insecure-port=8080 --bind-address=192.168.1.23 --secure-port=6443 --advertise-address=192.168.1.23 --allow-privileged=true --service-cluster-ip-range=10.10.10.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth --token-auth-file=/k8s/kubenerets/token.csv --service-node-port-range=30000-50000 --tls-cert-file=/root/ssl/server.pem --kubelet-https=true --tls-private-key-file=/root/ssl/server-key.pem --client-ca-file=/root/ssl/ca.pem --service-account-key-file=/root/ssl/ca-key.pem --etcd-cafile=/root/ssl/ca.pem --etcd-certfile=/root/ssl/server.pem --etcd-keyfile=/root/ssl/server-key.pem"

kube-apiserver systemd unit

    [root@k8s-master01 cfg]# cat /usr/lib/systemd/system/kube-apiserver.service 
     [Unit]
     Description=Kubernetes API Server
     Documentation=https://github.com/kubernetes/kubernetes

     [Service]
     EnvironmentFile=-/k8s/kubenerets/cfg/kube-apiserver
     ExecStart=/k8s/kubenerets/bin/kube-apiserver $KUBE_APISERVER_OPTS
     Restart=on-failure

     [Install]
     WantedBy=multi-user.target

Deploy the scheduler

 [root@k8s-master01 cfg]# cat kube-scheduler 
  KUBE_SCHEDULER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect"

systemd unit

[root@k8s-master01 cfg]# cat /usr/lib/systemd/system/kube-scheduler.service 
   [Unit]
   Description=Kubernetes Scheduler
   Documentation=https://github.com/kubernetes/kubernetes

   [Service]
   EnvironmentFile=-/k8s/kubenerets/cfg/kube-scheduler
   ExecStart=/k8s/kubenerets/bin/kube-scheduler  $KUBE_SCHEDULER_OPTS
   Restart=on-failure

   [Install]
   WantedBy=multi-user.target 

Deploy kube-controller-manager

[root@k8s-master01 cfg]# cat kube-controller-manager 
 KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true --v=4 --master=127.0.0.1:8080 --leader-elect=true --address=127.0.0.1 --service-cluster-ip-range=10.10.10.0/24 --cluster-name=kubernetes --cluster-signing-cert-file=/root/ssl/ca.pem --cluster-signing-key-file=/root/ssl/ca-key.pem --root-ca-file=/root/ssl/ca.pem --service-account-private-key-file=/root/ssl/ca-key.pem"

systemd unit

[root@k8s-master01 cfg]# cat /usr/lib/systemd/system/kube-controller-manager.service 
  [Unit]
  Description=Kubernetes Controller Manager
  Documentation=https://github.com/kubernetes/kubernetes

  [Service]
  EnvironmentFile=-/k8s/kubenerets/cfg/kube-controller-manager
  ExecStart=/k8s/kubenerets/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
  Restart=on-failure

  [Install]
  WantedBy=multi-user.target
systemctl daemon-reload
systemctl enable kube-apiserver
systemctl enable kube-controller-manager
systemctl enable kube-scheduler
systemctl restart kube-apiserver
systemctl restart kube-controller-manager
systemctl restart  kube-scheduler

# Check the master component status
[root@k8s-master01 cfg]# kubectl get cs,nodes
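If everything came up, scheduler, controller-manager, and all three etcd members should report Healthy. The apiserver can also be probed directly on the insecure port opened above (--insecure-port=8080); the healthz endpoint answers with ok:

[root@k8s-master01 cfg]# curl http://127.0.0.1:8080/healthz
ok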

Copy files to the nodes

Copy the certificate files to the node machines

 [root@k8s-master01 cfg]# scp -r /root/ssl k8s-node01:/root/
 [root@k8s-master01 cfg]# scp -r /root/ssl k8s-node02:/root/

Copy bootstrap.kubeconfig and kube-proxy.kubeconfig

[root@k8s-master01 kubenerets]# scp *.kubeconfig k8s-node01:/k8s/kubenerets/
    bootstrap.kubeconfig        100% 2182     4.1MB/s   00:00    
    kube-proxy.kubeconfig       100% 6300    12.2MB/s   00:00    
[root@k8s-master01 kubenerets]# scp *.kubeconfig k8s-node02:/k8s/kubenerets/
    bootstrap.kubeconfig        100% 2182     4.1MB/s   00:00    
    kube-proxy.kubeconfig       100% 6300    12.2MB/s   00:00   

Here I simply push all the binaries straight to the test machines:

 [root@k8s-master01 bin]# scp ./* k8s-node01:/k8s/kubenerets/bin/ && scp ./* k8s-node02:/k8s/kubenerets/bin/
  apiextensions-apiserver   100%   41MB  70.0MB/s   00:00    
  cloud-controller-manager  100%   96MB  95.7MB/s   00:01    
  hyperkube                 100%  201MB  67.1MB/s   00:03    
  kubeadm                   100%   38MB  55.9MB/s   00:00    
  kube-apiserver            100%  160MB  79.9MB/s   00:02    
  kube-controller-manager   100%  110MB  69.4MB/s   00:01    
  kubectl                   100%   41MB  80.6MB/s   00:00    
  kubelet                   100%  122MB 122.0MB/s   00:01    
  kube-proxy                100%   35MB  66.0MB/s   00:00    
  kube-scheduler            100%   37MB  78.5MB/s   00:00    
  mounter                   100% 1610KB  17.9MB/s   00:00   

Deploy the node components

A Kubernetes worker node runs the following components:
docker (already deployed above)
kubelet
kube-proxy

Deploy the kubelet component

     The kubelet runs on every worker node: it receives requests from kube-apiserver, manages Pod containers, and executes interactive commands such as exec, run, and logs.
On startup the kubelet automatically registers node information with kube-apiserver, and its built-in cAdvisor collects and monitors the node's resource usage.
For security, this document only opens the secure HTTPS port; requests are authenticated and authorized, and unauthorized access (e.g. from apiserver or heapster) is rejected.

Configure kubelet

[root@k8s-node01 cfg]# cat /k8s/kubenerets/cfg/kubelet 
       KUBELET_OPTS="--logtostderr=true --v=4 --address=192.168.1.73 --hostname-override=192.168.1.73 --kubeconfig=/k8s/kubenerets/cfg/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/k8s/kubenerets/bootstrap.kubeconfig --cert-dir=/root/ssl --allow-privileged=true --cluster-dns=10.10.10.2 --cluster-domain=cluster.local --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
       # kubelet.kubeconfig is generated automatically during TLS bootstrap; do not create it by hand
# kubelet systemd unit
   [root@k8s-node01 cfg]# cat /usr/lib/systemd/system/kubelet.service
        [Unit]
        Description=Kubernetes Kubelet
        After=docker.service
        Requires=docker.service

        [Service]
        EnvironmentFile=/k8s/kubenerets/cfg/kubelet
        ExecStart=/k8s/kubenerets/bin/kubelet $KUBELET_OPTS
        Restart=on-failure
        KillMode=process

        [Install]
        WantedBy=multi-user.target

Deploy kube-proxy

  kube-proxy runs on every node; it watches the apiserver for changes to Services and Endpoints and creates routing rules to load-balance service traffic.

Create the kube-proxy configuration file

 [root@k8s-node01 cfg]# vim /k8s/kubenerets/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true     --v=4     --hostname-override=192.168.1.73     --kubeconfig=/k8s/kubenerets/kube-proxy.kubeconfig"

bindAddress: listen address;
clientConnection.kubeconfig: kubeconfig file used to connect to the apiserver;
clusterCIDR: kube-proxy uses --cluster-cidr to distinguish traffic inside the cluster from outside; only when --cluster-cidr or --masquerade-all is set will kube-proxy SNAT requests destined for Service IPs;
hostnameOverride: must match the kubelet's value, otherwise kube-proxy will not find its Node after starting and will not create any ipvs rules;
mode: use ipvs mode;

Create the kube-proxy systemd unit file

[root@k8s-node01 cfg]# cat /usr/lib/systemd/system/kube-proxy.service 
    [Unit]
    Description=Kubernetes Proxy
    After=network.target

    [Service]
    EnvironmentFile=-/k8s/kubenerets/cfg/kube-proxy
    ExecStart=/k8s/kubenerets/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure

    [Install]
    WantedBy=multi-user.target 
    systemctl daemon-reload
    systemctl enable kubelet
    systemctl start  kubelet
    systemctl enable kube-proxy
    systemctl start  kube-proxy

On the master, create the cluster role binding for the bootstrap user

   kubectl  create  clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

View CSRs on the master

 [root@k8s-master01 cfg]# kubectl  get csr
      NAME                                                   AGE   REQUESTOR           CONDITION
      node-csr-YCL1SJyx3q0tSDCQuFLe4DmMdxUZgLA3-2EmDCOKiD4   19m   kubelet-bootstrap   Pending

On the master, approve the node's CSR so it can join

 kubectl  certificate approve node-csr-YCL1SJyx3q0tSDCQuFLe4DmMdxUZgLA3-2EmDCOKiD4

Check the CSR again: CONDITION changes to Approved,Issued

On the master, check node registration progress

[root@k8s-master01 cfg]# kubectl  get nodes
   NAME           STATUS   ROLES    AGE   VERSION
   192.168.1.73   Ready    <none>   48s   v1.14.1

At this point node01 should have automatically generated its kubelet certificates

[root@k8s-node01 cfg]# ls /root/ssl/kubelet*
     /root/ssl/kubelet-client-2019-05-14-11-29-40.pem  /root/ssl/kubelet-client-current.pem  /root/ssl/kubelet.crt  /root/ssl/kubelet.key

Other nodes join the cluster in the same way

[root@k8s-node01 kubenerets]# scp /usr/lib/systemd/system/kube*  k8s-node02:/usr/lib/systemd/system/
[root@k8s-node01 cfg]# cd /k8s/kubenerets/cfg
[root@k8s-node01 cfg]# scp kubelet kube-proxy  k8s-node02:/k8s/kubenerets/cfg/

Modify kubelet and kube-proxy on node02

[root@k8s-node02 cfg]# cat kubelet 
KUBELET_OPTS="--logtostderr=true --v=4 --address=192.168.1.101 --hostname-override=192.168.1.101 --kubeconfig=/k8s/kubenerets/cfg/kubelet.kubeconfig --experimental-bootstrap-kubeconfig=/k8s/kubenerets/bootstrap.kubeconfig --cert-dir=/root/ssl --allow-privileged=true --cluster-dns=10.10.10.2 --cluster-domain=cluster.local --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
[root@k8s-node02 cfg]# cat  kube-proxy  
KUBE_PROXY_OPTS="--logtostderr=true --v=4 --hostname-override=192.168.1.101 --kubeconfig=/k8s/kubenerets/kube-proxy.kubeconfig"    

Start the services

systemctl  daemon-reload
systemctl  enable kubelet
systemctl  start  kubelet
systemctl  enable kube-proxy
systemctl  start  kube-proxy

Approve the new node's CSR on the master

[root@k8s-master01 cfg]# kubectl  get csr
[root@k8s-master01 cfg]# kubectl  certificate approve node-csr-gHgQ5AYjpn6nFUMVEEYvIfyNqUK2ctmpA14YMecQtHY    
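Once the CSR is approved, both nodes should appear Ready in the node list shortly afterwards:

[root@k8s-master01 cfg]# kubectl get nodes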
