1. 程式人生 > >kubernetes 1.7.2 + Calico部署

kubernetes 1.7.2 + Calico部署

系統環境

系統

[root@kubernetes-master-1 ~]# cat /etc/redhat-release 
CentOS Linux release 7.4.1708 (Core) 

hosts

[root@kubernetes-master-1 ~]# cat /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.109 kubernetes-master-1
192.168.1.110 kubernetes-master-2
192.168.1.111 k8s-node-1

建立 驗證

安裝 cfssl

mkdir -p /opt/local/cfssl

cd /opt/local/cfssl

wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
mv cfssl_linux-amd64 cfssl

wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
mv cfssljson_linux-amd64 cfssljson

wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 cfssl-certinfo

chmod +x *

ls -l
總用量 18808
-rwxr-xr-x 1 root root 10376657 3月 30 2016 cfssl
-rwxr-xr-x 1 root root  6595195 3月 30 2016 cfssl-certinfo
-rwxr-xr-x 1 root root  2277873 3月 30 2016 cfssljson

建立 CA 證書配置

mkdir /opt/ssl

cd /opt/ssl

/opt/local/cfssl/cfssl print-defaults config > config.json

/opt/local/cfssl/cfssl print-defaults csr > csr.json

[root@k8s-node-1 ssl]# cat config.json csr.json 
{
  "signing": {
    "default": {
      "expiry": "87600h"
    },
    "profiles": {
      "kubernetes": {
        "usages": [
            "signing",
            "key encipherment",
            "server auth",
            "client auth"
        ],
        "expiry": "87600h"
      }
    }
  }
}
{
  "CN": "kubernetes",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

生成 CA 證書和私鑰

cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca

[root@k8s-node-1 ssl]# ls -l
總用量 20
-rw-r--r-- 1 root root 1005 4月   7 15:29 ca.csr
-rw------- 1 root root 1675 4月   7 15:29 ca-key.pem
-rw-r--r-- 1 root root 1363 4月   7 15:29 ca.pem
-rw-r--r-- 1 root root  292 4月   7 15:27 config.json
-rw-r--r-- 1 root root  210 4月   7 15:27 csr.json

分發證書

# 建立證書目錄
mkdir -p /etc/kubernetes/ssl

# 拷貝所有檔案到目錄下
cp * /etc/kubernetes/ssl

# 這裡要將檔案拷貝到所有的k8s 機器上,也要建立相應的目錄

scp * root@192.168.1.110:/etc/kubernetes/ssl/

scp * root@192.168.1.111:/etc/kubernetes/ssl/

etcd 叢集

yum install etcd3 -y

建立 etcd 證書

cd /opt/ssl/

[root@kubernetes-master-1 ssl]# vi etcd-csr.json

{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.1.109",
    "192.168.1.110",
    "192.168.1.111"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

# 生成 etcd   金鑰

/opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \
  -ca-key=/opt/ssl/ca-key.pem \
  -config=/opt/ssl/config.json \
  -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd
# 檢視生成

[root@k8s-node-1 ssl]# ls -l etcd*
-rw-r--r-- 1 root root 1066 4月  7 15:38 etcd.csr
-rw-r--r-- 1 root root  301 4月  7 15:37 etcd-csr.json
-rw------- 1 root root 1675 4月  7 15:38 etcd-key.pem
-rw-r--r-- 1 root root 1440 4月  7 15:38 etcd.pem



# 拷貝到etcd伺服器

# etcd-1 
cp etcd*.pem /etc/kubernetes/ssl/

# etcd-2
scp etcd* root@192.168.1.110:/etc/kubernetes/ssl/

# etcd-3
scp etcd* root@192.168.1.111:/etc/kubernetes/ssl/



# 如果 etcd 非 root 使用者,讀取證書會提示沒許可權

chmod 644 /etc/kubernetes/ssl/etcd-key.pem

修改 etcd 配置

# etcd-1


vi /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=etcd1 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.1.109:2380 \
  --listen-peer-urls=https://192.168.1.109:2380 \
  --listen-client-urls=https://192.168.1.109:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.109:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.1.109:2380,etcd2=https://192.168.1.110:2380,etcd3=https://192.168.1.111:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
# etcd-2


vi /usr/lib/systemd/system/etcd.service


[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=etcd2 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.1.110:2380 \
  --listen-peer-urls=https://192.168.1.110:2380 \
  --listen-client-urls=https://192.168.1.110:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.110:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.1.109:2380,etcd2=https://192.168.1.110:2380,etcd3=https://192.168.1.111:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
# etcd-3

vi /usr/lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
User=etcd
# set GOMAXPROCS to number of processors
ExecStart=/usr/bin/etcd \
  --name=etcd3 \
  --cert-file=/etc/kubernetes/ssl/etcd.pem \
  --key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
  --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
  --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
  --initial-advertise-peer-urls=https://192.168.1.111:2380 \
  --listen-peer-urls=https://192.168.1.111:2380 \
  --listen-client-urls=https://192.168.1.111:2379,http://127.0.0.1:2379 \
  --advertise-client-urls=https://192.168.1.111:2379 \
  --initial-cluster-token=k8s-etcd-cluster \
  --initial-cluster=etcd1=https://192.168.1.109:2380,etcd2=https://192.168.1.110:2380,etcd3=https://192.168.1.111:2380 \
  --initial-cluster-state=new \
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

啟動 etcd

在每個節點執行

systemctl enable etcd

systemctl start etcd

systemctl status etcd
# 如果報錯 請使用
journalctl -f -t etcd  和 journalctl -u etcd 來定位問題

驗證 etcd 叢集狀態
檢視 etcd 叢集狀態:

etcdctl --endpoints=https://192.168.1.110:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        cluster-health

member 69fb6a35f1ce3d83 is healthy: got healthy result from https://192.168.1.109:2379
member 95605c07b7eb732b is healthy: got healthy result from https://192.168.1.111:2379
member fdb0d4304dcee33c is healthy: got healthy result from https://192.168.1.110:2379
cluster is healthy

檢視 etcd 叢集成員

etcdctl --endpoints=https://192.168.1.110:2379 \
        --cert-file=/etc/kubernetes/ssl/etcd.pem \
        --ca-file=/etc/kubernetes/ssl/ca.pem \
        --key-file=/etc/kubernetes/ssl/etcd-key.pem \
        member list

69fb6a35f1ce3d83: name=etcd1 peerURLs=https://192.168.1.109:2380 clientURLs=https://192.168.1.109:2379 isLeader=true
95605c07b7eb732b: name=etcd3 peerURLs=https://192.168.1.111:2380 clientURLs=https://192.168.1.111:2379 isLeader=false
fdb0d4304dcee33c: name=etcd2 peerURLs=https://192.168.1.110:2380 clientURLs=https://192.168.1.110:2379 isLeader=false

安裝 docker

# 匯入 yum 源

# 安裝 yum-config-manager

yum -y install yum-utils

# 匯入
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo


# 更新 repo
yum makecache

# 安裝

yum install docker-ce

新增dockerhub加速器

[root@kubernetes-master-1 ~]# cat /etc/docker/daemon.json 
{"registry-mirrors": ["http://579fe187.m.daocloud.io","https://pee6w651.mirror.aliyuncs.com"]}

啟動docker

systemctl daemon-reload
systemctl start docker
systemctl enable docker

安裝 kubectl 工具

Master 端

# 首先安裝 kubectl

wget https://dl.k8s.io/v1.7.2/kubernetes-client-linux-amd64.tar.gz

tar -xzvf kubernetes-client-linux-amd64.tar.gz

cp kubernetes/client/bin/* /usr/local/bin/

chmod a+x /usr/local/bin/kube*


# 驗證安裝

kubectl version
[root@kubernetes-master-1 ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"7", GitVersion:"v1.7.2", GitCommit:"922a86cfcd65915a9b2f69f3f193b8907d741d9c", GitTreeState:"clean", BuildDate:"2017-07-21T08:23:22Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

建立 admin 證書

cd /opt/ssl/

vi admin-csr.json
{
  "CN": "admin",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "system:masters",
      "OU": "System"
    }
  ]
}
# 生成 admin 證書和私鑰
cd /opt/ssl/

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/config.json \
  -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin


# 檢視生成

[root@k8s-master-1 ssl]# ls admin*
admin.csr  admin-csr.json  admin-key.pem  admin.pem

cp admin*.pem /etc/kubernetes/ssl/

scp admin*.pem root@192.168.1.110:/etc/kubernetes/ssl/
scp admin*.pem root@192.168.1.111:/etc/kubernetes/ssl/

配置 kubectl kubeconfig 檔案

# 配置 kubernetes 叢集

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.109:6443


# 配置 客戶端認證

kubectl config set-credentials admin \
  --client-certificate=/etc/kubernetes/ssl/admin.pem \
  --embed-certs=true \
  --client-key=/etc/kubernetes/ssl/admin-key.pem



kubectl config set-context kubernetes \
  --cluster=kubernetes \
  --user=admin


kubectl config use-context kubernetes

分發 kubectl config 檔案

# 將上面配置的 kubeconfig 檔案分發到其他機器

# 其他伺服器建立目錄

mkdir /root/.kube //在其他節點也要建立該目錄

scp /root/.kube/config root@192.168.1.110:/root/.kube/

scp /root/.kube/config root@192.168.1.111:/root/.kube/

部署 kubernetes Master 節點

Master 需要部署 kube-apiserver , kube-scheduler , kube-controller-manager 這三個元件

安裝元件

# 從github 上下載版本

cd /tmp

wget https://dl.k8s.io/v1.7.2/kubernetes-server-linux-amd64.tar.gz

tar -xzvf kubernetes-server-linux-amd64.tar.gz

cd kubernetes

cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} /usr/local/bin/

建立 kubernetes 證書

cd /opt/ssl

vi kubernetes-csr.json

{
  "CN": "kubernetes",
  "hosts": [
    "127.0.0.1",
    "192.168.1.109",
    "192.168.1.110",
    "192.254.0.1",
    "kubernetes",
    "kubernetes.default",
    "kubernetes.default.svc",
    "kubernetes.default.svc.cluster",
    "kubernetes.default.svc.cluster.local"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}


## 這裡 hosts 欄位中 三個 IP 分別為 127.0.0.1 本機, 192.168.1.109 和 192.168.1.110 為 Master 的IP, 192.254.0.1 為 kubernetes SVC 的 IP, 一般是 部署網路的第一個IP , 如: 192.254.0.1 , 在啟動完成後,我們使用   kubectl get svc , 就可以檢視到

生成 kubernetes 證書和私鑰

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/config.json \
  -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes

# 檢視生成

[root@k8s-node-1 ssl]# ls -lt kubernetes*
-rw-r--r-- 1 root root 1253 4月  7 16:57 kubernetes.csr
-rw------- 1 root root 1679 4月  7 16:57 kubernetes-key.pem
-rw-r--r-- 1 root root 1627 4月  7 16:57 kubernetes.pem
-rw-r--r-- 1 root root  461 4月  7 16:56 kubernetes-csr.json


# 拷貝到目錄
cp -r kubernetes* /etc/kubernetes/ssl/
scp kubernetes* root@192.168.1.110:/etc/kubernetes/ssl/
scp kubernetes* root@192.168.1.111:/etc/kubernetes/ssl/

配置 kube-apiserver

kubelet 首次啟動時向 kube-apiserver 傳送 TLS Bootstrapping 請求,kube-apiserver 驗證 kubelet 請求中的 token 是否與它配置的 token 一致,如果一致則自動為 kubelet生成證書和祕鑰。

# 生成 token

[root@k8s-node-1 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
cdc7201eb10d76842ff5b6e35bfbb516


# 建立 token.csv 檔案

cd /opt/ssl

vi token.csv

cdc7201eb10d76842ff5b6e35bfbb516,kubelet-bootstrap,10001,"system:kubelet-bootstrap"


# 拷貝

cp token.csv /etc/kubernetes/
scp token.csv root@192.168.1.110:/etc/kubernetes/
scp token.csv root@192.168.1.111:/etc/kubernetes/

建立 kube-apiserver.service 檔案

# 自定義 系統 service 檔案一般存於 /etc/systemd/system/ 下

vi /etc/systemd/system/kube-apiserver.service

[Unit]
Description=kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
User=root
ExecStart=/usr/local/bin/kube-apiserver \
  --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
  --advertise-address=192.168.1.109 \
  --allow-privileged=true \
  --apiserver-count=3 \
  --audit-log-maxage=30 \
  --audit-log-maxbackup=3 \
  --audit-log-maxsize=100 \
  --audit-log-path=/var/lib/audit.log \
  --authorization-mode=RBAC \
  --bind-address=192.168.1.109 \
  --client-ca-file=/etc/kubernetes/ssl/ca.pem \
  --enable-swagger-ui=true \
  --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
  --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
  --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
  --etcd-servers=https://192.168.1.109:2379,https://192.168.1.110:2379,https://192.168.1.111:2379 \
  --event-ttl=1h \
  --kubelet-https=true \
  --insecure-bind-address=192.168.1.109 \
  --runtime-config=rbac.authorization.k8s.io/v1alpha1 \
  --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-cluster-ip-range=192.254.0.0/16 \
  --service-node-port-range=30000-32000 \
  --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
  --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
  --experimental-bootstrap-token-auth \
  --token-auth-file=/etc/kubernetes/token.csv \
  --v=2
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
# 這裡面要注意的是 --service-node-port-range=30000-32000
# 這個地方是 對映外部埠時 的埠範圍,隨機對映也在這個範圍內對映,指定對映埠必須也在這個範圍內。

啟動 kube-apiserver

systemctl daemon-reload
systemctl enable kube-apiserver
systemctl start kube-apiserver
systemctl status kube-apiserver

配置 kube-controller-manager

# 建立 kube-controller-manager.service 檔案

vi /etc/systemd/system/kube-controller-manager.service


[Unit]
Description=kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-controller-manager \
  --address=127.0.0.1 \
  --master=http://192.168.1.109:8080 \
  --allocate-node-cidrs=true \
  --service-cluster-ip-range=192.254.0.0/16 \
  --cluster-cidr=192.233.0.0/16 \
  --cluster-name=kubernetes \
  --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
  --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
  --root-ca-file=/etc/kubernetes/ssl/ca.pem \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

啟動 kube-controller-manager

systemctl daemon-reload
systemctl enable kube-controller-manager
systemctl start kube-controller-manager
systemctl status kube-controller-manager

配置 kube-scheduler

# 建立 kube-cheduler.service 檔案

vi /etc/systemd/system/kube-scheduler.service


[Unit]
Description=kubernetes Scheduler
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-scheduler \
  --address=127.0.0.1 \
  --master=http://192.168.1.109:8080 \
  --leader-elect=true \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target

啟動 kube-scheduler

systemctl daemon-reload
systemctl enable kube-scheduler
systemctl start kube-scheduler
systemctl status kube-scheduler

驗證 Master 節點

[root@kubernetes-master-1 kubernetes]# kubectl get componentstatuses
NAME                 STATUS    MESSAGE              ERROR
scheduler            Healthy   ok                   
controller-manager   Healthy   ok                   
etcd-0               Healthy   {"health": "true"}   
etcd-1               Healthy   {"health": "true"}   
etcd-2               Healthy   {"health": "true"}   

部署 kubernetes Node 節點

Node 節點 需要部署的元件有 docker calico kubectl kubelet kube-proxy 這幾個元件。

配置 kubelet
kubelet 啟動時向 kube-apiserver 傳送 TLS bootstrapping 請求,需要先將 bootstrap token 檔案中的 kubelet-bootstrap 使用者賦予 system:node-bootstrapper 角色,然後 kubelet 才有許可權建立認證請求(certificatesigningrequests)。

# 先建立認證請求
# user 為 master 中 token.csv 檔案裡配置的使用者
# 只需在一個node中建立一次就可以

kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap

建立 kubelet kubeconfig 檔案

# 配置叢集

kubectl config set-cluster kubernetes \
  --certificate-authority=/etc/kubernetes/ssl/ca.pem \
  --embed-certs=true \
  --server=https://192.168.1.109:6443 \
  --kubeconfig=bootstrap.kubeconfig

# 配置客戶端認證

kubectl config set-credentials kubelet-bootstrap \
  --token=cdc7201eb10d76842ff5b6e35bfbb516 \
  --kubeconfig=bootstrap.kubeconfig


# 配置關聯

kubectl config set-context default \
  --cluster=kubernetes \
  --user=kubelet-bootstrap \
  --kubeconfig=bootstrap.kubeconfig


# 配置預設關聯
kubectl config use-context default --kubeconfig=bootstrap.kubeconfig

# 拷貝生成的 bootstrap.kubeconfig 檔案

mv bootstrap.kubeconfig /etc/kubernetes/

scp /etc/kubernetes/bootstrap.kubeconfig root@192.168.1.110:/etc/kubernetes/

scp /etc/kubernetes/bootstrap.kubeconfig root@192.168.1.111:/etc/kubernetes/

建立 kubelet.service 檔案

# 建立 kubelet 目錄

mkdir /var/lib/kubelet

vi /etc/systemd/system/kubelet.service


[Unit]
Description=kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --address=192.168.1.110 \
  --hostname-override=192.168.1.110 \
  --pod-infra-container-image=gcr.io/google_containers/pause-amd64:3.0 \
  --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --require-kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --cluster_dns=192.254.0.2 \
  --cluster_domain=cluster.local. \
  --hairpin-mode promiscuous-bridge \
  --allow-privileged=true \
  --serialize-image-pulls=false \
  --logtostderr=true \
  --cgroup-driver=systemd \
  --v=2
ExecStopPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT
ExecStopPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT
ExecStopPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT
ExecStopPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
# 如上配置:
192.168.1.110      為本機的IP
192.254.0.2       預分配的 dns 地址
cluster.local.   為 kubernetes 叢集的 domain
gcr.io/google_containers/pause-amd64:3.0 映象,

啟動 kubelet

systemctl daemon-reload
systemctl enable kubelet
systemctl start kubelet
systemctl status kubelet

啟動失敗報錯

kubelet cgroup driver: "cgroupfs" is different from docker cgroup driver: "systemd"

參考 kubernetes.io
在kubelet啟動檔案中新增引數--cgroup-driver=systemd \ 問題解決

配置 TLS 認證
注意 每新增一個節點 都要進行認證

# 檢視 csr 的名稱

[root@kubernetes-master-1 kubernetes]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg   1m        kubelet-bootstrap   Pending


# 增加 認證

[root@kubernetes-master-1 kubernetes]# kubectl certificate approve node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg
certificatesigningrequest "node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg" approved

驗證 nodes

[root@kubernetes-master-1 ssl]# kubectl get nodes 
NAME            STATUS    AGE       VERSION
192.168.1.110   Ready     11s       v1.7.2

# 成功以後會自動生成配置檔案與金鑰

# 配置檔案

ls /etc/kubernetes/kubelet.kubeconfig   
/etc/kubernetes/kubelet.kubeconfig


# 金鑰檔案

ls /etc/kubernetes/ssl/kubelet*
/etc/kubernetes/ssl/kubelet-client.crt  /etc/kubernetes/ssl/kubelet.crt
/etc/kubernetes/ssl/kubelet-client.key  /etc/kubernetes/ssl/kubelet.key
[root@kubernetes-master-1 kubernetes]# kubectl get csr
NAME                                                   AGE       REQUESTOR           CONDITION
node-csr-5sjslkPdNqpYZL8jRYp10seyVW4F91au7ftGQHG8YwM   2m        kubelet-bootstrap   Pending
node-csr-CB1uNi3e6099Jk-uYgJJHP1DdLIcOf9jxrJ2eH-PDZg   24m       kubelet-bootstrap   Approved,Issued
[root@kubernetes-master-1 kubernetes]# kubectl certificate approve node-csr-5sjslkPdNqpYZL8jRYp10seyVW4F91au7ftGQHG8YwM
certificatesigningrequest "node-csr-5sjslkPdNqpYZL8jRYp10seyVW4F91au7ftGQHG8YwM" approved
[root@kubernetes-master-1 kubernetes]# kubectl get nodes 
NAME            STATUS    AGE       VERSION
192.168.1.110   Ready     7s        v1.7.2
192.168.1.111   Ready     11m       v1.7.2

配置 kube-proxy

建立 kube-proxy 證書

# 證書方面由於我們node端沒有裝 cfssl
# 我們回到 master 端 機器 去配置證書,然後拷貝過來

[root@k8s-master-1 ~]# cd /opt/ssl


vi kube-proxy-csr.json

{
  "CN": "system:kube-proxy",
  "hosts": [],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShenZhen",
      "L": "ShenZhen",
      "O": "k8s",
      "OU": "System"
    }
  ]
}

生成 kube-proxy 證書和私鑰

/opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
  -ca-key=/etc/kubernetes/ssl/ca-key.pem \
  -config=/etc/kubernetes/ssl/config.json \
  -profile=kubernetes  kube-proxy-csr.json | /opt/local/cfssl/cfssljson -bare kube-proxy

# 檢視生成
ls kube-proxy*
kube-proxy.csr  kube-proxy-csr.json  kube-proxy-key.pem  kube-proxy.pem

# 拷貝到目錄
cp kube-proxy*.pem /etc/kubernetes/ssl/

scp kube-proxy*.pem root@192.168.1.110:/etc/kubernetes/ssl/

scp kube-proxy*.pem root@192.168.1.111:/etc/kubernetes/ssl/

相關推薦

kubernetes 1.7.2 + Calico部署

系統環境 系統 [root@kubernetes-master-1 ~]# cat /etc/redhat-release CentOS Linux release 7.4.1708 (Core) hosts [[email pr

Kubernetes 1.7.2 版本釋出_Kubernetes中文社群

Kubernetes 1.7.2 版本釋出了,該版本從1.7.1版本以來有15處修改,其中修復一個Pod BUG#48786, 下載k8s 1.7.2版本,檢視 1.7 版本相關介紹。 以下是 1.7.2版本更新內容: Use port 20256 for node-problem-dete

kubernetes 1.6.2部署

kubenetes docker 部署環境服務器:騰訊雲操作系統版本:centos 7.21、修改系統配置:[[email protected]/* */~]# cat /etc/sysctl.d/k8s.conf net.bridge.bridge-nf-call-ip6tables=

CentOS7.3 部署Haproxy 1.7.2

-- sts lob 末尾 裝包 backend iptables 失敗 fresh haproxy:http://www.haproxy.org/ 本文涉及haproxy的安裝,並做簡單配置。 一.環境準備 1. 操作系統 CentOS-7-x86_64-Everyt

1kubernetes 1.5.2原理以及叢集HA部署

Kubernetes是什麼?  1.是一個全新的基於容器技術的分散式架構,是谷歌的Borg技術的一個開源版本  Borg是谷歌的一個久負盛名的內部使用的大規模叢集管理系統,基於容器技術,目的是實現資源管理的自動化,垮多個數據中心的資源利用率的最大化  2.Kuberne

使用 Go-Ethereum 1.7.2搭建以太坊私有鏈

lean nic 腳本 ica welcome accounts oot db4 upgrade 1、什麽是Ethereum(以太坊)   以太坊(Ethereum)並不是一個機構,而是一款能夠在區塊鏈上實現智能合約、開源的底層系統,以太坊從誕生到2017年5月,短短3年

[Python3網絡爬蟲開發實戰] 1.7.2-mitmproxy的安裝

否則 mit -o homebrew str 官方網站 over stripe docker mitmproxy是一個支持HTTP和HTTPS的抓包程序,類似Fiddler、Charles的功能,只不過它通過控制臺的形式操作。 此外,mitmproxy還有兩個關聯組件,一個

Kubernetes 1.11.2使用NFS作為共享存儲

ins 權限 labels ora cas 打開 sta program ports 環境:NFS服務器: 192.168.0.252 /data/nfs Kubernetes Master: 192.168.0.210Kubernetes Node: 192.168.0.

jquery 1.7.2原始碼解析(二)構造jquery物件

構造jquery物件 jQuery物件是一個類陣列物件。 一)建構函式jQuery() 建構函式的7種用法:   1.jQuery(selector [, context ]) 傳入字串引數:檢查該字串是選擇器表示式還是HTML程式碼。如果是選擇器表示式,則遍歷文件查詢匹配的DOM元

使用Kubeadm搭建Kubernetes(1.12.2)叢集

Kubeadm是Kubernetes官方提供的用於快速安裝Kubernetes叢集的工具,伴隨Kubernetes每個版本的釋出都會同步更新,在2018年將進入GA狀態,說明離生產環境中使用的距離越來越近了。 使用Kubeadm搭建Kubernetes叢集本來是件很簡單的事,但由於眾所周知的原因,在中國大陸

Kubernetes 1.12.2版,使用docker 映象安裝

kubernetes 1.12.2版本安裝起碼準兩臺機器:master,nodeip: master:192.168.1.220node1:192.168.1.221node2:192.168.1.205環境:centos7配置:2核4G起注意事項:docker版本最高支援18.06,高於要此版本報錯kube

Kubernetes 1.12.2版,使用docker 鏡像安裝

vim bus packages 清理 0.0.0.0 create 通過 als loading kubernetes 1.12.2版本安裝起碼準兩臺機器:master,nodeip: master:192.168.1.220node1:192.168.1.221node

CentOS 7.2 安裝部署 Ceph 及新增 PG

前期準備:準備三臺CentOS 7.2系統,每臺機器三塊硬碟,關閉selinux,關閉iptables,做好時間同步,做好本地域名解析,並對每臺機器做互信。192.168.10.101 ceph-node1192.168.10.22 ceph-node2192.168.10.33 ceph-node3每臺機器

CentOS 7.2 安裝部署 Ceph 及添加 PG

狀況 告警 通過 健康狀況 pub sort network check 多文件 前期準備:準備三臺CentOS 7.2系統,每臺機器三塊硬盤,關閉selinux,關閉iptables,做好時間同步,做好本地域名解析,並對每臺機器做互信。192.168.10.101 ce

Kubernetes 1.12.2快速升級

Kubernetes 1.12.2已經正式釋出,快速升級(含國內映象快速下載連結)包括升級kubeadm/kubectl/kubelet版本、拉取映象、升級Kubernetes叢集三個主要步驟。注意Kubernetes 1.12.2版本暫時不支援最新的Docker 18.09,只能用Docker 1

寫一個遞迴函式DigitSum(n),輸入一個非負整數,返回組成它的數字之和, 例如,呼叫DigitSum(1729),則應該返回1+7+2+9,它的和是19

#include <stdio.h> #define _CRT_SECURE_NO_WARNINGS 1 int DigitSum(int i) { int sum = 0; int j = 0; if (i != 0) { j = i % 10; i = i /

讀西瓜書:7.1/7.2/7.3章

Here p(X,Y)p(X, Y )p(X,Y) is a joint probability and is verbalized as “the probability of X and Y ”. Similarly, the quantity p(Y∣X)p(Y |X)p(Y∣X) is a condi

Kubernetes 1.8.2 個人手動安裝實戰筆記(系統能起來的!)

本博折騰了好一陣子,從0到1折騰起了基於 Kubernetes 1.8.2 的 全手動安裝 。 下面是個人這套手工安裝流程和註解。(本人的K8S就是這樣起來的)。 本次安裝版本為: Kubernetes v1.8.2 Etcd v3.2.9 Calico v

Kubernetes 1.7.6 版本釋出_Kubernetes中文社群

[fluentd-gcp addon] Fluentd will trim lines exceeding 100KB instead of dropping them. (#52289, @crassirostris) Cluster Autoscaler 0.6.2 (#52359, @mwielgus

Kubernetes 1.7.4 版本釋出_Kubernetes中文社群

昨天Kubernetes 1.7.4 版本釋出了,相比1.7.3版本共有17處明顯變化,例如: 修復建立或更新ELB會修改全域性定義的Security Group Bug(#49805,@ nbutton23) 修復kubefed在不同版本RBAC建立問題(#50537,@liggitt)