
Docker in Practice -- Cross-Host Communication with Open vSwitch


Docker is currently the most popular container technology, but its networking implementation and management still fall short. Early versions of Docker relied on a Linux bridge for network setup and could only create a single network interface inside a container. As the docker/libnetwork project progressed, Docker's network management features gradually grew. Even so, cross-host communication remains a problem Docker has to address, and it matters a great deal for container management systems such as Kubernetes. The mainstream solutions today include flannel, Weave, Pipework, and Open vSwitch.


Open vSwitch is relatively simple to set up, mature, and powerful, which makes it a good fit for interconnecting Docker's underlying networks across hosts. Getting down to business, the basic architecture is shown below:

[Architecture diagram: each host's docker0 bridge is attached to an OVS bridge br-tun, and the two br-tun bridges are linked by a VXLAN tunnel across the physical network]


The concrete implementation steps are as follows:

1. Install docker, bridge-utils, and openvswitch

[root@dockerserver1 ~]# yum install docker bridge-utils -y 
[root@dockerserver1 ~]# yum install wget openssl-devel -y 
[root@dockerserver1 ~]# yum groupinstall "Development Tools"
[root@dockerserver1 ~]# adduser ovswitch
[root@dockerserver1 ~]# su - ovswitch
[ovswitch@dockerserver1 ~]$ wget http://openvswitch.org/releases/openvswitch-2.3.0.tar.gz
[ovswitch@dockerserver1 ~]$ tar -zxvpf openvswitch-2.3.0.tar.gz
[ovswitch@dockerserver1 ~]$ mkdir -p ~/rpmbuild/SOURCES
[ovswitch@dockerserver1 ~]$ sed 's/openvswitch-kmod, //g' openvswitch-2.3.0/rhel/openvswitch.spec > openvswitch-2.3.0/rhel/openvswitch_no_kmod.spec
[ovswitch@dockerserver1 ~]$ cp openvswitch-2.3.0.tar.gz rpmbuild/SOURCES/
     
[ovswitch@dockerserver1 ~]$ rpmbuild -bb --without check ~/openvswitch-2.3.0/rhel/openvswitch_no_kmod.spec
     
[ovswitch@dockerserver1 ~]$ exit
     
[root@dockerserver1 ~]# yum localinstall /home/ovswitch/rpmbuild/RPMS/x86_64/openvswitch-2.3.0-1.x86_64.rpm -y
[root@dockerserver1 ~]# mkdir /etc/openvswitch
[root@dockerserver1 ~]# setenforce 0
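
Optionally, a quick sanity check that the packages are in place before moving on (a minimal sketch; the exact openvswitch package name depends on the RPM built above):

[root@dockerserver1 ~]# rpm -q openvswitch bridge-utils    # both packages should be reported as installed
[root@dockerserver1 ~]# docker --version                   # the Docker client should print its version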


2. On dockerserver1, edit /usr/lib/systemd/system/docker.service and add the Docker startup option "--bip=10.0.0.1/24", as shown below:

[root@dockerserver1 ~]# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target rhel-push-plugin.socket registries.service
Wants=docker-storage-setup.service
Requires=docker-cleanup.timer

[Service]
Type=notify
NotifyAccess=all
EnvironmentFile=-/run/containers/registries.conf
EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
Environment=GOTRACEBACK=crash
Environment=DOCKER_HTTP_HOST_COMPAT=1
Environment=PATH=/usr/libexec/docker:/usr/bin:/usr/sbin
ExecStart=/usr/bin/dockerd-current           --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current           --default-runtime=docker-runc           --exec-opt native.cgroupdriver=systemd           --userland-proxy-path=/usr/libexec/docker/docker-proxy-current           --bip=10.0.0.1/24           $OPTIONS           $DOCKER_STORAGE_OPTIONS           $DOCKER_NETWORK_OPTIONS           $ADD_REGISTRY           $BLOCK_REGISTRY           $INSECURE_REGISTRY          $REGISTRIES  
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=0
Restart=on-abnormal
MountFlags=slave
KillMode=process

[Install]
WantedBy=multi-user.target
[root@dockerserver1 ~]#
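
Editing the unit file directly works, but since ExecStart already expands $OPTIONS from /etc/sysconfig/docker (see the EnvironmentFile lines above), an alternative is to put the option there instead and leave the unit file untouched, for example:

# /etc/sysconfig/docker (alternative to editing the unit file; keep any options already present)
OPTIONS='--bip=10.0.0.1/24'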

3. On dockerserver2, edit /usr/lib/systemd/system/docker.service and add the Docker startup option "--bip=10.0.1.1/24", as shown below:

[root@dockerserver2 ~]# cat /usr/lib/systemd/system/docker.service   
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target rhel-push-plugin.socket registries.service
Wants=docker-storage-setup.service
Requires=docker-cleanup.timer

[Service]
Type=notify
NotifyAccess=all
EnvironmentFile=-/run/containers/registries.conf
EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
Environment=GOTRACEBACK=crash
Environment=DOCKER_HTTP_HOST_COMPAT=1
Environment=PATH=/usr/libexec/docker:/usr/bin:/usr/sbin
ExecStart=/usr/bin/dockerd-current           --add-runtime docker-runc=/usr/libexec/docker/docker-runc-current           --default-runtime=docker-runc           --exec-opt native.cgroupdriver=systemd           --userland-proxy-path=/usr/libexec/docker/docker-proxy-current           --bip=10.0.1.1/24           $OPTIONS           $DOCKER_STORAGE_OPTIONS           $DOCKER_NETWORK_OPTIONS           $ADD_REGISTRY           $BLOCK_REGISTRY           $INSECURE_REGISTRY          $REGISTRIES
ExecReload=/bin/kill -s HUP $MAINPID
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
TimeoutStartSec=0
Restart=on-abnormal
MountFlags=slave
KillMode=process

[Install]
WantedBy=multi-user.target
[root@dockerserver2 ~]#
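
After modifying a unit file under /usr/lib/systemd/system, systemd needs to re-read it before the change takes effect, so run this on both hosts before starting Docker in the next step:

# systemctl daemon-reload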


4. Start the Docker service on both hosts

# systemctl start docker  
# systemctl enable docker


5. Once the Docker service is up, a new bridge named docker0 is created and assigned the IP address we configured above:

[root@dockerserver1 ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.02427076111e       no
[root@dockerserver1 ~]# ifconfig docker0
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 10.0.0.1  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 02:42:70:76:11:1e  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@dockerserver1 ~]#
[root@dockerserver2 ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.0242ba00c394       no
[root@dockerserver2 ~]# ifconfig docker0
docker0: flags=4099<UP,BROADCAST,MULTICAST>  mtu 1500
        inet 10.0.1.1  netmask 255.255.255.0  broadcast 0.0.0.0
        ether 02:42:ba:00:c3:94  txqueuelen 0  (Ethernet)
        RX packets 0  bytes 0 (0.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 0  bytes 0 (0.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@dockerserver2 ~]#
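
The same information can also be checked from the Docker side; the default bridge network should report the subnet and gateway that --bip configured (the exact JSON layout varies slightly between Docker versions):

[root@dockerserver1 ~]# docker network inspect bridge | grep -E '"Subnet"|"Gateway"'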


6. Start Open vSwitch on both hosts

# systemctl start openvswitch
# chkconfig openvswitch on
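
On CentOS 7 the same thing can also be done through systemd, which forwards to chkconfig for SysV-style services when necessary:

# systemctl enable openvswitch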


7. On both hosts, create the tunnel bridge br-tun and connect the two hosts with a VXLAN tunnel

[root@dockerserver1 ~]# ovs-vsctl add-br br-tun
[root@dockerserver1 ~]# ovs-vsctl add-port br-tun vxlan0 -- set Interface vxlan0 type=vxlan options:remote_ip=10.10.172.204
[root@dockerserver1 ~]# brctl show 
bridge name     bridge id               STP enabled     interfaces
docker0         8000.02427076111e       no              br-tun

[root@dockerserver2 ~]# ovs-vsctl add-br br-tun 
[root@dockerserver2 ~]# ovs-vsctl add-port br-tun vxlan0 -- set Interface vxlan0 type=vxlan options:remote_ip=10.10.172.203
[root@dockerserver2 ~]# brctl show
bridge name     bridge id               STP enabled     interfaces
docker0         8000.0242ba00c394       no              br-tun
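
The tunnel configuration can be verified from the OVS side; vxlan0 should appear as a port of br-tun, with the remote_ip option pointing at the peer host:

[root@dockerserver1 ~]# ovs-vsctl show                        # lists bridges, ports and interface types
[root@dockerserver1 ~]# ovs-vsctl get Interface vxlan0 options    # should include remote_ip="10.10.172.204"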


8. Attach br-tun to the docker0 bridge as an interface

[root@dockerserver1 ~]# brctl addif docker0 br-tun 
[root@dockerserver2 ~]# brctl addif docker0 br-tun


9. Since the containers on the two hosts sit in different subnets, routes must be added so that containers on both sides can reach each other

[root@dockerserver1 ~]# ip route add 10.0.1.0/24 via 10.10.172.204 dev eth0 
[root@dockerserver2 ~]# ip route add 10.0.0.0/24 via 10.10.172.203 dev eth0
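
Routes added this way are runtime-only and will not survive a reboot. On CentOS 7 with the classic network scripts (not a pure NetworkManager setup), one common way to persist them is a route-<interface> file; a sketch, assuming the uplink interface is eth0 as above:

# /etc/sysconfig/network-scripts/route-eth0 on dockerserver1
10.0.1.0/24 via 10.10.172.204 dev eth0

# /etc/sysconfig/network-scripts/route-eth0 on dockerserver2
10.0.0.0/24 via 10.10.172.203 dev eth0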


10. Start a Docker container on each host and verify that the two can communicate.

[root@dockerserver1 ~]# docker run --rm -it centos /bin/bash         
[root@8f3cac41a7d5 /]# yum install net-tools -y
[root@8f3cac41a7d5 /]# ifconfig eth0
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.0.2  netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::42:aff:fe00:2  prefixlen 64  scopeid 0x20<link>
        ether 02:42:0a:00:00:02  txqueuelen 0  (Ethernet)
        RX packets 4266  bytes 13337782 (12.7 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4144  bytes 288723 (281.9 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@8f3cac41a7d5 /]#
[root@dockerserver2 ~]# docker run --rm -it centos /bin/bash
[root@3edc0ed8f805 /]# yum install net-tools -y
[root@3edc0ed8f805 /]# ifconfig eth0
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 10.0.1.2  netmask 255.255.255.0  broadcast 0.0.0.0
        inet6 fe80::42:aff:fe00:102  prefixlen 64  scopeid 0x20<link>
        ether 02:42:0a:00:01:02  txqueuelen 0  (Ethernet)
        RX packets 4536  bytes 13344451 (12.7 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 4381  bytes 301685 (294.6 KiB)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0

[root@3edc0ed8f805 /]# ping 10.0.0.2
PING 10.0.0.2 (10.0.0.2) 56(84) bytes of data.
64 bytes from 10.0.0.2: icmp_seq=1 ttl=62 time=1.68 ms
^C
--- 10.0.0.2 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.683/1.683/1.683/0.000 ms
[root@3edc0ed8f805 /]#
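
To confirm that this traffic really travels through the VXLAN tunnel, the encapsulated packets can be watched on the physical interface while the ping runs; Open vSwitch uses the standard VXLAN UDP port 4789 by default (install tcpdump first if it is missing):

[root@dockerserver1 ~]# tcpdump -nn -i eth0 udp port 4789    # ICMP between containers shows up as VXLAN-encapsulated UDP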


11. Check the network interface information on the two Docker hosts

[root@dockerserver1 ~]# ip addr list
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:86:3e:d8 brd ff:ff:ff:ff:ff:ff
    inet 10.10.172.203/24 brd 10.10.172.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe86:3ed8/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:70:76:11:1e brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.1/24 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:70ff:fe76:111e/64 scope link 
       valid_lft forever preferred_lft forever
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN 
    link/ether 06:19:20:ae:f6:61 brd ff:ff:ff:ff:ff:ff
5: br-tun: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master docker0 state DOWN 
    link/ether 42:2c:39:7f:a2:4a brd ff:ff:ff:ff:ff:ff
[root@dockerserver1 ~]#
[root@dockerserver2 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN 
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 00:50:56:86:22:d8 brd ff:ff:ff:ff:ff:ff
    inet 10.10.172.204/24 brd 10.10.172.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::250:56ff:fe86:22d8/64 scope link 
       valid_lft forever preferred_lft forever
3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN 
    link/ether 02:42:ba:00:c3:94 brd ff:ff:ff:ff:ff:ff
    inet 10.0.1.1/24 scope global docker0
       valid_lft forever preferred_lft forever
    inet6 fe80::42:baff:fe00:c394/64 scope link 
       valid_lft forever preferred_lft forever
4: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN 
    link/ether be:d4:64:ee:cb:29 brd ff:ff:ff:ff:ff:ff
5: br-tun: <BROADCAST,MULTICAST> mtu 1500 qdisc noop master docker0 state DOWN 
    link/ether 6e:3d:3e:1a:6a:4e brd ff:ff:ff:ff:ff:ff
[root@dockerserver2 ~]#
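
One caveat worth keeping in mind: VXLAN encapsulation adds roughly 50 bytes of overhead per packet, so with a physical MTU of 1500 large packets between containers may be fragmented or dropped. If that happens, the container MTU can be lowered with Docker's --mtu daemon option next to --bip in the ExecStart line shown earlier, for example:

--bip=10.0.0.1/24 --mtu=1450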

