程式人生 > Docker網路通訊openvswitch(來自龍果學院學習資料 + 自己實驗結果)

Docker網路通訊openvswitch(來自龍果學院學習資料 + 自己實驗結果)

1、 拓撲如下:(原文此處為拓撲圖,圖片未能載入)

為了更方便地管理Docker網路,我們這一講使用openvswitch網橋的方式來對容器網路進行管理,實現容器啟動後能夠互通。Open vSwitch是一個高質量的、多層虛擬交換機。通過構建隧道的方式,使得兩個內網網路能夠互相通訊。

1、 給每臺Docker所在主機,預先分配好IP地址: 18.141:

[root@bigdata2 ~]# cat /etc/sysconfig/docker-network 
# /etc/sysconfig/docker-network
DOCKER_NETWORK_OPTIONS=--bip=172.17.0.1/16

18.142:

[root@bigdata2 ~]# cat /etc/sysconfig/docker-network 
# /etc/sysconfig/docker-network
DOCKER_NETWORK_OPTIONS=--bip=172.16.0.1/16

注意除了上面的方式,還可以通過編輯/etc/docker/daemon.json檔案,然後重啟docker來實現(但是經過試驗,發現下面的方式和上面的方式不能同時配置,不然docker會啟動不了):(原文此處有兩張配置截圖,圖片未能載入)

另外如果刪除已經有的路由,可以通過如下命令:

[root@bigdata2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.18.1    0.0.0.0         UG    100    0        0 enp3s0
192.168.18.0    0.0.0.0         255.255.255.0   U     100    0        0 enp3s0
192.168.18.0    0.0.0.0         255.255.255.0   U     100    0        0 enp3s0
192.168.200.0   0.0.0.0         255.255.255.0   U     0      0        0 docker0
[root@bigdata2 ~]# route del -net 192.168.200.0/24 gw 0.0.0.0
[root@bigdata2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.18.1    0.0.0.0         UG    100    0        0 enp3s0
192.168.18.0    0.0.0.0         255.255.255.0   U     100    0        0 enp3s0
192.168.18.0    0.0.0.0         255.255.255.0   U     100    0        0 enp3s0
[root@bigdata2 ~]#

2、 安裝openvswitch(兩臺都執行):

[root@bigdata2 ~]# yum install gcc make python-devel openssl-devel kernel-devel graphviz    kernel-debug-devel autoconf automake rpm-build redhat-rpm-config  libtool selinux-policy-devel -y
[root@bigdata2 ~]#cd /usr/local/src
[root@bigdata2 ~]#wget http://openvswitch.org/releases/openvswitch-2.7.0.tar.gz
[root@bigdata2 ~]#mkdir -p ~/rpmbuild/SOURCES
[root@bigdata2 ~]#cp openvswitch-2.7.0.tar.gz ~/rpmbuild/SOURCES/
[root@bigdata2 SOURCES]#cd /root/rpmbuild/SOURCES
[root@bigdata2 SOURCES]#pwd
/root/rpmbuild/SOURCES
[root@bigdata2 SOURCES]# tar -zxvf openvswitch-2.7.0.tar.gz
[root@bigdata2 SOURCES]# sed 's/openvswitch-kmod, //g' openvswitch-2.7.0/rhel/openvswitch.spec > openvswitch-2.7.0/rhel/openvswitch_no_kmod.spec
[root@bigdata2 SOURCES] #rpmbuild -bb --without check openvswitch-2.7.0/rhel/openvswitch_no_kmod.spec
執行完成之後,到/root/rpmbuild目錄下發現有新的目錄生成:
[root@bigdata2 rpmbuild]# pwd
/root/rpmbuild
[root@bigdata2 rpmbuild]# ls
BUILD  BUILDROOT  RPMS  SOURCES  SPECS  SRPMS
[root@bigdata2 rpmbuild]#
##編譯之後,x86目錄下:
[root@bigdata2 src]# cd ~/rpmbuild/RPMS/x86_64/
[root@bigdata2 x86_64]# ls
openvswitch-2.7.0-1.x86_64.rpm  openvswitch-debuginfo-2.7.0-1.x86_64.rpm  openvswitch-devel-2.7.0-1.x86_64.rpm
[root@bigdata2 x86_64]# yum localinstall -y openvswitch-2.7.0-1.x86_64.rpm
[root@bigdata2 x86_64]# systemctl start openvswitch

3、 安裝網橋管理工具:

[root@bigdata2 x86_64]# yum -y install bridge-utils

4、 部署ovs路由:18.141上操作。

4、1:在兩個主機上建立隧道網橋br0,並通過GRE協議建立隧道(注意:下面的命令使用的是type=gre,即GRE隧道,而非VXLAN)

[root@bigdata2 x86_64]# ovs-vsctl add-br br0
#要注意的是下面的ip是要連線的機器的ip地址
[root@bigdata2 x86_64]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre options:remote_ip=192.168.18.142
#新增br0到本地docker0,使得容器流量通過ovs流經tunnel
[root@bigdata2 x86_64]# brctl addif docker0 br0
[root@bigdata2 x86_64]# ip link set dev br0 up
[root@bigdata2 x86_64]# ip link set dev docker0 up
#新增一條路由,使得目的地址為172.16網段(對端容器網段)的流量都轉向docker0
[root@bigdata2 x86_64]# ip route add 172.16.0.0/16 dev docker0

4、2:在18.142上面做相同操作,修改一下對應的IP即可:

[root@bigdata2 x86_64]# scp openvswitch-2.7.0-1.x86_64.rpm [email protected]:/usr/local/src/
[root@node3 src]# systemctl start openvswitch
[root@bigdata2 x86_64]# ovs-vsctl add-br br0
[root@bigdata2 x86_64]# ovs-vsctl add-port br0 gre1 -- set interface gre1 type=gre options:remote_ip=192.168.18.141
#新增br0到本地docker0,使得容器流量通過ovs流經tunnel
[root@bigdata2 x86_64]# brctl addif docker0 br0
[root@bigdata2 x86_64]# ip link set dev br0 up
[root@bigdata2 x86_64]# ip link set dev docker0 up
#新增一條路由,使得目的地址為172.17網段(對端容器網段)的流量都轉向docker0
[root@bigdata2 x86_64]# ip route add 172.17.0.0/16 dev docker0

4、3:檢視一下相關路由,把上一講我們新增的路由全部刪掉:

[root@bigdata2 ~]# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.63.254  0.0.0.0         UG    100    0        0 ens33
172.16.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
172.18.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker_gwbridge
192.168.63.0    0.0.0.0         255.255.255.0   U     100    0        0 ens33

5、 為了方便測試,我們構建一個nginx映象:

[root@node3 test]# tree
├── default_nginx.conf
├── Dockerfile
└── index.html
##########
[root@node3 test]# cat Dockerfile 
FROM lnmp/nginx:1.0
ADD index.html /web/ 
ADD default_nginx.conf /usr/local/nginx/conf/vhosts/default.conf 
EXPOSE 80 
CMD ["/usr/local/nginx/sbin/nginx"]
##########
[root@node3 test]# cat default_nginx.conf 
server {
    listen       80 default_server;
    server_name  localhost;
    index        index.html;
    root         /web;
}
########
[root@node3 test]# cat index.html 
80

6、 建立映象並且啟動:

18.142:
[root@node3 test]#docker build -t test/nginx:1.0 .
[root@node3 test]#docker ps
[root@node3 test]#docker run -dit -P test/nginx:1.0
[root@node3 test]# docker ps
CONTAINER ID        IMAGE               COMMAND                  CREATED             STATUS              PORTS                   NAMES
78845f5cb91b        test/nginx:1.0      "/usr/local/nginx/..."   10 minutes ago      Up 10 minutes       0.0.0.0:32768->80/tcp   eager_euler
[root@node3 test]# docker exec -it 78845f5cb91b /bin/bash
[root@78845f5cb91b /]# ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:10:00:02  
          inet addr:172.16.0.2  Bcast:0.0.0.0  Mask:255.255.0.0
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:72 errors:0 dropped:0 overruns:0 frame:0
          TX packets:35 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:6454 (6.3 KiB)  TX bytes:3970 (3.8 KiB)

18.141啟動一個:

[root@bigdata2 src]# docker run -it lnmp/nginx:1.0 /bin/bash
[root@dcff2de3f060 /]# ifconfig
eth0      Link encap:Ethernet  HWaddr 02:42:AC:11:00:03  
          inet addr:172.17.0.3  Bcast:0.0.0.0  Mask:255.255.0.0
          inet6 addr: fe80::42:acff:fe11:3/64 Scope:Link
          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1
          RX packets:7 errors:0 dropped:0 overruns:0 frame:0
          TX packets:7 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:0 
          RX bytes:578 (578.0 b)  TX bytes:578 (578.0 b)

lo        Link encap:Local Loopback  
          inet addr:127.0.0.1  Mask:255.0.0.0
          inet6 addr: ::1/128 Scope:Host
          UP LOOPBACK RUNNING  MTU:65536  Metric:1
          RX packets:0 errors:0 dropped:0 overruns:0 frame:0
          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
          collisions:0 txqueuelen:1 
          RX bytes:0 (0.0 b)  TX bytes:0 (0.0 b)

[root@dcff2de3f060 /]# curl 172.16.0.2
80
[root@dcff2de3f060 /]# curl 172.16.0.2
80

### 排錯:我們可以檢視一下ovs相對應的狀態:
[root@node3 test]# ovs-vsctl show
1f6c6cde-0cb2-41da-a1b9-999c80a9512d
    Bridge "br0"
        Port "br0"
            Interface "br0"
                type: internal
        Port "gre1"
            Interface "gre1"
                type: gre
                options: {remote_ip="192.168.18.141"}
    ovs_version: "2.7.0"