
Deploying a Nebula Graph Cluster Quickly with Docker Swarm


1. Introduction

This article describes how to deploy a Nebula Graph cluster with Docker Swarm.

2. Setting up the Nebula cluster

2.1 Environment preparation

Machine preparation

IP              Memory (GB)   CPU (cores)
192.168.1.166   16            4
192.168.1.167   16            4
192.168.1.168   16            4
Make sure Docker is already installed on every machine before starting.
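
If Docker is not installed yet, one common route on Ubuntu/Debian hosts is the official convenience script (a quick sketch; adapt it to your distribution and security policy):

# Install Docker via the official convenience script (requires internet access)
curl -fsSL https://get.docker.com | sh
# Start the daemon now and enable it at boot
systemctl enable --now docker
# Confirm the installation
docker --version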

2.2 Initialize the Swarm cluster

Run the following on 192.168.1.166:

$ docker swarm init --advertise-addr 192.168.1.166
Swarm initialized: current node (dxn1zf6l61qsb1josjja83ngz) is now a manager.
To add a worker to this swarm, run the following command:
 docker swarm join \
 --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
 192.168.1.166:2377

To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.

2.3 Join the worker nodes

Following the hint printed by the init command, join the worker nodes by running the following on 192.168.1.167 and 192.168.1.168:

docker swarm join \
 --token SWMTKN-1-49nj1cmql0jkz5s954yi3oex3nedyz0fb0xx14ie39trti4wxv-8vxv8rssmk743ojnwacrr2e7c \
 192.168.1.166:2377
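
If the token from the init output is no longer at hand, it can be printed again on the manager node at any time:

# Show the full join command (including token) for workers
docker swarm join-token worker
# Or for adding another manager
docker swarm join-token manager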

2.4 Verify the cluster

docker node ls

ID                            HOSTNAME            STATUS              AVAILABILITY        MANAGER STATUS      ENGINE VERSION
h0az2wzqetpwhl9ybu76yxaen *   KF2-DATA-166        Ready               Active              Reachable           18.06.1-ce
q6jripaolxsl7xqv3cmv5pxji     KF2-DATA-167        Ready               Active              Leader              18.06.1-ce
h1iql1uvm7123h3gon9so69dy     KF2-DATA-168        Ready               Active                                  18.06.1-ce
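
The stack file in the next step pins every service to a node by hostname (KF2-DATA-166/167/168), so it is worth double-checking the hostnames Swarm reports; a compact view using the standard --format option:

docker node ls --format '{{.Hostname}}  {{.Status}}  {{.ManagerStatus}}'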

2.5 Configure the Docker stack

vi docker-stack.yml

Add the following content:

version: '3.6'
services:
  metad0:
    image: vesoft/nebula-metad:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --local_ip=192.168.1.166
      - --ws_ip=192.168.1.166
      - --port=45500
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-166
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.166:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 45500
        published: 45500
        protocol: tcp
        mode: host
    volumes:
      - data-metad0:/data/meta
      - logs-metad0:/logs
    networks:
      - nebula-net

  metad1:
    image: vesoft/nebula-metad:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --local_ip=192.168.1.167
      - --ws_ip=192.168.1.167
      - --port=45500
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-167
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.167:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 45500
        published: 45500
        protocol: tcp
        mode: host
    volumes:
      - data-metad1:/data/meta
      - logs-metad1:/logs
    networks:
      - nebula-net

  metad2:
    image: vesoft/nebula-metad:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --local_ip=192.168.1.168
      - --ws_ip=192.168.1.168
      - --port=45500
      - --data_path=/data/meta
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-168
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.168:11000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 11000
        published: 11000
        protocol: tcp
        mode: host
      - target: 11002
        published: 11002
        protocol: tcp
        mode: host
      - target: 45500
        published: 45500
        protocol: tcp
        mode: host
    volumes:
      - data-metad2:/data/meta
      - logs-metad2:/logs
    networks:
      - nebula-net

  storaged0:
    image: vesoft/nebula-storaged:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --local_ip=192.168.1.166
      - --ws_ip=192.168.1.166
      - --port=44500
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-166
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.166:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12002
        protocol: tcp
        mode: host
    volumes:
      - data-storaged0:/data/storage
      - logs-storaged0:/logs
    networks:
      - nebula-net
  storaged1:
    image: vesoft/nebula-storaged:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --local_ip=192.168.1.167
      - --ws_ip=192.168.1.167
      - --port=44500
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-167
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.167:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12004
        protocol: tcp
        mode: host
    volumes:
      - data-storaged1:/data/storage
      - logs-storaged1:/logs
    networks:
      - nebula-net

  storaged2:
    image: vesoft/nebula-storaged:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --local_ip=192.168.1.168
      - --ws_ip=192.168.1.168
      - --port=44500
      - --data_path=/data/storage
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-168
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.168:12000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 12000
        published: 12000
        protocol: tcp
        mode: host
      - target: 12002
        published: 12006
        protocol: tcp
        mode: host
    volumes:
      - data-storaged2:/data/storage
      - logs-storaged2:/logs
    networks:
      - nebula-net
  graphd1:
    image: vesoft/nebula-graphd:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --port=3699
      - --ws_ip=192.168.1.166
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-166
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.166:13000/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 3699
        published: 3699
        protocol: tcp
        mode: host
      - target: 13000
        published: 13000
        protocol: tcp
#        mode: host
      - target: 13002
        published: 13002
        protocol: tcp
        mode: host
    volumes:
      - logs-graphd:/logs
    networks:
      - nebula-net

  graphd2:
    image: vesoft/nebula-graphd:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --port=3699
      - --ws_ip=192.168.1.167
      - --log_dir=/logs
      - --v=2
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-167
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.167:13001/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 3699
        published: 3640
        protocol: tcp
        mode: host
      - target: 13000
        published: 13001
        protocol: tcp
        mode: host
      - target: 13002
        published: 13003
        protocol: tcp
#        mode: host
    volumes:
      - logs-graphd2:/logs
    networks:
      - nebula-net
  graphd3:
    image: vesoft/nebula-graphd:nightly
    env_file:
      - ./nebula.env
    command:
      - --meta_server_addrs=192.168.1.166:45500,192.168.1.167:45500,192.168.1.168:45500
      - --port=3699
      - --ws_ip=192.168.1.168
      - --log_dir=/logs
      - --v=0
      - --minloglevel=2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == KF2-DATA-168
    depends_on:
      - metad0
      - metad1
      - metad2
    healthcheck:
      test: ["CMD", "curl", "-f", "http://192.168.1.168:13002/status"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 20s
    ports:
      - target: 3699
        published: 3641
        protocol: tcp
        mode: host
      - target: 13000
        published: 13002
        protocol: tcp
#        mode: host
      - target: 13002
        published: 13004
        protocol: tcp
        mode: host
    volumes:
      - logs-graphd3:/logs
    networks:
      - nebula-net
networks:
  nebula-net:
    external: true
    attachable: true
    name: host
volumes:
  data-metad0:
  logs-metad0:
  data-metad1:
  logs-metad1:
  data-metad2:
  logs-metad2:
  data-storaged0:
  logs-storaged0:
  data-storaged1:
  logs-storaged1:
  data-storaged2:
  logs-storaged2:
  logs-graphd:
  logs-graphd2:
  logs-graphd3:
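Note that the networks section above maps nebula-net onto the pre-existing host network (external: true, name: host), so there is nothing to create before deploying. If you would rather isolate the services on a dedicated attachable overlay network (a variation, not what this article uses), it would be created on the manager first and the name: host line dropped:

# Only needed when not reusing the host network
docker network create --driver overlay --attachable nebula-net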


Edit nebula.env and add the following content:

TZ=UTC
USER=root

2.6 Start the Nebula cluster

 

docker stack deploy nebula -c docker-stack.yml 
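
Once deployed, the placement and health of each service can be checked from the manager node (standard Docker commands; exact output depends on your environment):

# One line per service, with desired vs. running replica counts
docker stack services nebula
# One line per task, showing which node each container landed on and any errors
docker stack ps nebula --no-trunc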

 

3. Cluster load balancing and high availability

The Nebula Graph clients currently (1.x) provide no load-balancing capability; they simply pick a graphd at random to connect to. For production use you therefore need to set up load balancing and high availability yourself.

The deployment architecture is divided into three layers: the data service layer, the load-balancing layer and the high-availability layer, as shown in Figure 3.1.

Load-balancing layer: load-balances client requests and distributes them to the data service layer below.

High-availability layer: provides high availability for HAProxy itself, keeping the load-balancing layer, and therefore the whole cluster, in service.

3.1 Load-balancing configuration

HAProxy is configured with docker-compose. Create the following three files.

  • Dockerfile, with the following content:

FROM haproxy:1.7
COPY haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
EXPOSE 3640
  • docker-compose.yml, with the following content (the external network app_net must already exist; see the note after this list):

version: "3.2"
services:
  haproxy:
    container_name: haproxy
    build: .
    volumes:
      - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
    ports:
      - 3640:3640
    restart: always
    networks:
      - app_net
networks:
  app_net:
    external: true
  • haproxy.cfg, with the following content:

global
    daemon
    maxconn 30000
    log 127.0.0.1 local0 info
    log 127.0.0.1 local1 warning

defaults
    log-format %hr\ %ST\ %B\ %Ts
    log  global
    mode http
    option http-keep-alive
    timeout connect 5000ms
    timeout client 10000ms
    timeout server 50000ms
    timeout http-request 20000ms

# custom your own frontends && backends && listen conf
#CUSTOM

listen graphd-cluster
    bind *:3640
    mode tcp
    maxconn 300
    balance roundrobin
    server server1 192.168.1.166:3699 maxconn 300 check
    server server2 192.168.1.167:3699 maxconn 300 check
    server server3 192.168.1.168:3699 maxconn 300 check

listen stats
    bind *:1080
    stats refresh 30s
    stats uri /stats
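docker-compose.yml above declares app_net as an external network, so it has to exist before HAProxy is started; a plain bridge network is enough:

docker network create app_net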

3.2 Start HAProxy

docker-compose up -d
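
To check that HAProxy is up and forwarding (assuming it runs on 192.168.1.166 and that curl and nc/netcat are available; the ports come from haproxy.cfg above):

# Stats page defined in the "listen stats" section
curl -s http://192.168.1.166:1080/stats | head
# TCP check of the load-balanced graphd endpoint
nc -zv 192.168.1.166 3640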

3.3 High-availability configuration

Note: keepalived requires a VIP (virtual IP) to be prepared in advance; in the configuration below, 192.168.1.99 is that virtual IP.

Apply the following configuration on 192.168.1.166, 192.168.1.167 and 192.168.1.168.

  • Install keepalived
apt-get update && apt-get upgrade && apt-get install keepalived -y
  • Edit the keepalived configuration file /etc/keepalived/keepalived.conf (configure all three machines as shown below; priority must be set to a different value on each machine, as it determines precedence)

Configuration on 192.168.1.166

global_defs {
    router_id lb01             # an identifier; just a name
}
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
}
vrrp_instance VI_1 {
    state MASTER
    interface ens160
    virtual_router_id 52
    priority 999
    # interval, in seconds, between sync checks from the MASTER to the BACKUP load balancers
    advert_int 1
    # authentication type and password
    authentication {
    # authentication type: PASS or AH
        auth_type PASS
    # within one vrrp_instance, MASTER and BACKUP must use the same password to communicate
        auth_pass amber1
    }
    virtual_ipaddress {
        # virtual IP 192.168.1.99/24, bound to interface ens160 with label ens160:1; identical on master and backups
        192.168.1.99/24 dev ens160 label ens160:1
    }
    track_script {
        chk_haproxy
    }
}

Configuration on 192.168.1.167

global_defs {
    router_id lb01             # an identifier; just a name
}
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 52
    priority 888
    # interval, in seconds, between sync checks from the MASTER to the BACKUP load balancers
    advert_int 1
    # authentication type and password
    authentication {
    # authentication type: PASS or AH
        auth_type PASS
    # within one vrrp_instance, MASTER and BACKUP must use the same password to communicate
        auth_pass amber1
    }
    virtual_ipaddress {
        # virtual IP 192.168.1.99/24, bound to interface ens160 with label ens160:1; identical on master and backups
        192.168.1.99/24 dev ens160 label ens160:1
    }
    track_script {
        chk_haproxy
    }
}

Configuration on 192.168.1.168

global_defs {
    router_id lb01             # an identifier; just a name
}
vrrp_script chk_haproxy {
    script "killall -0 haproxy"
    interval 2
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens160
    virtual_router_id 52
    priority 777
    # interval, in seconds, between sync checks from the MASTER to the BACKUP load balancers
    advert_int 1
    # authentication type and password
    authentication {
    # authentication type: PASS or AH
        auth_type PASS
    # within one vrrp_instance, MASTER and BACKUP must use the same password to communicate
        auth_pass amber1
    }
    virtual_ipaddress {
        # virtual IP 192.168.1.99/24, bound to interface ens160 with label ens160:1; identical on master and backups
        192.168.1.99/24 dev ens160 label ens160:1
    }
    track_script {
        chk_haproxy
    }
}
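The chk_haproxy script relies on killall, which on Debian/Ubuntu comes from the psmisc package; it is worth confirming the check succeeds by hand while HAProxy is running (signal 0 only probes for the process, it sends nothing):

apt-get install -y psmisc
killall -0 haproxy && echo "haproxy is running"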

Useful keepalived commands

# Start keepalived
systemctl start keepalived
# Enable keepalived at boot
systemctl enable keepalived
# Restart keepalived
systemctl restart keepalived
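
After keepalived starts, the VIP should show up on the current MASTER, and stopping HAProxy there should move it to a BACKUP node within a few seconds; a quick failover check (run on 192.168.1.166 first, then on the node that takes over):

# The VIP should appear on ens160 with label ens160:1 on the current MASTER
ip addr show ens160 | grep 192.168.1.99
# Simulate a failure: stop the HAProxy container and watch the VIP move to a BACKUP node
docker stop haproxy

Clients should then keep connecting to graphd through the VIP, 192.168.1.99:3640, regardless of which node currently holds it.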

 

4. Miscellaneous

How do you deploy offline? Just switch the images over to a private registry and it works. Feel free to reach out if you run into any problems.
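
A minimal sketch of mirroring the images into a private registry (registry.example.com is a placeholder for your own registry address; the image references in docker-stack.yml would then be changed to match):

# On a machine with internet access, pull the public images
docker pull vesoft/nebula-metad:nightly
docker pull vesoft/nebula-storaged:nightly
docker pull vesoft/nebula-graphd:nightly
# Re-tag and push to the private registry (placeholder address)
docker tag vesoft/nebula-graphd:nightly registry.example.com/vesoft/nebula-graphd:nightly
docker push registry.example.com/vesoft/nebula-graphd:nightly
# Repeat the tag/push step for the metad and storaged images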