Ceph mimic 版本- 儲存叢集搭建Centos7
阿新 • • 發佈:2018-11-09
一、設定Linux主機基本配置:
#為每臺主機配置主機名稱
hostnamectl set-hostname "主機名"
[root@node0 ~]#cat << EOF >> /etc/hosts
192.168.10.14 ceph-deploy
192.168.10.13 ceph-mon0
192.168.10.12 node2
192.168.10.11 node1
192.168.10.10 node0
EOF
#測試ping
[ceph@ceph-deploy ~]$ for i in {ceph-mon0,node1,node2,node0,ceph-deploy} ;do ping -c1 $i ;done
配置ceph源:http://mirrors.163.com/ceph/
#配置ceph源
[root@ceph-deploy ~]# cat /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages for $basearch
baseurl=http://mirrors.163.com/ceph/rpm-mimic/el7/$basearch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.163.com/ceph/keys/release.asc
priority=1
[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.163.com/ceph/rpm-mimic/el7/noarch
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.163.com/ceph/keys/release.asc
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-mimic/el7/SRPMS
enabled=1
gpgcheck=0
type=rpm-md
gpgkey=https://mirrors.163.com/ceph/keys/release.asc
priority=1
關閉防火牆或者開放 6789/6800~6900 埠、關閉 SELINUX
setenforce 0
#修改 SELINUX 模式
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/sysconfig/selinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# 防火牆設定
$ sudo firewall-cmd --zone=public --add-port=6789/tcp --permanent
$ sudo firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent   # OSD/MGR 使用的埠範圍
$ sudo firewall-cmd --reload
# 或者關閉防火牆
$ sudo systemctl stop firewalld.service #停止 firewall
$ sudo systemctl disable firewalld.service #禁止 firewall 開機啟動
配置 ntp 服務,開啟時間服務,保證叢集伺服器時間統一;
yum install ntp ntpdate ntp-doc
系統優化類
#set max user processes
sed -i 's/4096/102400/' /etc/security/limits.d/20-nproc.conf
#set ulimit
grep -q "ulimit -SHn 102400" /etc/rc.local || echo "ulimit -SHn 102400" >> /etc/rc.local
#修改最大開啟檔案控制代碼數
grep -q "^* - sigpending 256612" /etc/security/limits.conf ||
cat >>/etc/security/limits.conf<<EOF
* soft nofile 102400
* hard nofile 102400
* soft nproc 102400
* hard nproc 102400
* - sigpending 256612
EOF
建立 Ceph 部署使用者:ceph-deploy 工具必須以普通使用者登入 Ceph 節點;
# 在 Ceph 叢集各節點進行如下操作
# 建立 ceph 特定使用者
$ sudo useradd -d /home/ceph -m ceph
$ echo "123456" | sudo passwd --stdin ceph
# 新增 sudo 許可權
echo "ceph ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph
sudo chmod 0440 /etc/sudoers.d/ceph
#每臺主機上配置免金鑰互信
ssh-keygen -t rsa -f ~/.ssh/id_rsa -P ''
ssh-copy-id "主機名"
#修改 ceph-deploy 管理節點上的 ~/.ssh/config 檔案
[ceph@ceph-deploy ~]$ cat .ssh/config
Host node1
Hostname node1
User ceph
Host node2
Hostname node2
User ceph
Host node0
Hostname node0
User ceph
Host ceph-mon0
Hostname ceph-mon0
User ceph
Host ceph-deploy
Hostname ceph-deploy
User ceph
備註:需要改下許可權 chmod 600 config
#測試
[ceph@ceph-deploy ~]$ for i in {ceph-mon0,node1,node2,node0,ceph-deploy} ;do ssh $i hostname;done
ceph-mon0
node1
node2
node0
ceph-deploy
二、Ceph 儲存叢集搭建
1、 建立叢集;ceph-deploy new ceph-mon0
#錯誤1:
[ceph@ceph-deploy ~]$ ceph-deploy new ceph-mon0
Traceback (most recent call last):
File "/bin/ceph-deploy", line 18, in <module>
from ceph_deploy.cli import main
File "/usr/lib/python2.7/site-packages/ceph_deploy/cli.py", line 1, in <module>
import pkg_resources
ImportError: No module named pkg_resources
#解決辦法:重新安裝pip解決
[ceph@ceph-deploy ceph-cluster]$ sudo yum install python2-pip
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy new ceph-mon0
...忽略...
[ceph_deploy.new][DEBUG ] Resolving host ceph-mon0
[ceph_deploy.new][DEBUG ] Monitor ceph-mon0 at 192.168.10.13
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-mon0']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.10.13']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[ceph@ceph-deploy ceph-cluster]$ ls #檢視檔案
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring
由於我們osd節點只有兩個,需要修改副本數量;
osd pool default size = 2
[ceph@ceph-deploy ceph-cluster]$ cat ceph.conf
[global]
fsid = 9c7c907b-dcbe-4ab1-b103-17f9d8aa2c2d
mon_initial_members = ceph-mon0
mon_host = 192.168.10.13
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
osd pool default size = 2 #新增項
2、安裝ceph:使用ceph-deploy為節點安裝ceph
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy install ceph-deploy ceph-mon0 node0 node1 node2
#忽略
[node2][DEBUG ] Complete!
[node2][INFO ] Running command: sudo ceph --version
[node2][DEBUG ] ceph version 13.2.1 (5533ecdc0fda920179d7ad84e0aa65a127b20d77) mimic (stable)
#ceph-deploy 將在各節點安裝 Ceph 。
#注:如果你執行過 ceph-deploy purge ,你必須重新執行這一步來安裝 Ceph
3、配置初始 monitor(s)、並收集所有金鑰
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy mon create-initial
[ceph_deploy.gatherkeys][INFO ] Storing ceph.client.admin.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mds.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-mgr.keyring
[ceph_deploy.gatherkeys][INFO ] keyring 'ceph.mon.keyring' already exists
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-osd.keyring
[ceph_deploy.gatherkeys][INFO ] Storing ceph.bootstrap-rgw.keyring
[ceph_deploy.gatherkeys][INFO ] Destroy temp directory /tmp/tmp7RWLGd
#完成上述操作後,當前目錄裡應該會出現這些金鑰環
[ceph@ceph-deploy ceph-cluster]$ ll
total 320
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-mds.keyring
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-mgr.keyring
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-osd.keyring
-rw------- 1 ceph ceph 113 Sep 4 23:25 ceph.bootstrap-rgw.keyring
-rw------- 1 ceph ceph 151 Sep 4 23:25 ceph.client.admin.keyring
-rw-rw-r-- 1 ceph ceph 225 Sep 4 22:59 ceph.conf
-rw-rw-r-- 1 ceph ceph 295037 Sep 4 23:25 ceph-deploy-ceph.log
-rw------- 1 ceph ceph 73 Sep 4 22:53 ceph.mon.keyring
4、建立 ceph 管理程序服務
# ceph-deploy mgr create ceph-mon0 ceph-deploy node0 node1 node2
# ceph -s
5、建立OSD節點
#檢視節點硬碟資訊
[ceph@ceph-deploy ceph-cluster]$ ceph-deploy disk list node0 node1 node2
[root@node1 ~]# lsblk
sdb 8:16 0 20G 0 disk
└─ceph--f805b345--30ad--4006--86e8--f00aac00eeb5-osd--block--068c7bab--2130--4862--8693--3e54f6b0357c 253:2 0 20G 0 lvm
# ceph-deploy osd create --data /dev/sdb node1
# ceph-deploy osd create --data /dev/sdb node2
# ceph-deploy osd create --data /dev/sdb node0
# ceph -s
[ceph@ceph-deploy ceph-cluster]$ sudo ceph -s
cluster:
id: 8345c764-cc94-402f-83f7-d4db29d79f89
health: HEALTH_WARN
no active mgr
services:
mon: 2 daemons, quorum ceph-mon0,ceph-deploy
mgr: ceph-mon0(active), standbys: ceph-deploy, node0, node1, node2
osd: 3 osds: 3 up, 3 in
data:
pools: 0 pools, 0 pgs
objects: 0 objects, 0 B
usage: 0 B used, 0 B / 0 B avail
pgs:
#完成建立
[ceph@ceph-deploy ceph-cluster]$ sudo ceph health
HEALTH_OK
三、啟用Dashboard
#使用如下命令即可啟用dashboard模組:
$ sudo ceph mgr module enable dashboard
預設情況下,儀表板的所有HTTP連線均使用SSL/TLS進行保護。
要快速啟動並執行儀表板,可以使用以下內建命令生成並安裝自簽名證書:
$ sudo ceph dashboard create-self-signed-cert
Self-signed certificate created
建立具有管理員角色的使用者:
$sudo ceph dashboard set-login-credentials admin admin
Username and password updated
檢視ceph-mgr服務:
預設下,儀表板的守護程式(即當前活動的管理器)將繫結到TCP埠8443或8080
[ceph@ceph-mon0 ~]$ sudo ceph mgr services
{
"dashboard": "https://ceph-mon0:8443/"
}
啟用Prometheus模組 和grafana 關聯
[ceph@ceph-mon0 ~]$ sudo ceph mgr module enable prometheus
[ceph@ceph-mon0 ~]$ ss -tlnp |grep 9283
LISTEN 0 5 :::9283 :::* users:(("ceph-mgr",pid=43370,fd=74))
[ceph@ceph-mon0 ~]$ sudo ceph mgr services
{
"dashboard": "https://ceph-mon0:8443/",
"prometheus": "http://ceph-mon0:9283/"
}
安裝Prometheus:
# tar -zxvf prometheus-*.tar.gz
# cd prometheus-*
# cp prometheus promtool /usr/local/bin/
# prometheus --version
prometheus, version 2.3.2 (branch: HEAD, revision: 71af5e29e815795e9dd14742ee7725682fa14b7b)
build user: root@5258e0bd9cc1
build date: 20180712-14:02:52
go version: go1.10.3
# mkdir /etc/prometheus && mkdir /var/lib/prometheus
# vim /usr/lib/systemd/system/prometheus.service ###配置啟動項
[Unit]
Description=Prometheus
Documentation=https://prometheus.io
[Service]
Type=simple
WorkingDirectory=/var/lib/prometheus
# 注意:prometheus.yml 是 Prometheus 設定檔(YAML),不是 systemd 環境變數檔,
# 由下方 ExecStart 的 --config.file 參數載入,不需要 EnvironmentFile
ExecStart=/usr/local/bin/prometheus \
--config.file /etc/prometheus/prometheus.yml \
--storage.tsdb.path /var/lib/prometheus/
[Install]
WantedBy=multi-user.target
# vim /etc/prometheus/prometheus.yml ##配置配置檔案
global:
scrape_interval: 15s
evaluation_interval: 15s
scrape_configs:
- job_name: 'prometheus'
static_configs:
- targets: ['192.168.100.116:9090']
- job_name: 'ceph'
static_configs:
- targets:
- 192.168.100.116:9283
- 192.168.100.117:9283
- 192.168.100.118:9283
# systemctl daemon-reload
# systemctl start prometheus
# systemctl status prometheus