
Ceph 10.2.10 Installation and Configuration

Operating system

CentOS 7.2.1511

Host planning

192.168.0.106 ceph-node1 # ceph-deploy node
192.168.0.107 ceph-node2
192.168.0.108 ceph-node3
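
These name-to-address mappings are assumed to exist on every node. A minimal sketch of the matching /etc/hosts entries (run on each of the three hosts):

# cat >> /etc/hosts << EOF
192.168.0.106 ceph-node1
192.168.0.107 ceph-node2
192.168.0.108 ceph-node3
EOF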

Disk planning

/dev/sda system
/dev/sdb osd
/dev/sdc osd

Disable the firewall and SELinux

# setenforce 0
# sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
# yum -y install epel-release
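
The commands above only cover SELinux and the EPEL repository; to actually disable the firewall named in the heading, firewalld can be stopped and kept out of the boot sequence (a sketch, assuming the stock CentOS 7 firewalld service):

# systemctl stop firewalld
# systemctl disable firewalld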

Passwordless SSH login
On ceph-node1:

# ssh-keygen -t rsa
# ssh-copy-id ceph-node1
# ssh-copy-id ceph-node2
# ssh-copy-id ceph-node3
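
As a quick check, key-based login can be verified from ceph-node1; each command should print the remote hostname without prompting for a password:

# ssh ceph-node2 hostname
# ssh ceph-node3 hostname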

Time synchronization

# yum -y install ntp ntpdate
# ntpdate cn.pool.ntp.org
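
A one-shot ntpdate only sets the clock once; to keep the nodes in sync afterwards, the ntpd service installed above can be enabled at boot (a sketch using standard systemd commands):

# systemctl enable ntpd
# systemctl start ntpd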

Use a domestic (China) mirror

# export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/rpm-jewel/el7
# export CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc
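
These exports only live in the current shell. If deployment spans several sessions, they can be persisted for the deploy user (a sketch; the URLs are the same mirror endpoints exported above):

# cat >> ~/.bashrc << 'EOF'
export CEPH_DEPLOY_REPO_URL=http://mirrors.163.com/ceph/rpm-jewel/el7
export CEPH_DEPLOY_GPG_URL=http://mirrors.163.com/ceph/keys/release.asc
EOF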

Install ceph-deploy

# yum -y install ceph-deploy

Quick deployment

# mkdir my-cluster
# cd my-cluster

# ceph-deploy new ceph-node1
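
ceph-deploy new writes an initial ceph.conf into the working directory. Cluster-wide settings are often appended to it before installing; for example (a sketch with assumed values, adjust the network and replica count to your environment):

# cat >> ceph.conf << EOF
public network = 192.168.0.0/24
osd pool default size = 2
EOF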

# yum -y update ceph-deploy

# ceph-deploy install ceph-node1 ceph-node2 ceph-node3

# ceph-deploy mon create-initial

# ceph-deploy admin ceph-node1 ceph-node2 ceph-node3

List the disks
# ceph-deploy disk list ceph-node1

# ceph-deploy osd create ceph-node1:/dev/sdb ceph-node1:/dev/sdc

# ceph-deploy osd create ceph-node2:/dev/sdb ceph-node2:/dev/sdc

# ceph-deploy osd create ceph-node3:/dev/sdb ceph-node3:/dev/sdc
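
If ceph-deploy osd create fails because a disk carries leftover partitions or a filesystem, the Jewel-era tool can wipe it first with disk zap, for example:

# ceph-deploy disk zap ceph-node1:/dev/sdb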

View the OSD tree
# ceph osd tree
[root@ceph-node1 ~]# ceph osd tree
ID WEIGHT  TYPE NAME           UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1 0.08752 root default                                          
-2 0.02917     host ceph-node1                                   
 0 0.01459         osd.0            up  1.00000          1.00000 
 1 0.01459         osd.1            up  1.00000          1.00000 
-3 0.02917     host ceph-node2                                   
 2 0.01459         osd.2            up  1.00000          1.00000 
 3 0.01459         osd.3            up  1.00000          1.00000 
-4 0.02917     host ceph-node3                                   
 4 0.01459         osd.4            up  1.00000          1.00000 
 5 0.01459         osd.5            up  1.00000          1.00000 

Make the admin keyring readable on each node so the ceph CLI can use it:
# chmod +r /etc/ceph/ceph.client.admin.keyring

Check cluster health
# ceph health
[root@ceph-node1 ~]# ceph health
HEALTH_OK
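
For more detail than the one-line summary, ceph -s prints the full cluster status, including monitor quorum, OSD counts, and placement-group states:

# ceph -s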

Create a block device
# rbd create rbd1 --size 10240    # size is in MB, i.e. a 10 GB image

List the created RBD images
# rbd list
[root@ceph-node1 ~]# rbd list
rbd1

Show the image details
# rbd --image rbd1 info
[root@ceph-node1 ~]# rbd --image rbd1 info
rbd image 'rbd1':
    size 10240 MB in 2560 objects
    order 22 (4096 kB objects)
    block_name_prefix: rbd_data.10372ae8944a
    format: 2
    features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
    flags: 
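
The feature list above matters when mapping the image with the kernel RBD client: the stock CentOS 7.2 kernel does not support object-map, fast-diff, deep-flatten, or exclusive-lock, so rbd map fails until they are disabled. A common workaround (not part of the original walkthrough; features listed in dependency order):

# rbd feature disable rbd1 deep-flatten fast-diff object-map exclusive-lock
# rbd map rbd1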
