
OpenStack: Replacing a Failed Ceph Disk

Replacing a Ceph OSD

In production, OSD disks in a Ceph cluster will inevitably fail from time to time. This article describes how to replace a failed disk in that scenario. The environment is an OpenStack deployment built with kolla: three HA nodes, each running three OSDs. We will remove and replace osd.6, osd.7 and osd.8.

1. First, gather and review the relevant information:

    Check the cluster's space usage:
    #docker exec -it ceph_mon ceph df
    GLOBAL:
        SIZE     AVAIL     RAW USED     %RAW USED 
        584G      548G       37548M          6.27 
    POOLS:
        NAME                          ID     USED       %USED     MAX AVAIL     OBJECTS 
        rbd                           0           0         0         1017M           0 
        .rgw.root                     1        1588         0         1017M           4 
        default.rgw.control           2           0         0         1017M           8 
        default.rgw.data.root         3         670         0         1017M           2 
        default.rgw.gc                4           0         0         1017M          32 
        default.rgw.log               5           0         0         1017M         127 
        images                        6      13286M     92.89         1017M        6674 
        volumes                       7         230         0         1017M           7 
        backups                       8           0         0         1017M           0 
        vms                           9       5519M     84.44         1017M        1495 
        default.rgw.users.uid         10        209         0         1017M           2 
        default.rgw.buckets.index     11          0         0         1017M           1 
        default.rgw.buckets.data      12          0         0         1017M           1 
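
    A per-OSD breakdown of the same capacity numbers makes it easier to see which disks are filling up; ceph osd df (available in this Jewel-based release) prints size, usage and weight per OSD:

    # docker exec -it ceph_mon ceph osd df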
        
    Check the OSD distribution and weights:
    # docker exec -it ceph_mon ceph osd tree
    ID WEIGHT  TYPE NAME              UP/DOWN REWEIGHT PRIMARY-AFFINITY 
    -1 9.00000 root default                                             
    -2 3.00000     host 192.168.1.132                                   
     0 1.00000         osd.0               up  1.00000          1.00000 
     3 1.00000         osd.3               up  1.00000          1.00000 
     6 1.00000         osd.6               up  1.00000          1.00000 
    -3 3.00000     host 192.168.1.130                                   
     1 1.00000         osd.1               up  1.00000          1.00000 
     5 1.00000         osd.5               up  1.00000          1.00000 
     8 1.00000         osd.8               up  1.00000          1.00000 
    -4 3.00000     host 192.168.1.131                                   
     2 1.00000         osd.2               up  1.00000          1.00000 
     4 1.00000         osd.4               up  1.00000          1.00000 
     7 1.00000         osd.7               up  1.00000          1.00000 
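
    To double-check which host holds each of the OSDs being replaced, ceph osd find reports an OSD's address and CRUSH location; a small sketch for the three target IDs:

    for id in 6 7 8; do
        docker exec -it ceph_mon ceph osd find $id    # prints the OSD's address and crush_location as JSON
    done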
    
    Dump and inspect the crushmap:
    # docker exec -it ceph_mon ceph osd getcrushmap -o /var/log/kolla/ceph/crushmap.bin
    got crush map from osdmap epoch 247
    # docker exec -it ceph_mon crushtool -d /var/log/kolla/ceph/crushmap.bin -o /var/log/kolla/ceph/crushmap
    # cd /var/lib/docker/volumes/kolla_logs/_data/ceph
    
    # more crushmap
    # begin crush map
    tunable choose_local_tries 0
    tunable choose_local_fallback_tries 0
    tunable choose_total_tries 50
    tunable chooseleaf_descend_once 1
    tunable chooseleaf_vary_r 1
    tunable straw_calc_version 1
    
    # devices
    device 0 osd.0
    device 1 osd.1
    device 2 osd.2
    device 3 osd.3
    device 4 osd.4
    device 5 osd.5
    device 6 osd.6
    device 7 osd.7
    device 8 osd.8
    
    # types
    type 0 osd
    type 1 host
    type 2 chassis
    type 3 rack
    type 4 row
    type 5 pdu
    type 6 pod
    type 7 room
    type 8 datacenter
    type 9 region
    type 10 root
    
    # buckets
    host 192.168.1.132 {
            id -2           # do not change unnecessarily
            # weight 3.000
            alg straw
            hash 0  # rjenkins1
            item osd.0 weight 1.000
            item osd.3 weight 1.000
            item osd.6 weight 1.000
    }
    host 192.168.1.130 {
            id -3           # do not change unnecessarily
            # weight 3.000
            alg straw
            hash 0  # rjenkins1
            item osd.1 weight 1.000
            item osd.5 weight 1.000
            item osd.8 weight 1.000
    }
    host 192.168.1.131 {
            id -4           # do not change unnecessarily
            # weight 3.000
            alg straw
            hash 0  # rjenkins1
            item osd.2 weight 1.000
            item osd.4 weight 1.000
            item osd.7 weight 1.000
    }
    root default {
            id -1           # do not change unnecessarily
            # weight 9.000
            alg straw
            hash 0  # rjenkins1
            item 192.168.1.132 weight 3.000
            item 192.168.1.130 weight 3.000
            item 192.168.1.131 weight 3.000
    }
    
    # rules
    rule replicated_ruleset {
            ruleset 0
            type replicated
            min_size 1
            max_size 10
            step take default
            step chooseleaf firstn 0 type host
            step emit
    }
    rule disks {
            ruleset 1
            type replicated
            min_size 1
            max_size 10
            step take default
            step chooseleaf firstn 0 type host
            step emit
    }
    
    # end crush map
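
    Nothing in the decompiled map needs to be changed for this procedure, but for reference the reverse path is to recompile the text map with crushtool -c and inject it with ceph osd setcrushmap; a sketch using the same in-container paths (the .new.bin filename is just an example):

    # docker exec -it ceph_mon crushtool -c /var/log/kolla/ceph/crushmap -o /var/log/kolla/ceph/crushmap.new.bin
    # docker exec -it ceph_mon ceph osd setcrushmap -i /var/log/kolla/ceph/crushmap.new.bin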
    
    Check the disk mounts:
    [root@control01 ceph]# df -h
    Filesystem               Size  Used Avail Use% Mounted on
    /dev/mapper/centos-root   88G   14G   74G  16% /
    devtmpfs                 9.8G     0  9.8G   0% /dev
    tmpfs                    9.8G     0  9.8G   0% /dev/shm
    tmpfs                    9.8G   20M  9.8G   1% /run
    tmpfs                    9.8G     0  9.8G   0% /sys/fs/cgroup
    /dev/sdd1                5.0G  4.8G  227M  96% /var/lib/ceph/osd/a007c495-e2e9-4a08-a565-83dfef1df30d
    /dev/sdb1                 95G  4.1G   91G   5% /var/lib/ceph/osd/b7267089-cae5-4e28-b9ff-a37c373c0d34
    /dev/sdc1                 95G  4.6G   91G   5% /var/lib/ceph/osd/c35eea02-f07d-4557-acdb-7280a571aaf9

    Check the cluster health:
    # docker exec -it ceph_mon ceph health detail
    HEALTH_ERR 1 full osd(s); 2 near full osd(s); full flag(s) set
    osd.8 is full at 95%
    osd.6 is near full at 90%
    osd.7 is near full at 85%
    full flag(s) set
    
    # docker exec -it ceph_mon ceph -s
        cluster 33932e16-1909-4d68-b085-3c01d0432adc
         health HEALTH_ERR
                1 full osd(s)
                2 near full osd(s)
                full flag(s) set
         monmap e2: 3 mons at {192.168.1.130=192.168.1.130:6789/0,192.168.1.131=192.168.1.131:6789/0,192.168.1.132=192.168.1.132:6789/0}
                election epoch 60, quorum 0,1,2 192.168.1.130,192.168.1.131,192.168.1.132
         osdmap e247: 9 osds: 9 up, 9 in
                flags nearfull,full,sortbitwise,require_jewel_osds
          pgmap v179945: 640 pgs, 13 pools, 18806 MB data, 8353 objects
                37548 MB used, 548 GB / 584 GB avail
                     640 active+clean
    Check the filesystem configuration file /etc/fstab:
    # cat /etc/fstab
    
    #
    # /etc/fstab
    # Created by anaconda on Mon Jan 22 13:32:42 2018
    #
    # Accessible filesystems, by reference, are maintained under '/dev/disk'
    # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
    #
    /dev/mapper/centos-root /                       xfs     defaults        0 0
    UUID=c2c088f8-a530-4099-a9ef-0fd41508d304 /boot                   xfs     defaults        0 0
    UUID=7FA3-61ED          /boot/efi               vfat    defaults,uid=0,gid=0,umask=0077,shortname=winnt 0 0
    UUID=b7267089-cae5-4e28-b9ff-a37c373c0d34 /var/lib/ceph/osd/b7267089-cae5-4e28-b9ff-a37c373c0d34 xfs defaults,noatime 0 0
    UUID=c35eea02-f07d-4557-acdb-7280a571aaf9 /var/lib/ceph/osd/c35eea02-f07d-4557-acdb-7280a571aaf9 xfs defaults,noatime 0 0
    UUID=a007c495-e2e9-4a08-a565-83dfef1df30d /var/lib/ceph/osd/a007c495-e2e9-4a08-a565-83dfef1df30d xfs defaults,noatime 0 0
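
    Depending on how the disk swap is done, the failed disk's mount and fstab entry usually need to be cleaned up as well (only after its OSD container has been stopped in step 3). A hypothetical sketch, assuming the 5G /dev/sdd1 with data directory a007c495-e2e9-4a08-a565-83dfef1df30d is the disk being replaced on this host:

    OSD_UUID=a007c495-e2e9-4a08-a565-83dfef1df30d
    umount /var/lib/ceph/osd/$OSD_UUID          # only after the matching ceph_osd container is stopped
    sed -i "/$OSD_UUID/d" /etc/fstab            # drop its mount entry so the node boots cleanly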
    
    Check the mapping between disk partitions and mounted filesystems:
    # lsblk|grep osd
    ├─sdb1                                                                                            8:17   0   95G  0 part /var/lib/ceph/osd/b7267089-cae5-4e28-b9ff-a37c373c0d34
    ├─sdc1                                                                                            8:33   0   95G  0 part /var/lib/ceph/osd/c35eea02-f07d-4557-acdb-7280a571aaf9
    ├─sdd1                                                                                            8:49   0    5G  0 part /var/lib/ceph/osd/a007c495-e2e9-4a08-a565-83dfef1df30d
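
    The data directories are named after the filesystem UUID rather than the OSD id, so it is worth confirming which directory (and therefore which device) belongs to each OSD before pulling a disk. Every OSD data directory contains a whoami file with its id; a quick check on the host:

    for d in /var/lib/ceph/osd/*; do
        echo "$d -> osd.$(cat $d/whoami)"
    done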


2. Reweight each OSD so its data migrates to the remaining OSDs; you can watch the migration in real time with ceph -w (a loop covering all three OSDs is sketched after the command below):

# docker exec -it ceph_mon ceph osd crush reweight osd.8 0.0  // run the same command to set the weight of osd.7 and osd.6 to 0
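
A minimal loop covering all three OSDs, assuming the same ceph_mon container (reweight them one at a time if you want to limit concurrent backfill):

for id in 6 7 8; do
    docker exec -it ceph_mon ceph osd crush reweight osd.$id 0.0    # drain the OSD's data to the rest of the cluster
done
docker exec -it ceph_mon ceph -w    # watch recovery/backfill progress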


3. Remove the OSDs from the cluster (a combined loop is sketched after these commands):

# docker exec -it ceph_mon ceph osd out osd.8   // repeat to mark osd.7 and osd.6 out
# docker exec -it ceph_mon ceph osd crush remove osd.8  // repeat to remove osd.7 and osd.6 from the CRUSH map
# docker exec -it ceph_mon ceph auth del osd.8 // repeat to delete the keys of osd.7 and osd.6
# docker stop ceph_osd_8    // on the other two servers, stop ceph_osd_7 and ceph_osd_6 the same way
# docker exec -it ceph_mon ceph osd rm osd.8  // repeat to remove osd.7 and osd.6 from the OSD map
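
The same sequence can be scripted for all three OSDs; a sketch, assuming each ceph_osd_N container is stopped on its own host before the final rm:

for id in 6 7 8; do
    docker exec -it ceph_mon ceph osd out osd.$id            # stop mapping data to the OSD
    docker exec -it ceph_mon ceph osd crush remove osd.$id   # remove it from the CRUSH map
    docker exec -it ceph_mon ceph auth del osd.$id           # delete its cephx key
    # docker stop ceph_osd_$id must be run on the host that owns this OSD
    docker exec -it ceph_mon ceph osd rm osd.$id             # remove it from the OSD map
done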

4. Throughout the whole procedure you can run # docker exec -it ceph_mon ceph -w to watch the cluster's data movement in real time, or # docker exec -it ceph_mon ceph -s to check the overall state.


# docker exec -it ceph_mon ceph -s
    cluster 33932e16-1909-4d68-b085-3c01d0432adc
     health HEALTH_OK
     monmap e2: 3 mons at {192.168.1.130=192.168.1.130:6789/0,192.168.1.131=192.168.1.131:6789/0,192.168.1.132=192.168.1.132:6789/0}
            election epoch 60, quorum 0,1,2 192.168.1.130,192.168.1.131,192.168.1.132
     osdmap e297: 6 osds: 6 up, 6 in
            flags sortbitwise,require_jewel_osds
      pgmap v181612: 640 pgs, 13 pools, 14884 MB data, 7296 objects
            30025 MB used, 540 GB / 569 GB avail
                 640 active+clean


As shown above, the three disks were removed successfully and the cluster's PGs have rebalanced across the remaining OSDs.

5. After the disks have been replaced, add the new OSD disks again with the kolla deployment tool. The second half of my other post, "Openstack 之 kolla 部署ceph" (deploying Ceph with kolla), covers expanding disks; a brief summary follows:

Add the new disks to the hosts, label each disk, and then re-run the deploy. The steps are as follows:

1). Label the OSD disks:


parted /dev/sdd -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP 1 -1

Note: when a whole disk is used as a single OSD, only the label KOLLA_CEPH_OSD_BOOTSTRAP is used. If the journal is placed on a separate SSD partition, use distinct labels: for example, label /dev/sdb KOLLA_CEPH_OSD_BOOTSTRAP_SDC and the journal partition /dev/sdh1 KOLLA_CEPH_OSD_BOOTSTRAP_SDC_J. A hypothetical example is sketched below.
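
A hypothetical labeling sketch for that split layout (the device names, the SDC suffix and the 10GB journal size follow the note above and are only examples; mklabel wipes the target disk, so only run it on empty disks):

parted /dev/sdb -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_SDC 1 -1        # data disk
parted /dev/sdh -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP_SDC_J 1 10G     # journal partition on the SSD
parted /dev/sdb print                                                             # the Name column should show the label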

2). Pre-deployment checks:

tools/kolla_ansible prechecks -i 3node

Note: 3node is the inventory file; replace it to match your environment.

The precheck reported that port 6780 was already in use. That port turned out to be held by the ceph_rgw container, so stop it temporarily (see the check sketched below): docker stop ceph_rgw
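
To confirm what is holding the port before stopping anything, a quick check on the host:

ss -tlnp | grep 6780          # shows the process listening on 6780
docker ps | grep ceph_rgw     # the radosgw container that owns the port in this deployment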

3). Deploy:

tools/kolla_ansible deploy -i 3node


Note: 3node is the inventory file; replace it to match your environment.

After the deployment finishes, start the ceph_rgw container that was stopped above:

docker start ceph_rgw
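
Once everything is back up, confirm that the replacement OSDs have joined and that the PGs return to active+clean, using the same commands as before:

docker exec -it ceph_mon ceph osd tree    # the new OSDs should appear under their hosts
docker exec -it ceph_mon ceph -s          # wait for HEALTH_OK and all PGs active+clean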



