程式人生 > 在CentOS7上部署MongoDB復制集和復制集的管理維護

在CentOS7上部署MongoDB復制集和復制集的管理維護

標籤:冗余、mmap、enable

MongoDB復制集的概述

復制集是額外的數據副本,是跨多個服務器同步數據的過程,復制集提供了冗余並增加了數據可用性,通過復制集可以對硬件故障和中斷的服務進行恢復。

復制集工作原理

  • MongoDB的復制集至少需要兩個節點。其中一個是主節點(primary),負責處理客戶端的請求,其余都是從節點(Secondary),負責復制主節點上的數據。
  • MongoDB各個節點常見的搭配方式為:一主一從或一主多從。主節點記錄其上的所有操作到oplog中,從節點定期輪詢主節點獲取這些操作,然後對自己的數據副本執行這些操作,從而保證從節點的數據與主節點一致。

復制集的特點

  • N個節點的群集
  • 任何節點可作為主節點
  • 所有寫入操作都在主節點上
  • 自動故障轉移
  • 自動恢復
MongoDB復制集部署

1.配置復制集

(1)創建數據文件和日誌文件存儲路徑

[root@localhost ~]# mkdir -p /data/mongodb/mongodb{2,3,4}
[root@localhost ~]# cd /data/mongodb/
[root@localhost mongodb]# mkdir logs
[root@localhost mongodb]# touch logs/mongodb{2,3,4}.log
[root@localhost mongodb]# cd logs/
[root@localhost logs]# ls
mongodb2.log  mongodb3.log  mongodb4.log
[root@localhost logs]# chmod 777 *.log

(2)編輯4個MongoDB實例的配置文件

先編輯MongoDB的配置文件,配置replSet參數值都為kgcrs,並復制3份,具體操作如下:

[root@localhost etc]# vim mongod.conf 
# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# Where and how to store data.
storage:
  dbPath: /var/lib/mongo
  journal:
    enabled: true
#  engine:
#  mmapv1:
#  wiredTiger:

# how the process runs
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
  timeZoneInfo: /usr/share/zoneinfo

# network interfaces
net:
  port: 27017
  bindIp: 0.0.0.0  # Listen on all interfaces; restrict to specific IPs in production.

#security:

#operationProfiling:

replication:
     replSetName: kgcrs    

#sharding:

## Enterprise-Only Options

#auditLog:

#snmp:

然後將mongodb2.conf中的port參數配置為27018,mongodb3.conf中的port參數配置為27019,mongodb4.conf中的port參數配置為27020。同樣也將storage中的dbPath和systemLog中的path參數修改為對應的路徑值。

(3)啟動4個MongoDB節點實例並查看進程信息

[root@localhost etc]# mongod -f /etc/mongod.conf --shutdown  //先關閉//
[root@localhost etc]# mongod -f /etc/mongod.conf //再開啟//
[root@localhost etc]# mongod -f /etc/mongod2.conf
[root@localhost etc]# mongod -f /etc/mongod3.conf 
[root@localhost etc]# mongod -f /etc/mongod4.conf 
[root@localhost etc]# netstat -ntap | grep mongod
tcp        0      0 0.0.0.0:27019           0.0.0.0:*               LISTEN      17868/mongod        
tcp        0      0 0.0.0.0:27020           0.0.0.0:*               LISTEN      17896/mongod        
tcp        0      0 0.0.0.0:27017           0.0.0.0:*               LISTEN      17116/mongod        
tcp        0      0 0.0.0.0:27018           0.0.0.0:*               LISTEN      17413/mongod

(4)配置三個節點的復制集

[root@localhost etc]# mongo
> rs.status()    //查看復制集//
{
    "info" : "run rs.initiate(...) if not yet done for the set",
    "ok" : 0,
    "errmsg" : "no replset config has been received",
    "code" : 94,
    "codeName" : "NotYetInitialized",
    "$clusterTime" : {
        "clusterTime" : Timestamp(0, 0),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}
> cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.126.132:27017"},{"_id":1,"host":"192.168.126.132:27018"},{"_id":2,"host":"192.168.126.132:27019"}]}  //添加復制集//
{
    "_id" : "kgcrs",
    "members" : [
        {
            "_id" : 0,
            "host" : "192.168.126.132:27017"
        },
        {
            "_id" : 1,
            "host" : "192.168.126.132:27018"
        },
        {
            "_id" : 2,
            "host" : "192.168.126.132:27019"
        }
    ]
}
> rs.initiate(cfg)   //初始化配置時保證從節點沒有數據//

(5)查看復制集狀態

啟動復制集後,再次通過rs.status()命令查看復制集的完整狀態信息

kgcrs:SECONDARY> rs.status()
{
    "set" : "kgcrs",
    "date" : ISODate("2018-07-17T07:18:52.047Z"),
    "myState" : 1,
    "term" : NumberLong(1),
    "syncingTo" : "",
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "optimes" : {
        "lastCommittedOpTime" : {
            "ts" : Timestamp(1531811928, 1),
            "t" : NumberLong(1)
        },
        "readConcernMajorityOpTime" : {
            "ts" : Timestamp(1531811928, 1),
            "t" : NumberLong(1)
        },
        "appliedOpTime" : {
            "ts" : Timestamp(1531811928, 1),
            "t" : NumberLong(1)
        },
        "durableOpTime" : {
            "ts" : Timestamp(1531811928, 1),
            "t" : NumberLong(1)
        }
    },
    "members" : [
        {
            "_id" : 0,
            "name" : "192.168.126.132:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",       //主節點//
            "uptime" : 2855,
            "optime" : {
                "ts" : Timestamp(1531811928, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2018-07-17T07:18:48Z"),
            "syncingTo" : "",
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "could not find member to sync from",
            "electionTime" : Timestamp(1531811847, 1),
            "electionDate" : ISODate("2018-07-17T07:17:27Z"),
            "configVersion" : 1,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 1,
            "name" : "192.168.126.132:27018",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",     //從節點//
            "uptime" : 95,
            "optime" : {
                "ts" : Timestamp(1531811928, 1),
                "t" : NumberLong(1)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1531811928, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2018-07-17T07:18:48Z"),
            "optimeDurableDate" : ISODate("2018-07-17T07:18:48Z"),
            "lastHeartbeat" : ISODate("2018-07-17T07:18:51.208Z"),
            "lastHeartbeatRecv" : ISODate("2018-07-17T07:18:51.720Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "192.168.126.132:27017",
            "syncSourceHost" : "192.168.126.132:27017",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 1
        },
        {
            "_id" : 2,
            "name" : "192.168.126.132:27019",
            "health" : 1,         
            "state" : 2,             
            "stateStr" : "SECONDARY",      //從節點//
            "uptime" : 95,
            "optime" : {
                "ts" : Timestamp(1531811928, 1),
                "t" : NumberLong(1)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1531811928, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2018-07-17T07:18:48Z"),
            "optimeDurableDate" : ISODate("2018-07-17T07:18:48Z"),
            "lastHeartbeat" : ISODate("2018-07-17T07:18:51.208Z"),
            "lastHeartbeatRecv" : ISODate("2018-07-17T07:18:51.822Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncingTo" : "192.168.126.132:27017",
            "syncSourceHost" : "192.168.126.132:27017",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 1
        }
    ],
    "ok" : 1,
    "operationTime" : Timestamp(1531811928, 1),
    "$clusterTime" : {
        "clusterTime" : Timestamp(1531811928, 1),
        "signature" : {
            "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
            "keyId" : NumberLong(0)
        }
    }
}

其中,health為1代表健康,0代表宕機。state為1代表主節點,為2代表從節點。

在復制集初始化配置時要保證從節點上沒有數據

MongoDB復制集切換

MongoDB復制集可以實現群集的高可用,當其中主節點出現故障時會自動切換到其他節點。也可手動進行復制集的主從切換。

1.故障轉移切換

[root@localhost etc]# ps aux | grep mongod   //查看進程//
root      17116  1.2  5.8 1546916 58140 ?       Sl   14:31   0:51 mongod -f /etc/mongod.conf
root      17413  1.0  5.7 1445624 57444 ?       Sl   14:34   0:39 mongod -f /etc/mongod2.conf
root      17868  1.2  5.5 1446752 55032 ?       Sl   15:05   0:23 mongod -f /etc/mongod3.conf
root      17896  0.8  4.7 1037208 47552 ?       Sl   15:05   0:16 mongod -f /etc/mongod4.conf
root      18836  0.0  0.0 112676   980 pts/1    S+   15:38   0:00 grep --color=auto mongod
[root@localhost etc]# kill -9 17116   ///殺死27017進程//
[root@localhost etc]# ps aux | grep mongod
root      17413  1.0  5.7 1453820 57456 ?       Sl   14:34   0:40 mongod -f /etc/mongod2.conf
root      17868  1.2  5.5 1454948 55056 ?       Sl   15:05   0:24 mongod -f /etc/mongod3.conf
root      17896  0.8  4.7 1037208 47552 ?       Sl   15:05   0:16 mongod -f /etc/mongod4.conf
root      18843  0.0  0.0 112676   976 pts/1    R+   15:38   0:00 grep --color=auto mongod
[root@localhost etc]# mongo --port 27019
kgcrs:PRIMARY> rs.status()

    "members" : [
        {
            "_id" : 0,
            "name" : "192.168.126.132:27017",
            "health" : 0,       //宕機狀態//
            "state" : 8,
            "stateStr" : "(not reachable/healthy)",
            "uptime" : 0,
            "optime" : {
                "ts" : Timestamp(0, 0),
                "t" : NumberLong(-1)

        {
            "_id" : 1,
            "name" : "192.168.126.132:27018",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",     //從節點//
            "uptime" : 1467,
            "optime" : {
                "ts" : Timestamp(1531813296, 1),
                "t" : NumberLong(2)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1531813296, 1),
                "t" : NumberLong(2)
            },

        {
            "_id" : 2,
            "name" : "192.168.126.132:27019",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",     //主節點//
            "uptime" : 2178,
            "optime" : {
                "ts" : Timestamp(1531813296, 1),
                "t" : NumberLong(2)

}

2.手動進行主從切換

kgcrs:PRIMARY> rs.freeze(30)      //暫停30s不參與選舉
kgcrs:PRIMARY> rs.stepDown(60,30)  //交出主節點位置,維持從節點狀態不少於60秒,等待30秒使主節點和從節點日誌同步

2018-07-17T15:46:19.079+0800 E QUERY    [thread1] Error: error doing query: failed: network error while attempting to run command 'replSetStepDown' on host '127.0.0.1:27019'  :
DB.prototype.runCommand@src/mongo/shell/db.js:168:1
DB.prototype.adminCommand@src/mongo/shell/db.js:186:16
rs.stepDown@src/mongo/shell/utils.js:1341:12
@(shell):1:1
2018-07-17T15:46:19.082+0800 I NETWORK  [thread1] trying reconnect to 127.0.0.1:27019 (127.0.0.1) failed
2018-07-17T15:46:19.085+0800 I NETWORK  [thread1] reconnect 127.0.0.1:27019 (127.0.0.1) ok
kgcrs:SECONDARY>    //交出主節點後立馬變成從節點//
kgcrs:SECONDARY> rs.status()
    "_id" : 0,
            "name" : "192.168.126.132:27017",
            "health" : 0,      //宕機狀態//
            "state" : 8,
            "stateStr" : "(not reachable/healthy)",
            "uptime" : 0,
            "optime" : {
                "ts" : Timestamp(0, 0),
                "t" : NumberLong(-1)
            },

        {
            "_id" : 1,
            "name" : "192.168.126.132:27018",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",       //主節點狀態//
            "uptime" : 1851,
            "optime" : {
                "ts" : Timestamp(1531813679, 1),
                "t" : NumberLong(3)

        {
            "_id" : 2,
            "name" : "192.168.126.132:27019",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",     //從節點狀態//
            "uptime" : 2563,
            "optime" : {
                "ts" : Timestamp(1531813689, 1),
                "t" : NumberLong(3)
MongoDB復制集的選舉原理

節點類型分為標準節點(host)、被動節點(passive)和仲裁節點(arbiter)。

  • 只有標準節點可能被選舉為活躍(primary)節點,有選舉權。被動節點有完整副本,不可能成為活躍節點,有選舉權。仲裁節點不復制數據,不可能成為活躍節點,只有選舉權。
  • 標準節點與被動節點的區別:priority值高者是標準節點,低者為被動節點。
  • 選舉規則是票數高者獲勝,priority是優先權為0~1000的值,相當於額外增加0~1000的票數。選舉結果:票數高者獲勝;若票數相同,數據新者獲勝。

1.配置復制集的優先級

1)重新配置4個節點的MongoDB復制集,設置兩個標準節點,一個被動節點和一個仲裁節點。

[root@localhost etc]# mongo
> cfg={"_id":"kgcrs","members":[{"_id":0,"host":"192.168.126.132:27017","priority":100},{"_id":1,"host":"192.168.126.132:27018","priority":100},{"_id":2,"host":"192.168.126.132:27019","priority":0},{"_id":3,"host":"192.168.126.132:27020","arbiterOnly":true}]}
> rs.initiate(cfg)     //重新配置//
kgcrs:SECONDARY> rs.isMaster()
{
    "hosts" : [                 //標準節點//
        "192.168.126.132:27017",
        "192.168.126.132:27018"
    ],
    "passives" : [             //被動節點//
        "192.168.126.132:27019"
    ],
    "arbiters" : [            //仲裁節點//
        "192.168.126.132:27020"

2)模擬主節點故障

如果主節點出現故障,另一個標準節點將會選舉成為新的主節點

[root@localhost etc]# mongod -f /etc/mongod.conf --shutdown   //標準節點27017//
[root@localhost etc]# mongo --port 27018   //此時會選舉第二個標準節點為主節點//
kgcrs:PRIMARY> rs.status()
    "_id" : 0,
            "name" : "192.168.126.132:27017",
            "health" : 0,      //宕機狀態//
            "state" : 8,
            "stateStr" : "(not reachable/healthy)",
            "uptime" : 0,
            "optime" : {
                "ts" : Timestamp(0, 0),
                "t" : NumberLong(-1)
    "_id" : 1,
            "name" : "192.168.126.132:27018",
            "health" : 1,
            "state" : 1,        
            "stateStr" : "PRIMARY",    //標準節點//
            "uptime" : 879,
            "optime" : {
                "ts" : Timestamp(1531817473, 1),
                "t" : NumberLong(2)
    "_id" : 2,
            "name" : "192.168.126.132:27019",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",   //被動節點//
            "uptime" : 569,
            "optime" : {
                "ts" : Timestamp(1531817473, 1),
                "t" : NumberLong(2)
        "_id" : 3,
            "name" : "192.168.126.132:27020",
            "health" : 1,
            "state" : 7,
            "stateStr" : "ARBITER",   //仲裁節點//
            "uptime" : 569,     

3)模擬所有標準節點出現故障

所有標準節點都出現故障,被動節點也不能成為主節點

[root@localhost etc]# mongod -f /etc/mongod2.conf --shutdown //關閉標準節點27018//
[root@localhost etc]# mongo --port 27019
kgcrs:SECONDARY> rs.status()

            "_id" : 0,
            "name" : "192.168.126.132:27017",
            "health" : 0,     //宕機狀態//
            "state" : 8,
            "stateStr" : "(not reachable/healthy)",
            "uptime" : 0,
            "_id" : 1,
            "name" : "192.168.126.132:27018",
            "health" : 0,      //宕機狀態//
            "state" : 8,
            "stateStr" : "(not reachable/healthy)",
            "uptime" : 0,
            "_id" : 2,
            "name" : "192.168.126.132:27019",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",   //被動節點//
            "uptime" : 1403,

            "_id" : 3,
            "name" : "192.168.126.132:27020",
            "health" : 1,
            "state" : 7,
            "stateStr" : "ARBITER",    //仲裁節點//
MongoDB復制集管理

1.配置允許在從節點讀取數據

默認MongoDB復制集的從節點不能讀取數據,可以使用rs.slaveOk()命令允許能夠在從節點讀取數據。

[root@localhost etc]# mongo --port 27017
kgcrs:SECONDARY> show dbs    //讀取不到數據庫信息//
2018-07-17T17:11:31.570+0800 E QUERY    [thread1] Error: listDatabases failed:{
    "operationTime" : Timestamp(1531818690, 1),
    "ok" : 0,
    "errmsg" : "not master and slaveOk=false",
    "code" : 13435,
    "codeName" : "NotMaste
    kgcrs:SECONDARY> rs.slaveOk()
kgcrs:SECONDARY> show dbs
admin   0.000GB
config  0.000GB
local   0.000GB

2.查看復制狀態信息

可以使用 rs.printReplicationInfo()和rs.printSlaveReplicationInfo()命令查看復制集狀態。

kgcrs:SECONDARY> rs.printReplicationInfo()
configured oplog size:   990MB
log length start to end: 2092secs (0.58hrs)
oplog first event time:  Tue Jul 17 2018 16:41:48 GMT+0800 (CST)
oplog last event time:   Tue Jul 17 2018 17:16:40 GMT+0800 (CST)
now:                     Tue Jul 17 2018 17:16:46 GMT+0800 (CST)
kgcrs:SECONDARY>  rs.printSlaveReplicationInfo()
source: 192.168.126.132:27017
    syncedTo: Tue Jul 17 2018 17:16:50 GMT+0800 (CST)
    0 secs (0 hrs) behind the primary 
source: 192.168.126.132:27019
    syncedTo: Tue Jul 17 2018 17:16:50 GMT+0800 (CST)
    0 secs (0 hrs) behind the primary

3.部署認證復制

kgcrs:PRIMARY> use admin
kgcrs:PRIMARY> db.createUser({"user":"root","pwd":"123","roles":["root"]})
[root@localhost ~]# vim /etc/mongod.conf   //分別編輯四個配置文件//
....
security:
    keyFile: /usr/bin/kgcrskey1      //驗證路徑//
    clusterAuthMode: keyFile        //驗證類型//
[root@localhost ~]# vim /etc/mongod2.conf 
[root@localhost ~]# vim /etc/mongod3.conf 
[root@localhost ~]# vim /etc/mongod4.conf 
[root@localhost bin]# echo "kgcrs key"> kgcrskey1   //生成4個實例的密鑰文件//
[root@localhost bin]# echo "kgcrs key"> kgcrskey2
[root@localhost bin]# echo "kgcrs key"> kgcrskey3
[root@localhost bin]# echo "kgcrs key"> kgcrskey4
[root@localhost bin]# chmod 600 kgcrskey{1..4}
[root@localhost bin]# mongod -f /etc/mongod.conf   //重啟4個實例//
[root@localhost bin]# mongod -f /etc/mongod2.conf
[root@localhost bin]# mongod -f /etc/mongod3.conf
[root@localhost bin]# mongod -f /etc/mongod4.conf
[root@localhost bin]# mongo --port 27017  //進入標準節點中//
kgcrs:PRIMARY> show dbs       //無法查看數據庫//
kgcrs:PRIMARY> rs.status()   //無法查看復制集//
kgcrs:PRIMARY> use admin    //身份登錄驗證//
kgcrs:PRIMARY> db.auth("root","123")
kgcrs:PRIMARY> show dbs     //可以查看數據庫//
admin   0.000GB
config  0.000GB
local   0.000GB
kgcrs:PRIMARY> rs.status()  //可以查看復制集//
"_id" : 0,
            "name" : "192.168.126.132:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",   
            "uptime" : 411,
"_id" : 1,
            "name" : "192.168.126.132:27018",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",    
            "uptime" : 324,

            "_id" : 2,
            "name" : "192.168.126.132:27019",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 305,
"_id" : 3,
            "name" : "192.168.126.132:27020",
            "health" : 1,
            "state" : 7,
            "stateStr" : "ARBITER",
            "uptime" : 280,

在CentOS7上部署MongoDB復制集和復制集的管理維護