Server A 192.168.31.231 mongod shard1_1:27017
mongod shard2_1:27018
mongod config1:20000
mongos1:30000
Server B 192.168.31.232 mongod shard1_2:27017
mongod shard2_2:27018
mongod config2:20000
mongos2:30000
Server C 192.168.31.233 mongod shard1_3:27017
mongod shard2_3:27018
mongod config3:20000
mongos3:30000
rs.initiate(config)
1.1
创建数据目录
Server A
mkdir /data/shard1_1
mkdir /data/shard2_1
mkdir /data/config
Server B
mkdir /data/shard1_2
mkdir /data/shard2_2
mkdir /data/config
Server C
mkdir /data/shard1_3
mkdir /data/shard2_3
mkdir /data/config
1.2
配置shard1的Replica Sets
Server A
[root@localhost bin]# /usr/local/mongo/bin/mongod --shardsvr --replSet shard1 --port 27017 --dbpath /data/shard1_1 --logpath /data/shard1_1/shard1_1.log --logappend --fork
Server B
[root@localhost bin]# /usr/local/mongo/bin/mongod --shardsvr --replSet shard1 --port 27017 --dbpath /data/shard1_2 --logpath /data/shard1_2/shard1_2.log --logappend --fork
Server C
[root@localhost bin]# /usr/local/mongo/bin/mongod --shardsvr --replSet shard1 --port 27017 --dbpath /data/shard1_3 --logpath /data/shard1_3/shard1_3.log --logappend --fork
用 mongo 连接其中一台机器的 27017
端口的 mongod,初始化 Replica Sets“shard1” ,执行:
[root@localhost bin]# ./mongo --port 27017
MongoDB shell version: 1.8.1
connecting to: 127.0.0.1:27017/
test
> config = {_id: 'shard1', members: [
... {_id: 0, host: '192.168.31.231:27017'},
... {_id: 1, host: '192.168.31.232:27017'},
... {_id: 2, host: '192.168.31.233:27017', arbiterOnly:true}]
... }
{
"_id" :
"shard1",
"members" : [
{
"_id" : 0,
"host" :
"192.168.31.231:27017"
},
{
"_id" : 1
,
"host" :
"192.168.31.232:27017"
},
{
"_id" : 2
,
"host" :
"192.168.31.233:27017",
"arbiterOnly":true
}
]
}
>
rs.initiate(config)
{
"info" :
"Config now saved locally. Should come online in about a minute.",
"ok" : 1
}
1.3 配置 shard2 所用到的 Replica Sets
Server A
[root@localhost bin]# /usr/local/mongo/bin/mongod --shardsvr --replSet shard2 --port 27018 --dbpath /data/shard2_1 --logpath /data/shard2_1/shard2_1.log --logappend --fork
Server B
[root@localhost bin]# /usr/local/mongo/bin/mongod --shardsvr --replSet shard2 --port 27018 --dbpath /data/shard2_2 --logpath /data/shard2_2/shard2_2.log --logappend --fork
Server C
[root@localhost bin]# /usr/local/mongo/bin/mongod --shardsvr --replSet shard2 --port 27018 --dbpath /data/shard2_3 --logpath /data/shard2_3/shard2_3.log --logappend --fork
用 mongo 连接其中一台机器的 27018
端口的 mongod,初始化 Replica Sets “shard2” ,执行:
[root@localhost bin]# ./mongo --port 27018
MongoDB shell version: 1.8.1
connecting to: 127.0.0.1:27018/
test
> config = {_id: 'shard2', members: [
... {_id: 0, host: '192.168.31.231:27018'},
... {_id: 1, host: '192.168.31.232:27018'},
... {_id: 2, host: '192.168.31.233:27018', arbiterOnly:true}]
... }
{
"_id" :
"shard2",
"members" : [
{
"_id" : 0,
"host" :
"192.168.31.231:27018"
},
{
"_id" : 1
,
"host" :
"192.168.31.232:27018"
},
{
"_id" : 2
,
"host" :
"192.168.31.233:27018",
"arbiterOnly":true
}
]
}
>
rs.initiate(config)
{ "ok" : 1
}
1.4 配置 3 台 Config Server
在 Server A、B、C 上执行:
/usr/local/mongo/bin/mongod --configsvr --dbpath /data/config --port 20000 --logpath /data/config/config.log --logappend --fork
23.4 配置 3 台 Route Process
在 Server A、B、C 上执行:
/usr/local/mongo/bin/mongos --configdb 192.168.31.231:20000,192.168.31.232:20000,192.168.31.233:20000 --port 30000 --chunkSize 1 --logpath /data/mongos.log --logappend --fork
[root@mongo1 bin]# netstat -tpln|grep mongo
tcp 0 0 0.0.0.0:30000 0.0.0.0:* LISTEN 1573/
mongos
tcp 0 0 0.0.0.0:20000 0.0.0.0:* LISTEN 1375/
mongod
tcp 0 0 0.0.0.0:27017 0.0.0.0:* LISTEN 1061/
mongod
tcp 0 0 0.0.0.0:27018 0.0.0.0:* LISTEN 1211/
mongod
[root@mongo1 bin]# ps aux|grep mongo
root 1061 2.0 4.1 3198136 41920 ? Sl 21:02 0:35 /usr/local/mongo/bin/mongod --shardsvr --replSet shard1 --port 27017 --dbpath /data/shard1_1 --logpath /data/shard1_1/shard1_1.log --logappend --
fork
root 1211 2.5 7.6 3202220 77716 ? Sl 21:09 0:33 /usr/local/mongo/bin/mongod --shardsvr --replSet shard2 --port 27018 --dbpath /data/shard2_1 --logpath /data/shard2_1/shard2_1.log --logappend --
fork
root 1375 1.3 3.7 487476 38484 ? Sl 21:17 0:12 /usr/local/mongo/bin/mongod --configsvr --dbpath /data/config --port 20000 --logpath /data/config/config.log --logappend --
fork
root 1573 0.8 0.7 240152 7320 ? Sl 21:28 0:02 /usr/local/mongo/bin/mongos --configdb 192.168.31.231:20000,192.168.31.232:20000,192.168.31.233:20000 --port 30000 --chunkSize 1 --logpath /data/mongos.log --logappend --
fork
root 1794 0.0 0.0 103244 848 pts/1 S+ 21:31 0:00
grep mongo
1.5 配置 Shard Cluster
连接到其中一台机器的端口 30000
的 mongos 进程,并切换到 admin 数据库做以下配置
[root@localhost bin]# ./mongo --port 30000
MongoDB shell version: 1.8.1
connecting to: 127.0.0.1:30000/
test
>
use admin
switched to db admin
>db.runCommand({addshard:
"shard1/192.168.31.231:27017,192.168.31.232:27017,192.168.31.233:27017"});
{ "shardAdded" :
"shard1",
"ok" : 1
}
>db.runCommand({addshard:
"shard2/192.168.31.231:27018,192.168.31.232:27018,192.168.31.233:27018"});
{ "shardAdded" :
"shard2",
"ok" : 1
}
>
进入mongos进程config库可以看到目前分片的情况:
mongos>
use config
switched to db config
mongos>
db.shards.find()
{ "_id" :
"shard1",
"host" :
"shard1/192.168.31.231:27017,192.168.31.232:27017,192.168.31.233:27017" }
{ "_id" :
"shard2",
"host" :
"shard2/192.168.31.231:27018,192.168.31.232:27018,192.168.31.233:27018" }
mongos>
1.6 激活数据库及集合的分片
db.runCommand({ enablesharding:"test" })
db.runCommand({ shardcollection: "test.users", key: { _id:1
}})
1.7 验证 Sharding 正常工作
连接到其中一台机器的端口 30000
的 mongos 进程,并切换到 test 数据库,以便添加测试数
据
use test
for(var i=1;i<=20000;i++) db.users.insert({id:i,addr_1:
"Beijing",addr_2:
"Shanghai"});
db.users.stats()
{
"sharded" : true,
"paddingFactorNote" :
"paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1
,
"capped" : false,
"ns" :
"test.users",
"count" : 20000
,
"numExtents" : 10
,
"size" : 2240000
,
"storageSize" : 5586944
,
"totalIndexSize" : 670432
,
"indexSizes" : {
"_id_" : 670432
},
"avgObjSize" : 112
,
"nindexes" : 1
,
"nchunks" : 5
,
"shards" : {
"shard1" : {
"ns" :
"test.users",
"count" : 8697
,
"size" : 974064
,
"avgObjSize" : 112
,
"numExtents" : 5
,
"storageSize" : 2793472
,
"lastExtentSize" : 2097152
,
"paddingFactor" : 1
,
"paddingFactorNote" :
"paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1
,
"capped" : false,
"nindexes" : 1
,
"totalIndexSize" : 294336
,
"indexSizes" : {
"_id_" : 294336
},
"ok" : 1
,
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId(
"558aabf892f5b70479ade020")
}
},
"shard2" : {
"ns" :
"test.users",
"count" : 11303
,
"size" : 1265936
,
"avgObjSize" : 112
,
"numExtents" : 5
,
"storageSize" : 2793472
,
"lastExtentSize" : 2097152
,
"paddingFactor" : 1
,
"paddingFactorNote" :
"paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
"userFlags" : 1
,
"capped" : false,
"nindexes" : 1
,
"totalIndexSize" : 376096
,
"indexSizes" : {
"_id_" : 376096
},
"ok" : 1
,
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId(
"558aacdd8bf5d360e64002dd")
}
}
},
"ok" : 1
}
mongos>
./mongo --port 27018
shard2:PRIMARY>
use test
switched to db test
shard2:PRIMARY>
show collections;
system.indexes
users
shard2:PRIMARY>
db.users.find().count()
11303 --和db.users.stats()查看的数据条数一样 20000=11303+8697
shard2:PRIMARY>
./mongo --port 27017
shard1:PRIMARY>
use test
switched to db test
shard1:PRIMARY>
show collections;
system.indexes
users
shard1:PRIMARY>
db.users.find().count()
8697 ----和db.users.stats()查看的数据条数一样
shard1:PRIMARY>
从SECONDARY读取数据:
shard2:SECONDARY>
show collections;
2015-06-24T21:46:11.634+0800 E QUERY Error: listCollections failed: {
"note" :
"from execCommand",
"ok" : 0,
"errmsg" :
"not master" }
查询时报错,说明SECONDARY节点不能执行查询操作。
执行db.getMongo().setSlaveOk(),从库就可以进行读操作了,可以控制从哪些分片上面可以读。
shard2:
shard2:SECONDARY>
db.getMongo().setSlaveOk()
shard2:SECONDARY>
show collections;
system.indexes
users
shard1:
shard1:SECONDARY>
db.getMongo().setSlaveOk()
shard1:SECONDARY>
show collections;
system.indexes
users
shard1:SECONDARY>
db.users.find().count();
8697
shard1:SECONDARY>
模拟故障:
[root@mongo1 bin]# ps aux|grep mongo
root 1061 1.9 8.1 3200192 82956 ? Sl 21:02 0:56 /usr/local/mongo/bin/mongod --shardsvr --replSet shard1 --port 27017 --dbpath /data/shard1_1 --logpath /data/shard1_1/shard1_1.log --logappend --
fork
root 1211 2.1 7.8 3205304 79928 ? Sl 21:09 0:52 /usr/local/mongo/bin/mongod --shardsvr --replSet shard2 --port 27018 --dbpath /data/shard2_1 --logpath /data/shard2_1/shard2_1.log --logappend --
fork
root 1375 1.8 4.1 489528 42180 ? Sl 21:17 0:37 /usr/local/mongo/bin/mongod --configsvr --dbpath /data/config --port 20000 --logpath /data/config/config.log --logappend --
fork
root 1573 0.9 0.7 240152 7340 ? Sl 21:28 0:13 /usr/local/mongo/bin/mongos --configdb 192.168.31.231:20000,192.168.31.232:20000,192.168.31.233:20000 --port 30000 --chunkSize 1 --logpath /data/mongos.log --logappend --
fork
root 1901 0.0 1.8 721952 19132 pts/0 Sl+ 21:38 0:00 ./mongo --port 27018
root 2130 0.0 0.0 103244 848 pts/1 S+ 21:51 0:00
grep mongo
[root@mongo1 bin]# kill -2 1061 --这里不能使用kill -9 ,否则会导致丢失数据
通过rs.status()可以查看复制集的状态:
我们可以看到192.168.31.231:27017处于宕机状态,192.168.31.233:27017接管变成了PRIMARY
shard1:PRIMARY>
rs.status()
{
"set" :
"shard1",
"date" : ISODate(
"2015-06-24T14:03:10.425Z"),
"myState" : 1
,
"members" : [
{
"_id" : 0,
"name" :
"192.168.31.231:27017",
"health" : 0,
"state" : 8
,
"stateStr" :
"(not reachable/healthy)",
"uptime" : 0,
"optime" : Timestamp(0, 0),
"optimeDate" : ISODate(
"1970-01-01T00:00:00Z"),
"lastHeartbeat" : ISODate(
"2015-06-24T14:03:09.083Z"),
"lastHeartbeatRecv" : ISODate(
"2015-06-24T13:51:38.834Z"),
"pingMs" : 0,
"lastHeartbeatMessage" :
"Failed attempt to connect to 192.168.31.231:27017; couldn't connect to server 192.168.31.231:27017 (192.168.31.231), connection attempt failed",
"configVersion" : -1
},
{
"_id" : 1
,
"name" :
"192.168.31.232:27017",
"health" : 1
,
"state" : 2
,
"stateStr" :
"SECONDARY",
"uptime" : 3242
,
"optime" : Timestamp(1435152718, 13
),
"optimeDate" : ISODate(
"2015-06-24T13:31:58Z"),
"lastHeartbeat" : ISODate(
"2015-06-24T14:03:09.814Z"),
"lastHeartbeatRecv" : ISODate(
"2015-06-24T14:03:09.813Z"),
"pingMs" : 1
,
"configVersion" : 1
},
{
"_id" : 2
,
"name" :
"192.168.31.233:27017",
"health" : 1
,
"state" : 1
,
"stateStr" :
"PRIMARY",
"uptime" : 3603
,
"optime" : Timestamp(1435152718, 13
),
"optimeDate" : ISODate(
"2015-06-24T13:31:58Z"),
"electionTime" : Timestamp(1435153900, 1
),
"electionDate" : ISODate(
"2015-06-24T13:51:40Z"),
"configVersion" : 1
,
"self" : true
}
],
"ok" : 1
,
"$gleStats" : {
"lastOpTime" : Timestamp(0, 0),
"electionId" : ObjectId(
"558ab5ec28f234f7b5abda48")
}
}
查看分片集合信息:
mongos>
db.collections.find()
{ "_id" :
"test.users",
"lastmod" : ISODate(
"2015-06-24T13:30:19.670Z"),
"dropped" : false,
"key" : {
"_id" : 1 },
"unique" : false,
"lastmodEpoch" : ObjectId(
"558ab0eb6a881136d7047be4") }
mongodb 分片+复制集
标签: