
Quickly setting up a Ceph environment with Docker

 
Source: http://blog.csdn.net/asd05txffh/article/details/51159416

To test Docker containers mounting a shared storage volume, I needed a Ceph environment. Due to network latency and similar issues, the rpm-based deployment described in the official guide kept failing partway through installation. Fortunately, the ceph repository [1] provides an all-in-one demo image that packs all of the Ceph components into a single container image, which is very convenient to use. Note, however, that the demo image is only meant for trying Ceph out and for simple tests; it must not be used in any production environment. For production deployments, the ceph repository also provides per-component images as well as ansible scripts for automated deployment.

1 Pull the ceph/demo image
[root@localhost vagrant]# docker  pull ceph/demo
Using default tag: latest
latest: Pulling from ceph/demo
759d6771041e: Pull complete 
8836b825667b: Pull complete 
c2f5e51744e6: Pull complete 
a3ed95caeb02: Pull complete 
2a3d79c9e3c9: Pull complete 
f78d0723009e: Pull complete 
fe5a2cfd20a6: Pull complete 
6711b6d1f909: Pull complete 
2d0412ff2a85: Pull complete 
Digest: sha256:dedc1e8266bdd376b831c543d309fce1bcd068d6b775513fab2ae639fb8630e6
Status: Downloaded newer image for ceph/demo:latest
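
To confirm the image is now available locally, you can list it (the tag, image ID, and size will vary with the image version):

docker images ceph/demo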
2 Start the ceph/demo container

docker run -d --net=host -v /etc/ceph:/etc/ceph --name=ceph -e MON_IP=172.28.0.2 -e CEPH_NETWORK=172.28.0.0/24 ceph/demo
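
A few notes on the flags: --net=host puts the container on the host's network stack so the monitor is reachable at the host's own address; -v /etc/ceph:/etc/ceph exposes the generated configuration and keyrings on the host; MON_IP and CEPH_NETWORK should be set to the host's IP and subnet.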

[root@localhost ceph]# docker  ps
CONTAINER ID        IMAGE               COMMAND             CREATED              STATUS              PORTS               NAMES
fa4d282fd3fe        ceph/demo           "/entrypoint.sh"    About a minute ago   Up About a minute                       ceph
[root@localhost ceph]# 
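Every Ceph daemon in this demo runs inside the single container. To look at them, open a shell in the container (assuming the container name ceph given above):

docker exec -it ceph bash

Inside, a process listing shows the monitor, one OSD, an MDS, the radosgw object gateway, and the ceph-rest-api all running: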

root@localhost:/# ps -ef
UID        PID  PPID  C STIME TTY          TIME CMD
root         1     0  0 01:52 ?        00:00:00 /usr/bin/python /usr/bin/ceph --cluster ceph -w
root        21     1  0 01:52 ?        00:00:00 ceph-mon --cluster ceph -i localhost --public-addr 172.28.0.2
root       180     1  0 01:52 ?        00:00:00 ceph-osd --cluster ceph -i 0 -k /var/lib/ceph/osd/ceph-0/keyring
root       390     1  0 01:52 ?        00:00:00 ceph-mds --cluster ceph -i 0
root       444     1  0 01:52 ?        00:00:00 radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -k /var/lib/ceph/radosgw/localhost/keyring --rgw-socket-path= --rgw-frontends=civetweb port=80
root       445     1  0 01:52 ?        00:00:00 /usr/bin/python /usr/bin/ceph-rest-api --cluster ceph -n client.admin
root       713     0  0 01:54 ?        00:00:00 bash
root       727   713  0 01:54 ?        00:00:00 ps -ef
root@localhost:/# 
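
Cluster status can also be queried from the host without keeping a shell open in the container:

docker exec ceph ceph -s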
3 Install the ceph client on the other machines in the cluster

# Install the rpm package
yum -y install ceph-common

# Copy /etc/ceph/ceph.client.admin.keyring from the ceph/demo host to the client
scp {ceph-mon}:/etc/ceph/ceph.client.admin.keyring {ceph-client}:/etc/ceph/

# Edit ceph.conf on the ceph client
vi /etc/ceph/ceph.conf
[global]
mon host = 172.28.0.2

# Check the ceph cluster status
[root@node2 ceph]# ceph -s
    cluster 4f677727-ef6d-456c-9318-69711d5896a9
     health HEALTH_OK
     monmap e1: 1 mons at {localhost=172.28.0.2:6789/0}
            election epoch 2, quorum 0 localhost
     mdsmap e5: 1/1/1 up {0=0=up:active}
     osdmap e16: 1 osds: 1 up, 1 in
            flags sortbitwise
      pgmap v19: 128 pgs, 9 pools, 2808 bytes data, 190 objects
            4297 MB used, 4272 MB / 8570 MB avail
                 128 active+clean
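
The minimal ceph.conf above works because the client also picks up the admin keyring from its default path under /etc/ceph. If ceph -s fails with an authentication error, verify that the keyring copied in the previous step is in place and that an authenticated call succeeds:

ls -l /etc/ceph/ceph.client.admin.keyring
ceph health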
4 Create a block device in Ceph

[root@node2 ceph]# rbd create --size 4096 docker_data
[root@node2 ceph]# rbd ls
docker_data
[root@node2 ceph]# 
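Note that --size is given in megabytes, so 4096 creates a 4 GB image. Details such as the object size, image format, and features can be inspected with:

rbd info docker_data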
5 Map it to a local block device

[root@node2 ceph]# rbd map docker_data --name client.admin 
[root@node2 ceph]# rbd showmapped
id pool image       snap device    
0  rbd  docker_data -    /dev/rbd0 
[root@node2 ceph]# 
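On some newer Ceph releases combined with older kernels, rbd map can fail because the kernel client does not support all of the image's default features (dmesg will mention unsupported features). A common workaround, not needed on this demo setup, is to disable the extra features before mapping:

rbd feature disable docker_data exclusive-lock object-map fast-diff deep-flatten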
6 Format the device

[root@node2 ceph]# mkfs.ext4  /dev/rbd0 
mke2fs 1.42.9 (28-Dec-2013)
Discarding device blocks: done
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=1024 blocks, Stripe width=1024 blocks
262144 inodes, 1048576 blocks
52428 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=1073741824
32 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks: 
    32768, 98304, 163840, 229376, 294912, 819200, 884736

Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting information: done 

[root@node2 ceph]#
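
Because RBD images are thin-provisioned, the device can be grown later; ext4 supports online growth, so the image can be resized and the filesystem expanded in place (sizes here are just examples):

rbd resize --size 8192 docker_data
resize2fs /dev/rbd0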
7 Mount it on a directory

[root@node2 ceph]# mount /dev/rbd0 /mnt/data
[root@node2 ceph]# mount |grep /mnt/data
/dev/rbd0 on /mnt/data type ext4 (rw,relatime,stripe=1024,data=ordered)
[root@node2 ceph]# 
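A manual rbd map and mount do not survive a reboot. One common way to make them persistent is the rbdmap facility shipped with ceph-common, plus an fstab entry that waits for the network; the paths and options below are a sketch under that assumption:

# /etc/ceph/rbdmap -- pool/image and credentials to map at boot
rbd/docker_data id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

# /etc/fstab -- the udev symlink /dev/rbd/<pool>/<image> points at the mapped device
/dev/rbd/rbd/docker_data  /mnt/data  ext4  defaults,noatime,_netdev  0 0

systemctl enable rbdmap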
8 Use the shared storage as a docker volume

docker run -v /mnt/data:/var/lib/mysql --name some-mariadb -e MYSQL_ROOT_PASSWORD=ffh-db -d mariadb
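
Since the MariaDB data directory now lives on the Ceph-backed mount, the data outlives the container itself; removing and recreating the container picks the database right back up:

docker rm -f some-mariadb
ls /mnt/data          # the mysql data files are still on the RBD-backed mount
docker run -v /mnt/data:/var/lib/mysql --name some-mariadb -e MYSQL_ROOT_PASSWORD=ffh-db -d mariadb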
[1] https://github.com/ceph

 
