The cluster configuration is as follows:

Host     IP               Role
ceph-1   192.168.57.222   deploy, mon ×1, osd ×3
ceph-2   192.168.57.223   mon ×1, osd ×3
ceph-3   192.168.57.224   mon ×1, osd ×3

Excerpt from the ceph-deploy new output:

[ceph_deploy.new][DEBUG ] Resolving host ceph-3
[ceph_deploy.new][DEBUG ] Monitor ceph-3 at 192.168.57.224
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph-1', 'ceph-2', 'ceph-3']

Once the three monitors have formed quorum, the status output contains:

... ceph-3=192.168.57.224:6789/0} election epoch 6, quorum 0,1,2 ceph-1,ceph-2,ceph-3

The OSDs are prepared per disk (e.g. ceph-3:/dev/sdb ceph-3:/dev/sdc ceph-3:/dev/sdd, with --zap-disk to wipe the disks first) and then activated with ceph-deploy --overwrite-conf osd activate.
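For reference, a minimal sketch of that prepare/activate step run from the deploy node; the disk layout is an assumption (three data disks sdb/sdc/sdd per host, with ceph-deploy creating the data partition as partition 1):

# run on ceph-1 from the directory that holds ceph.conf
ceph-deploy osd prepare ceph-3:/dev/sdb ceph-3:/dev/sdc ceph-3:/dev/sdd --zap-disk
# activate takes HOSTNAME:PARTITION, i.e. the data partition created by prepare
ceph-deploy --overwrite-conf osd activate ceph-3:/dev/sdb1 ceph-3:/dev/sdc1 ceph-3:/dev/sdd1

The same two commands are repeated for ceph-1 and ceph-2.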
Set up the base environment:

# Set the hostname on each node
hostnamectl set-hostname ceph-1
hostnamectl set-hostname ceph-2
hostnamectl set-hostname ceph-3

# Push the cephadm SSH key to the other nodes
ssh-copy-id -i /etc/ceph/ceph.pub root@ceph-3

Create the mon and mgr daemons:

[root@ceph-1 ~]# ceph orch host add ceph-2
[root@ceph-1 ~]# ceph orch host add ceph-3
[root@ceph-1 ~]# ceph orch apply mgr --placement="3 ceph-1 ceph-2 ceph-3"
Scheduled mgr update...

Add the OSDs one host:device pair at a time:

[root@ceph-1 ~]# ceph orch daemon add osd ceph-2:/dev/sdb
Created osd(s) 1 on host 'ceph-2'

A fragment of the subsequent ceph orch ps output (image version 17.2.5), ending with the MDS daemon of the cephfs file system:

...  45s ago  51s  14.1M  -  17.2.5  cc65afd6173a  940a019d4c75
mds.cephfs.ceph-3.afnozf  ceph-3  ...
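For context, a condensed sketch of the cephadm flow assumed here; the bootstrap IP is a placeholder and the device name is illustrative:

cephadm bootstrap --mon-ip <ceph-1 IP>                      # run once on ceph-1
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-2            # push the cluster SSH key
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-3
ceph orch host add ceph-2
ceph orch host add ceph-3
ceph orch apply mon --placement="3 ceph-1 ceph-2 ceph-3"    # spread mon daemons
ceph orch apply mgr --placement="3 ceph-1 ceph-2 ceph-3"    # spread mgr daemons
ceph orch daemon add osd ceph-2:/dev/sdb                    # one OSD per host:device
ceph orch ps                                                # verify mon/mgr/osd/mds daemons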
This deployment uses a local package repository; the details are as follows.

Cluster configuration:

Host     IP               Role
ceph-1   192.168.56.100   deploy, mon ×1, osd ×3
ceph-2   192.168.56.101   mon ×1, osd ×3
ceph-3   192.168.56.103   mon ×1, osd ×3

On ceph-1/ceph-2/ceph-3, edit /etc/ntp.conf, comment out the four default server lines, and add a single server line pointing at ceph-admin. Restart the ntp service and check that each client is actually syncing against ceph-admin (a sketch of this change follows at the end of this passage).

The OSDs are prepared with a separate journal partition per disk, e.g. ceph-3:/dev/sdb:/dev/sde1 ceph-3:/dev/sdc:/dev/sde2 ceph-3:/dev/sdd:/dev/sde3. The arguments to osd activate use the format HOSTNAME:PARTITION, e.g. ceph-3:/dev/sdc1 ceph-3:/dev/sdd1.

I hit a small problem during deployment: one OSD did not come up (once all the other OSDs were deployed, simply redeploying the failed one fixed it). If nothing goes wrong, the cluster status should look like this:

... ceph-3=192.168.56.103:6789/0} election epoch 4, quorum 0,1,2 ceph-1,ceph-2,ceph-3
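As referenced above, a minimal sketch of the NTP client change on ceph-1/ceph-2/ceph-3; the commented lines are the stock CentOS 7 pool servers, and ceph-admin is assumed to be resolvable by name:

# /etc/ntp.conf
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
server ceph-admin iburst

systemctl restart ntpd
ntpq -p        # the ceph-admin peer should show a growing 'reach' value and a '*' once selected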
Hostname   IP               NIC mode   Memory   System disk   Data disk
ceph-1     192.168.200.43   NAT        2G       100G          20G
ceph-2     192.168.200.44   NAT        2G       100G          20G
ceph-3     ...              NAT        2G       100G          20G

Set the hostname on each node and re-run bash so the prompt picks it up:

[root@localhost ~]# hostnamectl set-hostname ceph-2
[root@ceph-2 ~]# bash
[root@localhost ~]# hostnamectl set-hostname ceph-3

Distribute SSH keys from the deploy node:

[root@ceph-1 ~]# ssh-copy-id ceph-1
[root@ceph-1 ~]# ssh-copy-id ceph-2
[root@ceph-1 ~]# ssh-copy-id ceph-3

Push the admin keyring to all nodes and make it readable:

$ ceph-deploy admin ceph-1 ceph-2 ceph-3
# add read permission on each node
$ chmod +r /etc/ceph/ceph.client.admin.keyring

Create the OSDs on each node's data disk (the fragment shows only the tail of the command for ceph-3):

... ceph-3 --data /dev/sdb

# check OSD status
[root@ceph-1 ceph]# ceph-deploy osd list ceph-1 ceph-2 ceph-3
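A sketch of the OSD creation implied by that fragment, assuming each node's 20G data disk shows up as /dev/sdb (luminous-style --data syntax):

ceph-deploy osd create ceph-1 --data /dev/sdb
ceph-deploy osd create ceph-2 --data /dev/sdb
ceph-deploy osd create ceph-3 --data /dev/sdb
# check OSD status
ceph-deploy osd list ceph-1 ceph-2 ceph-3
ceph osd tree        # all three OSDs should be up and in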
A df excerpt showing the OSD data partitions mounted under /var/lib/ceph/osd/:

...                                   6%   /var/lib/ceph/osd/ceph-0
/dev/sdc1   xfs   102M   5.6M   96M   6%   /var/lib/ceph/osd/ceph-...

The tail of an OSD removal:

... updated
[root@server3 ~]# ceph osd rm osd.3
removed osd.3
[root@server3 ~]# umount /var/lib/ceph/osd/ceph-3
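That fragment is only the end of the procedure; a typical full removal of osd.3 looks roughly like this (assuming the daemon runs on server3 and the cluster is left to rebalance afterwards):

ceph osd out osd.3                  # stop placing new data on it
systemctl stop ceph-osd@3           # stop the daemon on server3
ceph osd crush remove osd.3         # drop it from the CRUSH map
ceph auth del osd.3                 # delete its cephx key (prints "updated")
ceph osd rm osd.3                   # remove it from the OSD map (prints "removed osd.3")
umount /var/lib/ceph/osd/ceph-3     # finally unmount its data partition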
By default Ceph keeps three replicas of every object so that data is not easily lost. The server IP addresses are as follows (CentOS 7 is used here):

192.168.3.101  ceph-1
192.168.3.102  ceph-2
192.168.3.103  ceph-3

Excerpts of ceph -s during the deployment: while no manager daemon is running, the cluster reports

    health: HEALTH_WARN
            no active mgr
  services:
    mon: 3 daemons, quorum ceph-1,ceph-2,ceph-3

and otherwise (cluster id ending in 4420-a510-287f4ced25de) it reports

    health: HEALTH_OK
  services:
    mon: 3 daemons, quorum ceph-1,ceph-2,ceph-3
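The HEALTH_WARN ("no active mgr") clears once manager daemons are deployed; with ceph-deploy that is a single extra step, using the node names above:

ceph-deploy mgr create ceph-1 ceph-2 ceph-3
ceph -s        # health returns to HEALTH_OK with one active mgr and two standbys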
The OSD data directories are benchmarked with fio using libaio; the same argument set is repeated against each OSD mount point, with -bs=4M for the bandwidth runs and -bs=4k for the IOPS run:

-ioengine=libaio -bs=4M -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/var/lib/ceph/osd/ceph-...
-ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/var/lib/ceph/osd/ceph-...
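Only this shared argument tail survived in the fragments; a complete command against one OSD mount point would look roughly like the following (the job name and test file name are illustrative):

# sequential-write bandwidth run, 4M blocks, on osd.0's data directory
fio -name=osd0-seqwrite -rw=write -direct=1 -ioengine=libaio -bs=4M -size=1G \
    -numjobs=1 -runtime=1000 -group_reporting \
    -filename=/var/lib/ceph/osd/ceph-0/fio.test
# the 4k variant (-bs=4k, typically with -rw=randwrite) measures IOPS rather than bandwidth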
A journal partition is carved out of the NVMe device:

[root@mar-mon01 ~]# parted -s /dev/nvme0n1 mkpart xfs 85.9GB 107GB

Ceph journal disk: a few OSDs were not mounted afterwards; the disk listing shows, for example:

/dev/sdx4 other, ext4, mounted on /var/log
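A sketch of how the journal slices are typically laid out on the NVMe device, assuming several journals of roughly 21GB each (the 85.9GB-107GB range matches the partition shown above; the other offsets are illustrative):

parted -s /dev/nvme0n1 mkpart xfs 64.4GB 85.9GB
parted -s /dev/nvme0n1 mkpart xfs 85.9GB 107GB
ceph-disk list        # lists every partition and whether each ceph data/journal partition is mounted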
Such a rule then has the form:

take(root)       ============>  [default]            (note: this is the name of the root bucket)
choose(3, host)  ============>  [ceph-1, ceph-2, ceph-3]

The corresponding buckets in the decompiled CRUSH map look like this (excerpt; the first bucket is one of the three hosts, each holding three ~2 TB OSDs):

        hash 0  # rjenkins1
        item osd.3 weight 1.990
        item osd.4 weight 1.990
        item osd.5 weight 1.990
}
root default {
        ...
        alg straw
        hash 0  # rjenkins1
        item ceph-2 weight 5.970
        item ceph-1 weight 5.970
        item ceph-3 weight 5.970
}
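Spelled out as a complete rule in the decompiled CRUSH map, that take/choose sequence would look roughly like this (rule name and ruleset id are illustrative):

rule replicated_by_host {
        ruleset 1
        type replicated
        min_size 1
        max_size 10
        step take default                 # take(root): the root bucket is named "default"
        step choose firstn 3 type host    # choose(3, host): three distinct hosts
        step choose firstn 1 type osd     # then one OSD under each chosen host
        step emit
}

The stock replicated rule achieves the same host-level placement with a single "step chooseleaf firstn 0 type host".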
(Fragment of an OpenStack server list: instance 56d6a3a8-e6c4-4860-bd72-2e0aa0fa55f2 in BUILD/spawning state, power state NOSTATE, on network private=172.16.1.48.)
[root@centos7 osd]# cd ceph-3/current/5.14_head/
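This 5.14_head directory is the on-disk (filestore) directory of PG 5.14 on osd.3. To know which PG directory to descend into in the first place, the object-to-PG mapping can be queried from any node with the admin keyring; the pool and object names here are illustrative:

# map an object to its PG and acting OSD set
ceph osd map rbd rbd_data.1234.0000000000000000
# ... -> pg 5.xxxxxxxx (5.14) -> up ([3,0,5], p3) acting ([3,0,5], p3)
# on the primary OSD's host, the objects of that PG live under:
ls /var/lib/ceph/osd/ceph-3/current/5.14_head/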
Excerpt from ceph osd tree:

 ...                            up   1.00000   1.00000
  5   1.98999       osd.5       up   1.00000   1.00000
 -3   5.96997   host ceph-3