# Edit /etc/sysconfig/network-scripts/ifcfg-eth0
# Note: depending on the NIC, the file may have a name other than ifcfg-eth0
sudo sed -i 's/ONBOOT="no"/ONBOOT="yes"/g' /etc/sysconfig/network-scripts/ifcfg-eth0
echo -e '\nDNS1="114.114.114.114"' >> /etc/sysconfig/network-scripts/ifcfg-eth0
service network restart
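A quick sanity check (illustrative; the interface name and DNS value follow the example above) confirms the NIC came up and the resolver was applied:
ip addr show eth0           # the interface should now be UP with an address
cat /etc/resolv.conf        # DNS1 should have been propagated here after the restart
ping -c 3 114.114.114.114   # basic reachability test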
# Back up the base repo and switch to the Huawei Cloud mirror. The repo file
# paths and URLs below were mangled in the original; they are reconstructed
# from the standard Huawei Cloud mirror instructions.
cp -a /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.bak
wget -O /etc/yum.repos.d/CentOS-Base.repo https://repo.huaweicloud.com/repository/conf/CentOS-7-reg.repo   # pick the file matching your CentOS release
yum clean all
yum makecache
# Enable EPEL and point it at the mirror as well
yum install epel-release -y
cp -a /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
sed -i "s/#baseurl/baseurl/g" /etc/yum.repos.d/epel*
sed -i "s/metalink/#metalink/g" /etc/yum.repos.d/epel*
sed -i "s@https\?://download.fedoraproject.org/pub@https://repo.huaweicloud.com@g" /etc/yum.repos.d/epel*
yum update -y
systemctl stop firewalld.service
systemctl disable firewalld.service
systemctl status firewalld.service
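This lab setup simply disables the firewall. Where firewalld must stay on, a sketch of the equivalent openings, using the standard Ceph default ports (3300/6789 for monitors, 6800-7300 for OSD/MDS/MGR daemons, 8443 for the dashboard):
firewall-cmd --permanent --add-port=3300/tcp --add-port=6789/tcp   # mon
firewall-cmd --permanent --add-port=6800-7300/tcp                  # osd/mds/mgr
firewall-cmd --permanent --add-port=8443/tcp                       # dashboard
firewall-cmd --reload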
sudo yum remove docker docker-common docker-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
# Fetch the docker-ce repo from the Huawei Cloud mirror (the URLs were stripped
# from the original; these are the standard Huawei Cloud mirror commands)
wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
sudo sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
sudo yum makecache fast
sudo yum install docker-ce -y
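The original never starts the Docker daemon, but the login and config steps below need it running; a minimal addition:
sudo systemctl enable --now docker   # start Docker now and enable it at boot
docker --version                     # confirm the install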
sudo docker login --username=漱石者枕夏目 registry.cn-hangzhou.aliyuncs.com
sudo mkdir -p /etc/docker
# The registry accelerator URL was lost from the original; substitute your own mirror address
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["<your-registry-mirror-url>"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
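To verify that the mirror configuration took effect, two quick checks (hello-world is just a convenient public test image):
sudo docker info | grep -A 1 'Registry Mirrors'   # should list the mirror from daemon.json
sudo docker run --rm hello-world                  # end-to-end pull through the mirror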
yum install epel-release -y
yum install python36 -y
# The download URL was stripped from the original; the standard 3.6-compatible
# location for get-pip.py is used here
curl -O https://bootstrap.pypa.io/pip/3.6/get-pip.py
/usr/bin/python3.6 get-pip.py
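A short check that the interpreter and pip landed where cephadm expects them:
/usr/bin/python3.6 -V                  # should print Python 3.6.x
/usr/bin/python3.6 -m pip --version    # pip should report itself bound to Python 3.6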
# NTP server (192.168.141.20): install chrony and allow the subnet to sync from it
yum -y install chrony
vim /etc/chrony.conf
###################
allow 192.168.141.0/24
###################
systemctl enable chronyd
systemctl restart chronyd
# NTP clients: install chrony and point it at the server
yum -y install chrony
echo "server 192.168.141.20 iburst" > /etc/chrony.conf   # replaces the whole config with a single server line
systemctl enable chronyd
systemctl restart chronyd
chronyc sources
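Interpreting the result: a client is healthy once chrony has selected the server. Two quick checks:
chronyc tracking     # "Leap status : Normal" and a small offset mean the clock is synced
chronyc sources -v   # the 192.168.141.20 line should carry the '^*' selected marker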
ssh-keygen -t rsa -C "limengkai@zettakit"
ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@192.168.141.23
ssh-copy-id -f -i /root/.ssh/id_rsa.pub root@192.168.141.24
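Passwordless SSH can be confirmed before moving on (any remote command works; hostname is used here):
ssh root@192.168.141.23 hostname   # should print the remote hostname with no password prompt
ssh root@192.168.141.24 hostname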
cephadm is a new way of deploying Ceph in containers.
Per the recommendation on the Ceph website, give each node more than 24 GB.
# Fetch the standalone cephadm script (URL reconstructed from the official Octopus docs)
curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
chmod +x cephadm
./cephadm --help
./cephadm add-repo --release octopus
./cephadm install
which cephadm   # /usr/sbin/cephadm
mkdir -p /etc/ceph
# cephadm bootstrap --mon-ip <your-mon-ip>
# the --docker flag makes cephadm use Docker instead of the default podman
cephadm --docker bootstrap --mon-ip 192.168.141.20
cephadm shell
cephadm shell -- ceph -s
cephadm add-repo --release octopus
cephadm install ceph-common   # then the ceph commands can be used directly
ceph -v
ceph status
To add each new host to the cluster, perform two steps:
Install the cluster’s public SSH key in the new host’s root user’s authorized_keys file:
ssh-copy-id -f -i /etc/ceph/ceph.pub root@<new-host>
For example:
ssh-copy-id -f -i /etc/ceph/ceph.pub root@host2
ssh-copy-id -f -i /etc/ceph/ceph.pub root@host3
Tell Ceph that the new node is part of the cluster:
ceph orch host add <new-host>
For example:
ceph orch host add host2
ceph orch host add host3
ssh-copy-id -f -i /etc/ceph/ceph.pub root@192.168.141.23
ssh-copy-id -f -i /etc/ceph/ceph.pub root@192.168.141.24
ceph orch host add host-192-168-141-23
ceph orch host add host-192-168-141-24
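As a check, the orchestrator should now report all three hosts:
ceph orch host ls   # host-192-168-141-20/23/24 should all be listed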
ceph config set mon public_network <mon-cidr-network>
ceph config set mon public_network 192.168.0.0/16
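The setting can be read back to confirm it was stored:
ceph config get mon public_network   # should print 192.168.0.0/16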
ceph orch device ls
ceph orch device zap host-192-168-141-20 /dev/sdb --force
ceph orch device zap host-192-168-141-23 /dev/sdb --force
ceph orch device zap host-192-168-141-24 /dev/sdb --force
ceph orch daemon add osd host-192-168-141-20:/dev/sdb
ceph orch daemon add osd host-192-168-141-23:/dev/sdb
ceph orch daemon add osd host-192-168-141-24:/dev/sdb
ceph orch device ls
# orch device ls [<hostname>...] [plain|json|json-pretty|yaml] [--refresh] [--wide]   List devices on a host
# orch device zap <hostname> <path> [--force]   Zap (erase!) a device so it can be re-used
# The three OSD devices were created successfully:
[root@host-192-168-141-20 ~]# ceph orch device ls
Hostname Path Type Serial Size Health Ident Fault Available
host-192-168-141-20 /dev/sdb hdd 32007f50-1e38-4a84-8f63-1e69c674f43d 53.6G Unknown N/A N/A No
host-192-168-141-23 /dev/sdb hdd 0652ea45-425d-40cd-bd20-b2e50123e9bf 32.2G Unknown N/A N/A No
host-192-168-141-24 /dev/sdb hdd 32ec781d-cea2-4f67-b1e1-78e7a1104940 32.2G Unknown N/A N/A No
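Available shows No because the disks are now consumed by OSDs. To watch the new daemons come up:
ceph orch ps --daemon-type osd   # each osd container should reach the running state
ceph osd tree                    # the three OSDs should appear under their hosts as "up"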
ceph osd pool create cephfs_data 32
ceph osd pool create cephfs_metadata 32
ceph fs new my_fs cephfs_metadata cephfs_data
ceph fs ls
# Specifying <pg_num> when creating a pool is mandatory because it cannot be
# calculated automatically. A common rule of thumb: with fewer than 5 OSDs,
# pg_num can be set to 128 (32 is used above for this small test cluster).
# ceph orch apply mds <fs-name> --placement="<num-daemons> [<host1> ...]"
ceph orch apply mds my_fs --placement="host-192-168-141-20 host-192-168-141-23 host-192-168-141-24"
ceph fs ls
# output: name: my_fs, metadata pool: cephfs_metadata, data pools: [cephfs_data ]
ceph mds stat
# output: my_fs:1 {0=my_fs.host-192-168-141-20.tfeucj=up:active} 2 up:standby
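With the MDS active, the filesystem can be mounted. A minimal kernel-client sketch, assuming the admin key is acceptable for a test mount (the mount point is arbitrary):
mkdir -p /mnt/my_fs
mount -t ceph 192.168.141.20:6789:/ /mnt/my_fs -o name=admin,secret=$(ceph auth get-key client.admin)
df -h /mnt/my_fs   # the CephFS capacity should now be visible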
radosgw-admin realm create --rgw-realm=my_realm --default
radosgw-admin zonegroup create --rgw-zonegroup=my_zonegroup --master --default
radosgw-admin zone create --rgw-zonegroup=my_zonegroup --rgw-zone=my_zone --master --default
radosgw-admin period update --rgw-realm=my_realm --commit
ceph orch apply rgw my_realm my_zone --placement="host-192-168-141-20"
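Once the rgw daemon is running, a user is needed before S3 access works; the uid and display name below are hypothetical:
ceph orch ps --daemon-type rgw                                        # wait for the daemon to reach running
radosgw-admin user create --uid=testuser --display-name="Test User"   # prints the S3 access/secret keys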
ceph osd pool create my_nfs_pool 64
ceph orch apply nfs my_nfs my_nfs_pool nfs-ns
ceph osd pool application enable my_nfs_pool rbd
# rbd (block device) is used here; a pool is normally enabled for only one
# application type. The other two types are cephfs (file system) and rgw
# (object storage), e.g.:
# ceph osd pool application enable my_nfs_pool cephfs
ceph dashboard ac-user-create kk lmk@19980312! administrator
ceph mgr services
# Visit the dashboard:
# https://192.168.141.20:8443/#/dashboard
# username: kk
# password: lmk@19980312!
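Note that newer Ceph releases refuse an inline password and read it from a file instead; a sketch of the equivalent for those versions:
echo 'lmk@19980312!' > /tmp/dashboard-pass.txt
ceph dashboard ac-user-create kk -i /tmp/dashboard-pass.txt administrator
rm -f /tmp/dashboard-pass.txt   # don't leave the password on disk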
Ceph Dashboard is now available at:
    URL: https://host-192-168-141-20:8443/
    User: admin
    Password: 03d5auyq0n
You can access the Ceph CLI with:
    sudo /usr/sbin/cephadm shell --fsid 2aa7de1c-497a-11eb-b926-fa163e717f07 -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
Please consider enabling telemetry to help improve Ceph:
    ceph telemetry on
For more information see:
    https://docs.ceph.com/docs/master/mgr/telemetry/
Bootstrap complete.
# To tear the whole cluster down again:
cephadm rm-cluster --fsid 1064116e-4976-11eb-b4ae-fa163e717f07 --force