Single Host Ceph Server
Clean CentOS 8
=Basic stuff and cephadm=
yum install -y python3 podman chrony lvm2 wget
wget -O /root/cephadm https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
chmod +x /root/cephadm
mkdir -p /etc/ceph
./cephadm add-repo --release octopus
./cephadm install
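Quick sanity check (not part of the original notes): the downloaded script can report its version before use, and after ./cephadm install the packaged binary should be on the path.
./cephadm version
which cephadm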
=Bootstrap the monitor on the host's own IP=
cephadm bootstrap --mon-ip 192.168.2.206
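Bootstrap prints dashboard credentials and the cluster fsid. One way to verify the new cluster before anything else is installed is a containerized shell, for example:
cephadm shell -- ceph -s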
=Install ceph=
cephadm add-repo --release octopus
cephadm install ceph-common
cephadm install ceph
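With ceph-common on the host, the CLI talks to the cluster directly; for example:
ceph --version
ceph orch ps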
=Create OSDs from all disks (get the specific command from Hoerup)=
ceph orch apply osd --all-available-devices
ceph status
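Optional check (not in the original notes): see which devices the orchestrator considers available and how the OSDs ended up in the CRUSH tree.
ceph orch device ls
ceph osd tree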
=Create a new rule that uses OSD as the failure domain (instead of 3 hosts)=
ceph osd crush rule create-replicated repl1 default osd
ceph osd pool ls
ceph osd pool set device_health_metrics crush_rule repl1
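To inspect the new rule and confirm which rule a pool uses, something like:
ceph osd crush rule ls
ceph osd crush rule dump repl1
ceph osd pool get device_health_metrics crush_rule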
=EC stuff, here with 4+1=
ceph osd pool create rbdmeta replicated repl1
ceph osd erasure-code-profile get default
ceph osd erasure-code-profile set ec41 k=4 m=1 crush-failure-domain=osd
ceph osd pool create rbddata erasure ec41
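The resulting profile and pools can be inspected with, for example:
ceph osd erasure-code-profile get ec41
ceph osd pool ls detail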
=Hint that these pools will be used for block storage=
ceph osd pool application enable rbddata rbd
ceph osd pool application enable rbdmeta rbd
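Optional verification of the application tag:
ceph osd pool application get rbddata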
=Create a filesystem=
==Allow EC block overwrites==
ceph osd pool set rbddata allow_ec_overwrites true
rbd create --size 40G --data-pool rbddata rbdmeta/ectestimage1
rbd ls rbdmeta
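rbd info should show the image metadata in rbdmeta and rbddata as the data pool; for example:
rbd info rbdmeta/ectestimage1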
==Map an rbd image as a block device==
rbd map rbdmeta/ectestimage1
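The device name it was mapped to can be listed with:
rbd showmapped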
==Add the mapping to '''/etc/ceph/rbdmap'''==
rbdmeta/ectestimage1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
systemctl enable rbdmap.service
==Mount the filesystem==
mkfs.xfs /dev/rbd0
mkdir /storage
mount -t xfs /dev/rbd0 /storage/
df -h /storage/
=='''/etc/fstab'''==
/dev/rbd0 /storage/ xfs defaults,_netdev 0 0
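Untested suggestion for checking the fstab entry without a reboot (assumes nothing else is using /storage):
umount /storage
mount -a
df -h /storage/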
=What are we still missing?=
Clean shutdown / reboot?
Ceph logs?
Scrubbing?
Monitoring / prometheus?
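None of these are set up yet. Some untested starting points, not verified on this cluster:
# keep OSDs from being marked out during a planned reboot
ceph osd set noout
# ... reboot ...
ceph osd unset noout
# recent cluster log entries and crash reports
ceph log last 50
ceph crash ls
# expose metrics for Prometheus via the mgr module
ceph mgr module enable prometheus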
=Sources=
https://docs.ceph.com/en/latest/cephadm/install/
https://medium.com/@balderscape/setting-up-a-virtual-single-node-ceph-storage-cluster-d86d6a6c658e
https://linoxide.com/linux-how-to/hwto-configure-single-node-ceph-cluster/
==Miscellaneous, unsorted notes==
ceph dashboard ac-user-create <username> <password> administrator
# Zap a disk for re-use
ceph-volume lvm zap /dev/sdX
# or
dd if=/dev/zero of=/dev/vdc bs=1M count=10
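If the disk still carries LVM volumes from an earlier OSD, zap can also remove those (hedged addition, not from the original notes):
ceph-volume lvm zap --destroy /dev/sdX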
==Cloud Ceph==
URL: https://217.61.236.225:8443/
User: admin
Password: 6x52bzvkad =< Admin9000
INFO:cephadm:You can access the Ceph CLI with:
sudo /usr/sbin/cephadm shell --fsid 1e57f65a-0d17-11eb-8201-00163ef9d22a -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
INFO:cephadm:Please consider enabling telemetry to help improve Ceph:
ceph telemetry on