Single Host Ceph Server
## https://docs.ceph.com/en/latest/cephadm/install/
## https://medium.com/@balderscape/setting-up-a-virtual-single-node-ceph-storage-cluster-d86d6a6c658e
## https://linoxide.com/linux-how-to/hwto-configure-single-node-ceph-cluster/
yum install -y python3 podman chrony lvm2 wget
wget -O /root/cephadm https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
chmod +x /root/cephadm
./cephadm add-repo --release octopus
./cephadm install
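As a quick sanity check (not part of the original notes), cephadm can report its own version:
# confirm the packaged cephadm is on the PATH and working
cephadm version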
mkdir -p /etc/ceph
Bootstrap the monitor with the host's own IP
cephadm bootstrap --mon-ip 192.168.2.206
Install the basic Ceph tools
cephadm add-repo --release octopus
cephadm install ceph-common
cephadm install ceph
Create OSDs on all available disks (get the specific command from Hoerup; a per-device sketch follows below)
ceph orch apply osd --all-available-devices
ceph status
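If you want to pick specific disks instead of grabbing everything, the orchestrator can also add OSDs per device; the hostname and device path below are placeholders, not values from this setup:
# list the devices the orchestrator can see
ceph orch device ls
# create an OSD on one specific disk (replace host and device)
ceph orch daemon add osd <hostname>:/dev/sdX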
Create a new CRUSH rule that uses OSD as the failure domain (instead of 3 hosts)
ceph osd crush rule create-replicated repl1 default osd
ceph osd pool ls
ceph osd pool set device_health_metrics crush_rule repl1
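To confirm that the new rule really uses osd as its failure domain (verification only, not in the original notes):
ceph osd crush rule ls
ceph osd crush rule dump repl1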
Erasure coding (EC) setup, here with a 4+1 profile
ceph osd pool create rbdmeta replicated repl1
ceph osd erasure-code-profile get default
ceph osd erasure-code-profile set ec41 k=4 m=1 crush-failure-domain=osd
ceph osd pool create rbddata erasure ec41
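Check the new profile before building pools on it (extra verification step):
ceph osd erasure-code-profile ls
ceph osd erasure-code-profile get ec41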
Hint that these pools will be used for block storage (RBD)
ceph osd pool application enable rbddata rbd
ceph osd pool application enable rbdmeta rbd
Allow overwrites on the EC data pool (required for RBD on erasure-coded pools)
ceph osd pool set rbddata allow_ec_overwrites true
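Verify that the flag was applied (verification only):
ceph osd pool get rbddata allow_ec_overwrites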
rbd create --size 40G --data-pool rbddata rbdmeta/ectestimage1
rbd ls rbdmeta
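rbd info shows that the image keeps its metadata in rbdmeta while data_pool points at the EC pool (extra check):
rbd info rbdmeta/ectestimage1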
Map an RBD image as a block device
rbd map rbdmeta/ectestimage1
Remember to edit '''/etc/ceph/rbdmap''', e.g.
rbdmeta/ectestimage1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
systemctl enable rbdmap.service
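To check the current mappings, and that the rbdmap unit maps everything listed in /etc/ceph/rbdmap (verification, not from the original notes):
rbd showmapped
systemctl start rbdmap.service
systemctl status rbdmap.service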
Create a filesystem and mount it
mkfs.xfs /dev/rbd0
mkdir /storage
mount -t xfs /dev/rbd0 /storage/
df -h /storage/
Filesystem      Size  Used Avail Use% Mounted on
/dev/rbd0        40G  319M   40G   1% /storage
- Add a mount entry to /etc/fstab:
/dev/rbd0 /storage/ xfs defaults,_netdev 0 0
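The /dev/rbd0 name is not guaranteed to stay the same across reboots; an alternative (assuming the udev symlink that rbd map creates under /dev/rbd/<pool>/<image>) is:
/dev/rbd/rbdmeta/ectestimage1 /storage/ xfs defaults,_netdev 0 0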
- TODO from here:
- Clean shutdown / reboot? (a sketch follows below)
- Ceph logs?
- Scrubbing?
- Monitoring / Prometheus?
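For the clean shutdown/reboot question, one common approach (an assumption, not something these notes have settled) is to keep Ceph from rebalancing while the host is down:
# before shutting down / rebooting
ceph osd set noout
ceph osd set norebalance
# ... reboot ...
# once the OSDs are back up
ceph osd unset norebalance
ceph osd unset noout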
== Miscellaneous, unsorted ==
ceph dashboard ac-user-create <username> <password> administrator
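Newer Octopus point releases no longer accept the password on the command line; assuming that applies here, the file-based variant looks roughly like this (password and username are placeholders):
echo -n 'SecretPassword' > /tmp/dashboard-pass
ceph dashboard ac-user-create <username> -i /tmp/dashboard-pass administrator
rm /tmp/dashboard-pass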
// Zap a disk for re-use
ceph-volume lvm zap /dev/sdX
or
dd if=/dev/zero of=/dev/vdc bs=1M count=10
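If old LVM or filesystem signatures still linger after the zap, wipefs can clear them as well (an extra option, not from the original notes):
wipefs --all /dev/sdX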
== Cloud Ceph ==
URL: https://217.61.236.225:8443/
User: admin
Password: 6x52bzvkad =< Admin9000
INFO:cephadm:You can access the Ceph CLI with:
sudo /usr/sbin/cephadm shell --fsid 1e57f65a-0d17-11eb-8201-00163ef9d22a -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring
INFO:cephadm:Please consider enabling telemetry to help improve Ceph:
ceph telemetry on