Single Host Ceph Server
Starting from a clean CentOS 8 installation.
=Basic Stuff and cephadm=
 yum install -y python3 podman chrony lvm2 wget
 wget -O /root/cephadm https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
 chmod +x /root/cephadm

 mkdir -p /etc/ceph

 ./cephadm add-repo --release octopus
 ./cephadm install
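If in doubt whether the download worked, the script can report the Ceph version it targets before anything else is installed (it may pull the Ceph container image the first time it runs):
 ./cephadm version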
=Bootstrap the monitor on its own IP=
 cephadm bootstrap --mon-ip 192.168.2.206
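To confirm the bootstrap came up, the containerized shell can run ceph status even before ceph-common is installed on the host (next step):
 cephadm shell -- ceph status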
=Install ceph=
 cephadm add-repo --release octopus
 cephadm install ceph-common
 cephadm install ceph
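Once ceph-common is on the host, the ceph CLI should work directly without cephadm shell; a quick sanity check:
 ceph -v
 ceph status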
=Create OSDs from all disks (get the specific command from Hoerup)=
 ceph orch apply osd --all-available-devices
 ceph status
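Before (or after) applying, the orchestrator can show which devices it considers usable, and the CRUSH tree should list the OSDs that were created:
 ceph orch device ls
 ceph osd tree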
=Create a new rule that uses OSD as the failure domain (instead of 3 hosts)=
 ceph osd crush rule create-replicated repl1 default osd
 ceph osd pool ls
 ceph osd pool set device_health_metrics crush_rule repl1
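To verify the new rule and check that the pool picked it up:
 ceph osd crush rule dump repl1
 ceph osd pool get device_health_metrics crush_rule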
=Block Device=
==EC stuff, here with 4+1==
 ceph osd pool create rbdmeta replicated repl1
 ceph osd erasure-code-profile get default
 ceph osd erasure-code-profile set ec41 k=4 m=1 crush-failure-domain=osd
 ceph osd pool create rbddata erasure ec41
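The new profile can be inspected the same way as the default one above; it should report k=4, m=1 and crush-failure-domain=osd:
 ceph osd erasure-code-profile get ec41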
==Hint that these pools are used for block storage==
 ceph osd pool application enable rbddata rbd
 ceph osd pool application enable rbdmeta rbd
==Allow EC block overwrites==
 ceph osd pool set rbddata allow_ec_overwrites true
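The flag can be read back to make sure it stuck:
 ceph osd pool get rbddata allow_ec_overwrites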
 rbd create --size 40G --data-pool rbddata rbdmeta/ectestimage1
 rbd ls rbdmeta
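rbd info should show rbddata as the image's data pool, confirming that the image data actually lands in the EC pool:
 rbd info rbdmeta/ectestimage1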
==Map an rbd image as a block device==
 rbd map rbdmeta/ectestimage1
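The mapping, and which /dev/rbdX device it got, can be listed with:
 rbd showmapped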
==Add to '''/etc/ceph/rbdmap'''==
 rbdmeta/ectestimage1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring

 systemctl enable rbdmap.service
==Mount the filesystem==
 mkfs.xfs /dev/rbd0
 mkdir /storage
 mount -t xfs /dev/rbd0 /storage/
 df -h /storage/
=='''/etc/fstab'''==
 /dev/rbd0 /storage/ xfs defaults,discard,_netdev 0 0
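With the rbdmap entry and this fstab line in place the mount should come back after a reboot; a quick test without rebooting:
 umount /storage
 mount /storage
 df -h /storage/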
=CephFS - Filesystem=
 # we call the filesystem myfs

 # set up the metadata server
 ceph orch apply mds myfs

 # create the volume
 ceph fs volume create myfs

 # both the metadata AND the data pool for the root fs must be replicated, but we set the crush rule so all replicas may live on the same host
 ceph osd pool set cephfs.myfs.meta crush_rule repl1
 ceph osd pool set cephfs.myfs.data crush_rule repl1

 # set the intended use on the pools
 ceph osd pool application enable cephfs.myfs.data cephfs
 ceph osd pool application enable cephfs.myfs.meta cephfs

 # mount using the admin keyring (the mount point must exist)
 mkdir -p /mnt/cephfs
 mount -o name=admin -t ceph 192.168.2.199:/ /mnt/cephfs/

 ceph osd pool create cephfs-ec erasure ec41
 ceph osd pool application enable cephfs-ec cephfs
 ceph osd pool set cephfs-ec allow_ec_overwrites true
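At this point ceph fs status should show an active MDS and the two pools, and ceph orch ps lists the mds daemon the orchestrator started:
 ceph fs status myfs
 ceph orch ps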
 ceph fs add_data_pool myfs cephfs-ec

 # create a subvolume, using cephfs-ec as the backing pool
 ceph fs subvolume create myfs subfs --pool_layout cephfs-ec

 # mount the subvolume; by default it gets an annoyingly long path
 mount -t ceph -o name=admin 192.168.2.199:/volumes/_nogroup/subfs/60918416-6df6-4b4a-a071-ffd527fba26c/ /mnt/cephfs/
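The long path in that mount command does not have to be copied by hand; the subvolume can report it:
 ceph fs subvolume getpath myfs subfs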
 # fstab
 192.168.2.199:/volumes/_nogroup/subfs/60918416-6df6-4b4a-a071-ffd527fba26c/ /mnt/cephfs ceph name=admin,_netdev 0 0
=Delayed mount=
Mounting CephFS straight from fstab at boot can race with the Ceph daemons starting on the same host, so the mount is instead triggered one minute after boot by a systemd timer.
 # cat mnt-cephfs.timer
 [Unit]
 Description=Delayed mount of cephfs
 
 [Timer]
 OnBootSec=1min
 Unit=mnt-cephfs.service
 
 [Install]
 WantedBy=timers.target

 # cat mnt-cephfs.service
 [Unit]
 Description=Mount CephFS
 
 [Service]
 Type=oneshot
 ExecStart=mount /mnt/cephfs
 User=root
 Group=root

 # systemctl enable mnt-cephfs.timer
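The two unit files are assumed to live in /etc/systemd/system/, and ExecStart=mount /mnt/cephfs relies on the fstab entry above. After enabling the timer it can be checked with:
 systemctl list-timers mnt-cephfs.timer
 systemctl status mnt-cephfs.service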
=What is still missing?=
Clean shutdown / reboot?

Ceph logs?

Scrubbing?

Monitoring / Prometheus?

Failed disk, replacing a disk.

REST API
=Sources n crap=
https://docs.ceph.com/en/latest/cephadm/install/
https://medium.com/@balderscape/setting-up-a-virtual-single-node-ceph-storage-cluster-d86d6a6c658e
https://linoxide.com/linux-how-to/hwto-configure-single-node-ceph-cluster/
==Zap disk for re-use==
 ceph-volume lvm zap /dev/sdX
or
 dd if=/dev/zero of=/dev/vdc bs=1M count=10
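With the cephadm orchestrator running, the same can be done through it; <host> below is a placeholder for whatever ceph orch host ls reports:
 ceph orch device zap <host> /dev/sdX --force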