Single Host Ceph Server


Starting point: a clean CentOS 8 installation.

=Basic stuff and cephadm=

 yum install -y python3 podman chrony lvm2 wget
 wget -O /root/cephadm https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
 chmod +x /root/cephadm
 mkdir -p /etc/ceph
 ./cephadm add-repo --release octopus
 ./cephadm install
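
Before bootstrapping, a quick sanity check that the downloaded cephadm script and podman actually work (the exact version output depends on what was pulled):

 # confirm the standalone cephadm script runs and podman is usable
 ./cephadm version
 podman --version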


=Bootstrap the monitor on the host's own IP=

 cephadm bootstrap --mon-ip 192.168.2.206
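
ceph-common is only installed in the next step, so for now the cluster can be inspected through the cephadm container; a minimal check that the monitor and manager came up:

 # run a one-off ceph command inside the cephadm container
 cephadm shell -- ceph status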

=Install ceph=

 cephadm add-repo --release octopus
 cephadm install ceph-common
 cephadm install ceph
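
With ceph-common on the host, the ceph CLI should now talk to the cluster directly; a quick, assumed sanity check:

 # uses /etc/ceph/ceph.conf and the admin keyring written by bootstrap
 ceph -v
 ceph orch status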

=Create OSDs on all available disks (get the exact command from Hoerup)=

 ceph orch apply osd --all-available-devices
 ceph status
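
Before or after applying the OSD spec it can be worth checking which devices the orchestrator actually considers available, and that the OSDs show up in the CRUSH tree:

 # list the devices cephadm sees as available for OSDs
 ceph orch device ls
 # confirm the OSDs exist and are up/in
 ceph osd tree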


=Create a new rule that uses OSD as the failure domain (instead of 3 hosts)=

 ceph osd crush rule create-replicated repl1 default osd
 ceph osd pool ls
 ceph osd pool set device_health_metrics crush_rule repl1
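
To confirm the rule was created and that the pool really switched over to it, something like:

 # show all CRUSH rules and the details of repl1
 ceph osd crush rule ls
 ceph osd crush rule dump repl1
 # verify the pool now uses the new rule
 ceph osd pool get device_health_metrics crush_rule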


=Block Device=

==EC stuff, here with 4+1==

 ceph osd pool create rbdmeta replicated repl1
 ceph osd erasure-code-profile get default
 ceph osd erasure-code-profile set ec41 k=4 m=1 crush-failure-domain=osd
 ceph osd pool create rbddata erasure ec41
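
A quick way to double-check the new profile and which profile the EC pool ended up with:

 # inspect the erasure-code profile and the pool that uses it
 ceph osd erasure-code-profile get ec41
 ceph osd pool get rbddata erasure_code_profile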

==Hint that these pools are used for block storage==

 ceph osd pool application enable rbddata rbd
 ceph osd pool application enable rbdmeta rbd

==Allow EC block overwrites==

 ceph osd pool set rbddata allow_ec_overwrites true
 rbd create --size 40G --data-pool rbddata rbdmeta/ectestimage1
 rbd ls rbdmeta
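
rbd info should show that the image itself lives in rbdmeta while its data objects go to the EC pool:

 # look for "data_pool: rbddata" in the output
 rbd info rbdmeta/ectestimage1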

==Map an rbd image as a block device==

 rbd map rbdmeta/ectestimage1
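
The device name is not guaranteed to be /dev/rbd0; rbd showmapped tells which device the image got:

 # show kernel mappings, e.g. rbdmeta/ectestimage1 -> /dev/rbd0
 rbd showmapped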

==Add the image to '''/etc/ceph/rbdmap'''==

 rbdmeta/ectestimage1    id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
 systemctl enable rbdmap.service


==Mount the filesystem==

 mkfs.xfs /dev/rbd0
 mkdir /storage
 mount -t xfs /dev/rbd0 /storage/
 df -h /storage/

=='''/etc/fstab'''==

 /dev/rbd0       /storage/       xfs     defaults,discard,_netdev        0       0
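
A minimal way to test the fstab entry without rebooting (assuming the image is still mapped):

 # unmount, then remount purely from the fstab entry
 umount /storage
 mount /storage
 df -h /storage/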

=CephFS - Filesystem=

 # we call the filesystem myfs

 # set up a metadata server
 ceph orch apply mds myfs

 # create the volume
 ceph fs volume create myfs

 # both the metadata AND the data pool for the root fs must be replicated, but we set the crush rule so all replicas may land on the same host
 ceph osd pool set cephfs.myfs.meta crush_rule repl1
 ceph osd pool set cephfs.myfs.data crush_rule repl1

 # set the intended use on the pools
 ceph osd pool application enable cephfs.myfs.data cephfs
 ceph osd pool application enable cephfs.myfs.meta cephfs

 # mount using the admin keyring
 mount -o name=admin -t ceph 192.168.2.199:/ /mnt/cephfs/
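
Nothing above creates the mount point itself, so that has to exist first; a small check around the mount, using the same paths as above:

 # the mount point must exist before the mount command is run
 mkdir -p /mnt/cephfs
 # verify the mount afterwards
 df -h /mnt/cephfs
 mount | grep ceph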

 ceph osd pool create cephfs-ec erasure ec41
 ceph osd pool application enable cephfs-ec cephfs
 ceph osd pool set cephfs-ec allow_ec_overwrites true
 ceph fs add_data_pool myfs cephfs-ec

 # create a subvolume, using cephfs-ec as the backing pool
 ceph fs subvolume create myfs subfs --pool_layout cephfs-ec

 # mount the subvolume; by default it gets an annoyingly long path
 mount -t ceph -o name=admin 192.168.2.199:/volumes/_nogroup/subfs/60918416-6df6-4b4a-a071-ffd527fba26c/ /mnt/cephfs/

 # fstab
 192.168.2.199:/volumes/_nogroup/subfs/60918416-6df6-4b4a-a071-ffd527fba26c/  /mnt/cephfs ceph  name=admin,_netdev 0 0
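
The long subvolume path used in the mount command and the fstab line does not have to be copied by hand; it can be looked up:

 # print the full path of the subvolume (the /volumes/_nogroup/... path above)
 ceph fs subvolume getpath myfs subfs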


=Delayed mount=

On a single host the Ceph daemons have to come up before the CephFS mount can succeed, so instead of mounting straight from fstab at boot the mount is triggered by a systemd timer:

 # cat mnt-cephfs.timer
 [Unit]
 Description=delayed mount of cephfs

 [Timer]
 OnBootSec=1min
 Unit=mnt-cephfs.service

 [Install]
 WantedBy=timers.target

 # cat mnt-cephfs.service
 [Unit]
 Description=Mount CephFS

 [Service]
 Type=oneshot
 ExecStart=mount /mnt/cephfs
 User=root
 Group=root

 # systemctl enable mnt-cephfs.timer
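
If the unit files were just created under /etc/systemd/system/ (the usual location for local units, assumed here), systemd needs a reload before the enable above takes effect, and list-timers confirms the schedule:

 systemctl daemon-reload
 systemctl enable mnt-cephfs.timer
 # confirm the timer is scheduled
 systemctl list-timers mnt-cephfs.timer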

=What are we still missing?=

Clean shutdown / reboot?

Ceph logs?

Scrubbing?

Monitoring / Prometheus?

Failed disk, new disk.

REST API

=Sources n crap=

https://docs.ceph.com/en/latest/cephadm/install/

https://medium.com/@balderscape/setting-up-a-virtual-single-node-ceph-storage-cluster-d86d6a6c658e

https://linoxide.com/linux-how-to/hwto-configure-single-node-ceph-cluster/


==Zap disk for re-use==

 ceph-volume lvm zap /dev/sdX

or

 dd if=/dev/zero of=/dev/vdc bs=1M count=10
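
With cephadm the same can also be done through the orchestrator; a sketch, assuming the single host is named ceph1 (replace with the real hostname from ceph orch host ls):

 # zap a device via the orchestrator; ceph1 is a placeholder hostname
 ceph orch device zap ceph1 /dev/sdX --force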