[7] Adding and Removing an OSD

July 12, 2020, by Adam [zicherka] Nogły

The network looks as follows: [node1] acts as the Admin Node and runs the MON, MGR and MDS daemons plus osd.0, while [node2] and [node3] each hold one OSD. The new node [node4] (192.168.1.7) will first be added and then removed.

[1] We will now add the [node4] node as an OSD from the [Admin Node]. [/dev/sdb] will be used as the block device.

# transfer the public key
[root@node1 ~]# ssh-copy-id node4
/usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
The authenticity of host 'node4 (192.168.1.7)' can't be established.
ECDSA key fingerprint is SHA256:XziJ4W8VLjGjmPTnreQGjKZWTinoFLisikci9DeEQJ8.
Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
/usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
/usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys
root@node4's password: # enter the root password for node4

Number of key(s) added: 1

Now try logging into the machine, with: "ssh 'node4'"
and check to make sure that only the key(s) you wanted were added.

# if Firewalld is running, allow traffic for the Ceph service
[root@node1 ~]# ssh node4 "firewall-cmd --add-service=ceph --permanent; firewall-cmd --reload"
success
success
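
If you want to confirm the rule took effect, you can also list the firewall configuration on node4; an optional check:

# optional: the [ceph] service should now appear in the list
[root@node1 ~]# ssh node4 "firewall-cmd --list-services"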

# install the required packages
[root@node1 ~]# ssh node4 "dnf -y install centos-release-ceph-nautilus; dnf -y install ceph"
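
Before copying the configuration it can be worth checking that the Ceph release installed on node4 matches the rest of the cluster; a quick optional check:

# optional: the reported version should match the existing nodes (Nautilus here)
[root@node1 ~]# ssh node4 "ceph --version"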

# transfer the required configuration files
[root@node1 ~]# scp /etc/ceph/ceph.conf node4:/etc/ceph/ceph.conf
ceph.conf 100% 445 22.1KB/s 00:00
[root@node1 ~]# scp /etc/ceph/ceph.client.admin.keyring node4:/etc/ceph
ceph.client.admin.keyring 100% 151 7.6KB/s 00:00
[root@node1 ~]# scp /var/lib/ceph/bootstrap-osd/ceph.keyring node4:/var/lib/ceph/bootstrap-osd
ceph.keyring 100% 129 124.8KB/s 00:00

# configure the OSD
[root@node1 ~]# ssh node4 "chown ceph. /etc/ceph/ceph.* /var/lib/ceph/bootstrap-osd/*; parted --script /dev/sdb 'mklabel gpt'; parted --script /dev/sdb 'mkpart primary 0% 100%'; ceph-volume lvm create --data /dev/sdb1"
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 18a2d25d-be2d-4b0d-a014-f9e800c52ca2
Running command: /usr/sbin/vgcreate --force --yes ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1 /dev/sdb1
stdout: Physical volume "/dev/sdb1" successfully created.
stdout: Volume group "ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1" successfully created
Running command: /usr/sbin/lvcreate --yes -l 100%FREE -n osd-block-18a2d25d-be2d-4b0d-a014-f9e800c52ca2 ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1
stdout: Logical volume "osd-block-18a2d25d-be2d-4b0d-a014-f9e800c52ca2" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3
Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-3
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1/osd-block-18a2d25d-be2d-4b0d-a014-f9e800c52ca2
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
Running command: /usr/bin/ln -s /dev/ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1/osd-block-18a2d25d-be2d-4b0d-a014-f9e800c52ca2 /var/lib/ceph/osd/ceph-3/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap
stderr: got monmap epoch 2
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQAz1wlfGUqtBBAAPvFDy/g1/6DRgL3PJ32zEA==
stdout: creating /var/lib/ceph/osd/ceph-3/keyring
added entity osd.3 auth(key=AQAz1wlfGUqtBBAAPvFDy/g1/6DRgL3PJ32zEA==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid 18a2d25d-be2d-4b0d-a014-f9e800c52ca2 --setuser ceph --setgroup ceph
--> ceph-volume lvm prepare successful for: /dev/sdb1
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
Running command: /usr/bin/ceph-bluestore-tool --cluster=ceph prime-osd-dir --dev /dev/ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1/osd-block-18a2d25d-be2d-4b0d-a014-f9e800c52ca2 --path /var/lib/ceph/osd/ceph-3 --no-mon-config
Running command: /usr/bin/ln -snf /dev/ceph-bcd77728-b867-4131-a93e-a7fdbf3e2fc1/osd-block-18a2d25d-be2d-4b0d-a014-f9e800c52ca2 /var/lib/ceph/osd/ceph-3/block
Running command: /usr/bin/chown -h ceph:ceph /var/lib/ceph/osd/ceph-3/block
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-3
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3
Running command: /usr/bin/systemctl enable ceph-volume@lvm-3-18a2d25d-be2d-4b0d-a014-f9e800c52ca2
stderr: Created symlink /etc/systemd/system/multi-user.target.wants/ceph-volume@lvm-3-18a2d25d-be2d-4b0d-a014-f9e800c52ca2.service → /usr/lib/systemd/system/ceph-volume@.service.
Running command: /usr/bin/systemctl enable --runtime ceph-osd@3
stderr: Created symlink /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service → /usr/lib/systemd/system/ceph-osd@.service.
Running command: /usr/bin/systemctl start ceph-osd@3
--> ceph-volume lvm activate successful for osd ID: 3
--> ceph-volume lvm create successful for: /dev/sdb1
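
If you want to see how ceph-volume laid the new OSD out on the disk, its LVM metadata can be listed on node4 (optional):

# optional: show the logical volume and metadata created for osd.3
[root@node1 ~]# ssh node4 "ceph-volume lvm list"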

[root@node1 ~]# ceph -s
  cluster:
    id:     73eb874b-02b0-4ed7-9e1f-b59a9200b528
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node1 (age 22h)
    mgr: node1(active, since 22h)
    mds: cephfs:1 {0=node1=up:active}
    osd: 4 osds: 4 up (since 19h), 4 in (since 19h)

  data:
    pools:   5 pools, 224 pgs
    objects: 58 objects, 14 MiB
    usage:   4.3 GiB used, 196 GiB / 200 GiB avail
    pgs:     223 active+clean
             1   active+clean+inconsistent
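
The single [active+clean+inconsistent] placement group above is most likely a leftover scrub inconsistency on one of the existing pools rather than a result of adding the OSD. If you want to track it down, the affected PG can be identified and repaired roughly like this (the exact PG id will differ):

# show which placement group is inconsistent
[root@node1 ~]# ceph health detail
# repair it, substituting the PG id reported above
[root@node1 ~]# ceph pg repair <pgid>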

[2] To remove an OSD node from an existing cluster, run the following commands. As an example, we will remove the [node4] node.

[root@node1 ~]# ceph -s
  cluster:
    id:     73eb874b-02b0-4ed7-9e1f-b59a9200b528
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node1 (age 22h)
    mgr: node1(active, since 22h)
    mds: cephfs:1 {0=node1=up:active}
    osd: 4 osds: 4 up (since 19h), 4 in (since 19h)

  data:
    pools:   5 pools, 224 pgs
    objects: 58 objects, 14 MiB
    usage:   4.3 GiB used, 196 GiB / 200 GiB avail
    pgs:     223 active+clean
             1   active+clean+inconsistent


[root@node1 ~]# ceph osd tree
ID CLASS WEIGHT  TYPE NAME      STATUS REWEIGHT PRI-AFF
-1       0.19519 root default
-3       0.04880     host node1
 0   hdd 0.04880         osd.0      up  1.00000 1.00000
-5       0.04880     host node2
 1   hdd 0.04880         osd.1      up  1.00000 1.00000
-7       0.04880     host node3
 2   hdd 0.04880         osd.2      up  1.00000 1.00000
-9       0.04880     host node4
 3   hdd 0.04880         osd.3      up  1.00000 1.00000

# specify the ID of the OSD on the node you want to remove
[root@node1 ~]# ceph osd out 3
marked out osd.3.

# watch the cluster status live; it is worth running this command in a separate terminal before marking the node out
# after running [ceph osd out], rebalancing starts automatically
# to exit the live view, press [Ctrl + c]
[root@node1 ~]# ceph -w
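
Instead of watching the output, on Nautilus you can also ask the cluster directly whether all data has been migrated off the OSD; a small sketch, assuming osd.3:

# loops until osd.3 no longer holds any data and is safe to remove
[root@node1 ~]# while ! ceph osd safe-to-destroy osd.3; do sleep 10; done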

# once the status returns to [health: HEALTH_OK], we can disable the OSD service on the target host
[root@node1 ~]# ssh node4 "systemctl disable --now ceph-osd@3.service"
Removed /run/systemd/system/ceph-osd.target.wants/ceph-osd@3.service.

# remove the OSD node
[root@node1 ~]# ceph osd purge 3 --yes-i-really-mean-it
purged osd.3
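
If the disk in node4 is meant to be reused later, the LVM volumes that ceph-volume created on it can be wiped as well; note that this destroys all data on [/dev/sdb]:

# optional: remove the leftover volume group and wipe the device on node4
[root@node1 ~]# ssh node4 "ceph-volume lvm zap --destroy /dev/sdb"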

[root@node1 ~]# ceph -s
  cluster:
    id:     73eb874b-02b0-4ed7-9e1f-b59a9200b528
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node1 (age 22h)
    mgr: node1(active, since 22h)
    mds: cephfs:1 {0=node1=up:active}
    osd: 3 osds: 3 up (since 63s), 3 in (since 13m)

  data:
    pools:   5 pools, 224 pgs
    objects: 58 objects, 15 MiB
    usage:   3.2 GiB used, 147 GiB / 150 GiB avail
    pgs:     224 active+clean
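
After the purge, the now empty [node4] host bucket may still be listed by [ceph osd tree]; if so, it can be removed from the CRUSH map as a final optional cleanup:

# optional: drop the empty host bucket left behind in the CRUSH map
[root@node1 ~]# ceph osd crush remove node4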