mimic or luminous

rbd_rados

sudo mount -t ceph 192.168.7.151:6789:/ /mnt -o name=admin,secret=AQBaPZNcCalvLRAAt4iyva3DHfb8NbOX4MxBAw==
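To keep the key out of the command line and shell history, mount.ceph can also read it from a file; a sketch, assuming the key has been saved (alone, on one line) to /etc/ceph/admin.secret:

sudo mount -t ceph 192.168.7.151:6789:/ /mnt -o name=admin,secretfile=/etc/ceph/admin.secret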

rbd
ceph osd pool create rbdpool 64
(on luminous/mimic the pg_num argument, 64 here as an example, is required)
rbd pool init rbdpool
rbd create --size 1024 rbdpool/rbdimage
rbd ls rbdpool
rbd info rbdpool/rbdimage

Unless specified, the rbd command will access the Ceph cluster using the ID admin.
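To run rbd as a different user, pass --id and --keyring explicitly; a sketch, assuming the client.qemu user created below (with rights on a pool named vms) and its keyring exported to /etc/ceph/ceph.client.qemu.keyring:

rbd ls vms --id qemu --keyring /etc/ceph/ceph.client.qemu.keyring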
ceph auth get-or-create client.qemu mon 'profile rbd' osd 'profile rbd pool=vms, profile rbd-read-only pool=images'

Ceph Block Device images are thin provisioned.

[root@ceph1 ceph]# rbd info rbdpool/rbdimage
rbd image 'rbdimage':
size 1 GiB in 256 objects
order 22 (4 MiB objects)
id: 12926b8b4567
block_name_prefix: rbd_data.12926b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
op_features:
flags:
create_timestamp: Wed Mar ...

[root@ceph1 ceph]# rbd feature disable rbdpool/rbdimage object-map fast-diff deep-flatten
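The kernel RBD client typically does not support the object-map, fast-diff and deep-flatten features, which is why they are disabled here before mapping. If rbd device map still refuses the image, the kernel log usually names the unsupported feature:

dmesg | tail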
[root@ceph1 ceph]# rbd device map rbdpool/rbdimage --id admin
/dev/rbd0
[root@ceph1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sr0 : 918M rom
vda : 50G disk
├─vda1 : 512M part /boot
├─vda2 : 8G part [SWAP]
└─vda3 : 41.5G part /
vdb : 60G disk
└─ceph--c087d78f--9bb1--49a5--97ad--437995ee0ae7-osd--block--da3283a7--adfe--43ad--8ebc--0853ee8900bb : 60G lvm
rbd0 : 1G disk
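Once mapped (and visible in lsblk above), /dev/rbd0 behaves like any other block device. A minimal sketch of formatting, mounting and later releasing it, assuming XFS and an arbitrary /mnt/rbd mount point:

mkfs.xfs /dev/rbd0
mkdir -p /mnt/rbd
mount /dev/rbd0 /mnt/rbd
# ... use the filesystem ...
umount /mnt/rbd
rbd device unmap /dev/rbd0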

root@cu-pve04:~# rbd info kycrbd/vm-111-disk-0
rbd image 'vm-111-disk-0':
size 50GiB in 12800 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.b52c6b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Fri May 10 17:31:49 2019
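The object count follows from the image size and the object size: 50 GiB split into 4 MiB objects is 50 * 1024 / 4 = 12800 objects, which matches the output above. Quick check in the shell:

echo $((50 * 1024 / 4))   # prints 12800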


[root@ceph1 ~]# rbd device list
id pool image snap device
0  rbdpool rbdimage -    /dev/rbd0

root@cu-pve04:~# rbd help du
usage: rbd du [--pool <pool>] [--image <image>] [--snap <snap>]
[--format <format>] [--pretty-format] [--from-snap <from-snap>]
              <image-or-snap-spec>

Show disk usage stats for pool, image or snapshot.

Positional arguments
<image-or-snap-spec> image or snapshot specification
                       (example: [<pool-name>/]<image-name>[@<snap-name>])

Optional arguments
-p [ --pool ] arg pool name
--image arg image name
--snap arg snapshot name
--format arg output format (plain, json, or xml) [default: plain]
--pretty-format pretty formatting (json and xml)
  --from-snap arg      snapshot starting point

root@cu-pve04:~# rbd du -p kycrbd
NAME PROVISIONED USED
base-101-disk-0@__base__ 50GiB  .51GiB
base-101-disk-0          50GiB  0B
vm--disk- 100GiB .5GiB
vm--disk- 100GiB .18GiB
vm--disk- 100GiB .7GiB
vm--disk- 100GiB .7GiB
vm--disk- 50GiB .7GiB
vm--disk- 50GiB .45GiB
<TOTAL> 550GiB 160GiB

root@cu-pve04:~# rbd ls -l kycrbd
NAME SIZE PARENT FMT PROT LOCK
base-101-disk-0          50GiB
base-101-disk-0@__base__ 50GiB            yes
vm--disk- 100GiB excl
vm--disk- 100GiB
vm--disk- 100GiB excl
vm--disk- 100GiB excl
vm--disk- 50GiB excl
vm--disk- 50GiB excl

root@cu-pve04:~# rbd status -p kycrbd vm--disk-
Watchers:
watcher=192.168.7.204:/ client. cookie=

root@cu-pve04:~# rbd info -p kycrbd vm-100-disk-0
rbd image 'vm-100-disk-0':
size 100GiB in 25600 objects
order 22 (4MiB objects)
block_name_prefix: rbd_data.422076b8b4567
format: 2
features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
flags:
create_timestamp: Sat May ...

root@cu-pve04:~# rbd ls kycrbd --format json --pretty-format
[
"base-101-disk-0",
"vm-100-disk-0",
"vm-102-disk-0",
"vm-103-disk-0",
"vm-104-disk-0",
"vm-105-disk-0",
"vm-106-disk-0"
]
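The JSON output is the easiest form to script against. A sketch that runs rbd du for every image in the pool, assuming jq is installed:

rbd ls kycrbd --format json | jq -r '.[]' | while read -r img; do
    rbd du "kycrbd/${img}"
done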
root@cu-pve04:~# rbd ls kycrbd --format xml --pretty-format
<images>
    <name>base-101-disk-0</name>
    <name>vm-100-disk-0</name>
    <name>vm-102-disk-0</name>
    <name>vm-103-disk-0</name>
    <name>vm-104-disk-0</name>
    <name>vm-105-disk-0</name>
    <name>vm-106-disk-0</name>
</images>

root@cu-pve04:~# rbd ls kycrbd --format xml
<images><name>base-101-disk-0</name><name>vm-100-disk-0</name><name>vm-102-disk-0</name><name>vm-103-disk-0</name><name>vm-104-disk-0</name><name>vm-105-disk-0</name><name>vm-106-disk-0</name></images>
root@cu-pve04:~# rbd ls kycrbd --format json
["base-101-disk-0","vm-100-disk-0","vm-102-disk-0","vm-103-disk-0","vm-104-disk-0","vm-105-disk-0","vm-106-disk-0"]

rados

rados -h
radosgw-admin -h
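Besides listing pools and objects, rados can read and write raw objects directly. A sketch using throwaway names (testobj and the /tmp paths are made up):

echo hello > /tmp/hello.txt
rados -p rbdpool put testobj /tmp/hello.txt
rados -p rbdpool stat testobj
rados -p rbdpool get testobj /tmp/hello.out
rados -p rbdpool rm testobj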

[root@ceph1 ~]# rados lspools
mypool
[root@ceph1 ~]# rados df
POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR
mypool     B        B       B

total_objects
total_used 3.0 GiB
total_avail GiB
total_space  GiB

[root@ali- ~]# rados df
POOL_NAME USED OBJECTS CLONES COPIES MISSING_ON_PRIMARY UNFOUND DEGRADED RD_OPS RD WR_OPS WR
pool-d83c6154956b44aea7639c7bd4c45c65 2001M  11819M  6488M

total_objects
total_used 44969M
total_avail 53608G
total_space  53652G

[root@ali- ~]# rados -p pool-d83c6154956b44aea7639c7bd4c45c65 ls
xbd_data.618177.0000000000000014
...

[root@ceph1 ~]# ceph osd lspools
mypool
.rgw.root
default.rgw.control
default.rgw.meta
default.rgw.log
cfs_data
cfs_meta
rbdpool

List the objects in a pool:
[root@ceph1 ~]# rados -p cfs_data ls
10000000005.00000000
10000000006.00000000
[root@ceph1 ~]# rados -p cfs_meta ls
601.00000000
602.00000000
600.00000000
603.00000000
1.00000000.inode
200.00000000
200.00000001
606.00000000
607.00000000
mds0_openfiles.
608.00000000
500.00000001
604.00000000
500.00000000
mds_snaptable
605.00000000
mds0_inotable
100.00000000
mds0_sessionmap
609.00000000
400.00000000
100.00000000.inode
1.00000000
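CephFS data objects (the cfs_data entries above) are named after the owning file's inode number in hex plus an object index, so an object can be traced back to its file. A sketch, assuming the filesystem is mounted at /mnt:

printf '%d\n' 0x10000000005                   # hex inode -> decimal
find /mnt -inum $(printf '%d' 0x10000000005)  # locate the file with that inode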
[root@ceph1 ~]# rados -p rbdpool ls
rbd_directory
rbd_id.rbdimage
rbd_info
rbd_header.12926b8b4567
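rbd_directory, rbd_id.<image> and rbd_header.<id> are RBD metadata objects; data blocks only appear as rbd_data.<id>.* objects once something is written to the image (none yet here, since images are thin provisioned). To cross-check the prefix against the image:

rbd info rbdpool/rbdimage | grep block_name_prefix
rados -p rbdpool ls | grep rbd_data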
