A Bit of Linux Every Day - 17: LVM Management
LVM Management
- Logical volumes can span disks: they break the hard limits of the basic partitioning scheme and logically combine several disks into what looks like a single disk.
- Physical volume (PV): usually a partition or a whole disk is turned into a physical volume. The smallest allocation unit of a PV is the PE (physical extent), 4 MiB by default.
- Volume group (VG): several physical volumes combined together form a volume group.
- Logical volume (LV): simply a further subdivision of a volume group. Its smallest unit is the LE (logical extent), which has the same size as a PE and maps to it one-to-one. Logical and physical volumes are not fundamentally different; they are just views of the same storage at different layers.
Basic partition (MBR|GPT) ----> Filesystem ----> mount
Logical volume ----> Filesystem ----> mount
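The two paths above differ only in what carries the filesystem: a plain partition or an LVM logical volume. As a quick orientation before the step-by-step walkthrough, here is a minimal end-to-end sketch of the LVM path; the disk /dev/sdb and the names vg0/data are placeholders and are not part of the session below.
[root@server0 ~]# pvcreate /dev/sdb            # initialize the disk as a physical volume
[root@server0 ~]# vgcreate vg0 /dev/sdb        # pool it into a volume group
[root@server0 ~]# lvcreate -L 1G -n data vg0   # carve a 1 GiB logical volume out of the pool
[root@server0 ~]# mkfs.xfs /dev/vg0/data       # put a filesystem on the LV
[root@server0 ~]# mount /dev/vg0/data /mnt     # mount it like any other block device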
1. Creating LVM
Prepare the physical disks
These can be whole disks or partitions, e.g. /dev/sdb or /dev/sdc1 (they may also sit on top of RAID).
[root@server0 ~]# ll /dev/vd{c,d,e}
brw-rw----. 1 root disk 253, 32 Jun 6 17:38 /dev/vdc
brw-rw----. 1 root disk 253, 48 Jun 6 17:38 /dev/vdd
brw-rw----. 1 root disk 253, 64 Jun 6 17:38 /dev/vde
Create the physical volume (pv)
[root@server0 ~]# pvcreate /dev/vdd
Physical volume "/dev/vdd" successfully created
[root@server0 ~]# pvscan
PV /dev/vdd lvm2 [2.00 GiB]
Total: 1 [2.00 GiB] / in use: 0 [0 ] / in no VG: 1 [2.00 GiB]
[root@server0 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vdd lvm2 a-- 2.00g 2.00g
Create the volume group (vg)
[root@server0 ~]# vgcreate vg1 /dev/vdd
Volume group "vg1" successfully created
[root@server0 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg1 1 0 0 wz--n- 2.00g 2.00g
[root@server0 ~]# vgscan
Reading all physical volumes. This may take a while...
Found volume group "vg1" using metadata type lvm2
[root@server0 ~]# vgdisplay
--- Volume group ---
VG Name vg1
System ID
Format lvm2
Metadata Areas 1
Metadata Sequence No 1
VG Access read/write
VG Status resizable
MAX LV 0
Cur LV 0
Open LV 0
Max PV 0
Cur PV 1
Act PV 1
VG Size 2.00 GiB
PE Size 4.00 MiB
Total PE 511
Alloc PE / Size 0 / 0
Free PE / Size 511 / 2.00 GiB
VG UUID 7E4tlj-l0a2-ph52-OytH-eaq7-58K6-2S4n8V
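The vgdisplay numbers tie together: 511 PEs × 4 MiB per PE = 2044 MiB, which vgdisplay rounds to a 2.00 GiB VG size; the missing extent is lost to the LVM metadata area at the start of the PV. The same figures can also be pulled out as report columns (standard lvm2 field names):
[root@server0 ~]# vgs -o vg_name,vg_extent_size,vg_extent_count,vg_free_count vg1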
Create the logical volumes (lv)
[root@server0 ~]# lvcreate -l 10 -n lv1 vg1
[root@server0 ~]# lvcreate -L 200M -n lv2 vg1
[root@server0 ~]# lvscan
ACTIVE '/dev/vg1/lv1' [640.00 MiB] inherit
ACTIVE '/dev/vg1/lv2' [256.00 MiB] inherit
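The two lvcreate calls above show the two ways of giving a size: -l counts physical extents while -L takes an absolute size, and either way the result is rounded up to whole extents. A small sketch of the variants, using a hypothetical lv3 that is not part of this walkthrough:
[root@server0 ~]# lvcreate -l 25 -n lv3 vg1        # 25 extents; with 4 MiB PEs that is 100 MiB
[root@server0 ~]# lvcreate -L 100M -n lv3 vg1      # the same request as an absolute size
[root@server0 ~]# lvcreate -l 50%FREE -n lv3 vg1   # -l also accepts a percentage of the VG's free space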
Create the filesystems and mount them
[root@server0 ~]# mkfs.xfs /dev/vg1/lv1
[root@server0 ~]# mkfs.ext4 /dev/vg1/lv2
[root@server0 ~]# mkdir /mnt/lv1 /mnt/lv2
[root@server0 ~]# vim /etc/fstab
/dev/vg1/lv1 /mnt/lv1 xfs defaults 0 0
/dev/vg1/lv2 /mnt/lv2 ext4 defaults 0 0
[root@server0 ~]# mount -a
[root@server0 ~]# df
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/mapper/vg1-lv1 651948 32928 619020 6% /mnt/lv1
/dev/mapper/vg1-lv2 245671 2062 226406 1% /mnt/lv2
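Not part of the original session, but a quick way to double-check the result is to look at the stack from the bottom up: lsblk shows the LVs layered on the backing disk, and blkid confirms the filesystem on each LV.
[root@server0 ~]# lsblk /dev/vdd        # vdd with vg1-lv1 and vg1-lv2 stacked on top of it
[root@server0 ~]# blkid /dev/vg1/lv1    # filesystem type and UUID of the new LV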
2. Managing VGs
Extending a VG: vgextend
- pv
[root@server0 ~]# pvcreate /dev/vde
- vgextend
[root@server0 ~]# vgextend vg1 /dev/vde
Volume group "vg1" successfully extended
[root@server0 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg1 2 2 0 wz--n- 3.99g 3.76g
Shrinking a VG: vgreduce
1. Check how the PVs in the VG are currently being used
[root@server0 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vdd vg1 lvm2 a-- 2.00g 1.76g
/dev/vde vg1 lvm2 a-- 2.00g 2.00g
2. Move the data off the PV with pvmove
[root@server0 ~]# pvmove /dev/vdd
/dev/vdd: Moved: 16.7%
/dev/vdd: Moved: 100.0%
[root@server0 ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/vdd vg1 lvm2 a-- 2.00g 2.00g
/dev/vde vg1 lvm2 a-- 2.00g 1.76g
3. Remove the PV from the VG with vgreduce
[root@server0 ~]# vgreduce vg1 /dev/vdd
Removed "/dev/vdd" from volume group "vg1"
[root@server0 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg1 1 2 0 wz--n- 2.00g 1.76g
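After vgreduce, /dev/vdd still carries an LVM label even though it no longer belongs to any VG. If the disk is being retired from LVM altogether, the label can be wiped as an optional final step:
[root@server0 ~]# pvremove /dev/vdd   # wipe the LVM label so the disk can be reused elsewhere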
3. Extending an LV
Grow the LV
[root@server0 ~]# vgs
VG #PV #LV #SN Attr VSize VFree
vg1 2 2 0 wz--n- 1.88g 1.00g
[root@server0 ~]# lvextend -L 800M /dev/vg1/lv1       # grow lv1 to an absolute size of 800 MiB
[root@server0 ~]# lvextend -L +800M /dev/vg1/lv1      # or grow it by a further 800 MiB
[root@server0 ~]# lvextend -l 15 /dev/vg1/lv1         # or give the target size as a number of extents
[root@server0 ~]# lvextend -l +15 /dev/vg1/lv1        # or add 15 extents
[root@server0 ~]# lvextend -l +50%FREE /dev/vg1/lv1   # or add 50% of the VG's free space
[root@server0 ~]# lvscan
ACTIVE '/dev/vg1/lv1' [768.00 MiB] inherit
ACTIVE '/dev/vg1/lv2' [512.00 MiB] inherit
Extend the filesystem
[root@server0 ~]# df -Th
/dev/mapper/vg1-lv1 xfs 637M 67M 570M 11% /mnt/lv1
/dev/mapper/vg1-lv2 ext4 240M 32M 192M 15% /mnt/lv2
a. xfs
[root@server0 ~]# xfs_growfs /dev/vg1/lv1
b. ext2/3/4
[root@server0 ~]# resize2fs /dev/vg1/lv2
[root@server0 ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/vg1-lv1 xfs 765M 67M 698M 9% /mnt/lv1
/dev/mapper/vg1-lv2 ext4 488M 32M 429M 7% /mnt/lv2
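The two steps (grow the LV, then grow the filesystem) can usually be combined: lvextend's -r/--resizefs option calls the matching filesystem resize tool right after extending the LV. A sketch of the one-step form:
[root@server0 ~]# lvextend -r -L +200M /dev/vg1/lv1   # extend the LV and grow its filesystem in one command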
4. LVM snapshots
Create a snapshot (ext4)
[root@server0 ~]# lvcreate -L 128M -s -n lv2-snap /dev/vg1/lv2
[root@server0 ~]# lvs
LV VG Attr LSize Pool Origin Data% Move Log Cpy%Sync Convert
lv1 vg1 -wi-ao---- 768.00m
lv2 vg1 owi-aos--- 512.00m
lv2-snap vg1 swi-aos--- 128.00m lv2 5.92
[root@server0 ~]# mount -o ro /dev/vg1/lv2-snap /mnt/lv2-snap/
[root@server0 ~]# df -Th
Filesystem Type Size Used Avail Use% Mounted on
/dev/mapper/vg1-lv2 ext4 488M 32M 429M 7% /mnt/lv2
/dev/mapper/vg1-lv2--snap ext4 488M 32M 429M 7% /mnt/lv2-snap
[root@server0 ~]# lvscan
ACTIVE '/dev/vg1/lv1' [768.00 MiB] inherit
ACTIVE Original '/dev/vg1/lv2' [512.00 MiB] inherit
ACTIVE Snapshot '/dev/vg1/lv2-snap' [128.00 MiB] inherit
[root@server0 ~]# ls /mnt/lv2
etc lost+found
[root@server0 ~]# ls /mnt/lv2-snap/
etc lost+found
Modify the data on the original volume and observe the snapshot:
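The post does not show the write itself; any change on /mnt/lv2 works, because each origin block that changes is first copied into the snapshot's COW area, which is what pushes Data% up. A hypothetical change of roughly the right size:
[root@server0 ~]# dd if=/dev/zero of=/mnt/lv2/bigfile bs=1M count=40   # ~40 MiB written to the origin volume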
[root@server0 ~]# lvs
LV VG Attr LSize Pool Origin Data% Move Log Cpy%Sync Convert
lv1 vg1 -wi-ao---- 768.00m
lv2 vg1 owi-aos--- 512.00m
lv2-snap vg1 swi-aos--- 128.00m lv2 30.56
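Data% climbing from 5.92 to 30.56 shows the 128 MiB of COW space filling up; a snapshot that runs completely full becomes invalid and can no longer be used. While it is still healthy it can simply be grown like any other LV:
[root@server0 ~]# lvextend -L +64M /dev/vg1/lv2-snap   # give the snapshot more COW space before it fills up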
XFS:
[root@server0 ~]# mount -o nouuid,ro /dev/vg1/lv1-snap /mnt/lv1-snap/
When mounting a snapshot, use the read-only (ro) option whenever possible so the data on the snapshot volume is not altered; for an XFS snapshot, nouuid is also required because the snapshot carries the same filesystem UUID as its origin.
[root@server0 ~]# dmsetup ls --tree
vg1-lv2--snap (252:5)
 ├─vg1-lv2--snap-cow (252:7)
 │  └─ (253:17)
 └─vg1-lv2-real (252:6)
    ├─ (253:17)
    └─ (253:18)
vg1-lv2 (252:1)
 └─vg1-lv2-real (252:6)
    ├─ (253:17)
    └─ (253:18)
Automatic snapshot growth:
[root@server0 ~]# vim /etc/lvm/lvm.conf
snapshot_autoextend_threshold = 70
snapshot_autoextend_percent = 20
[root@server0 ~]# systemctl restart lvm2-monitor.service
[root@server0 ~]# systemctl enable lvm2-lvmetad.service
Note: the snapshot volume and the original volume use space from the same VG.
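With the settings above, the monitoring daemon (dmeventd, started via lvm2-monitor) extends a snapshot by 20% of its current size whenever usage crosses 70%, for as long as the VG has free extents; for the 128 MiB snapshot in this example, growth kicks in at roughly 90 MiB of COW data. Usage can be watched with standard lvs report fields:
[root@server0 ~]# lvs -o lv_name,lv_size,origin,data_percent vg1   # shows each snapshot's origin and how full its COW space is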