A disk was wiped by mistake with the wipefs command, run through ansible, and the command's output below was not captured. When that output is available, the disk can be recovered by a more direct method.
wipefs -a -f /dev/sdb
/dev/sdb: 8 bytes were erased at offset 0x00000218 (LVM2_member): 4c 56 4d 32 20 30 30 31
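wipefs only zeroes the signature bytes it reports, so when this output has been captured, the quickest recovery is to write those bytes straight back. A minimal sketch, assuming the output above (the 8-byte LVM2_member magic "LVM2 001" erased at offset 0x218):

# Hedged sketch: write the erased signature bytes back at the reported offset.
# 4c 56 4d 32 20 30 30 31 is the ASCII string "LVM2 001".
printf '\x4c\x56\x4d\x32\x20\x30\x30\x31' | dd of=/dev/sdb bs=1 seek=$((0x218)) conv=notrunc

Since that output was not captured here, the rest of this post recovers the PV and VG from the LVM metadata backups instead.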
After wipefs, the PV, VG, and LVs are all gone, but the filesystems are still mounted:
# pvs
# vgs
# lvs
# df -h
Filesystem Size Used Avail Use% Mounted on
udev 472M 0 472M 0% /dev
tmpfs 98M 1.2M 97M 2% /run
/dev/sda1 9.6G 1.7G 7.9G 18% /
tmpfs 489M 0 489M 0% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
tmpfs 489M 0 489M 0% /sys/fs/cgroup
/dev/loop0 64M 64M 0 100% /snap/core20/1623
/dev/sda15 105M 5.2M 100M 5% /boot/efi
/dev/mapper/datavg-lv_data1 10G 104M 9.9G 2% /data1
/dev/mapper/datavg-lv_data2 9.0G 97M 8.9G 2% /data2
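As an optional sanity check, blkid should now report nothing for the wiped disk, confirming the LVM2_member signature really is gone:

# Expected to print nothing (and exit non-zero) for the wiped device.
blkid /dev/sdb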
Comment out the affected mounts in /etc/fstab and reboot the host:
# cat /etc/fstab
LABEL=cloudimg-rootfs / ext4 defaults 0 1
LABEL=UEFI /boot/efi vfat umask=0077 0 1
#/dev/mapper/datavg-lv_data1 /data1 xfs defaults 0 0
#/dev/mapper/datavg-lv_data2 /data2 xfs defaults 0 0
# reboot
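Rebooting is the simple way to clear the stale mounts and device-mapper tables. In principle the same can be done online; a hedged sketch, assuming no process is still holding the mounts:

# Unmount the stale filesystems, then drop their device-mapper tables.
umount /data1 /data2
dmsetup remove datavg-lv_data1 datavg-lv_data2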
Inspect the LVM metadata backup and record the id of pv0 (the disk's PV UUID):
# cat /etc/lvm/backup/datavg
# Generated by LVM2 version 2.03.07(2) (2019-11-30): Tue Nov 15 07:06:15 2022
contents = "Text Format Volume Group"
version = 1
description = "Created *after* executing 'vgs'"
creation_host = "ubuntuguest" # Linux ubuntuguest 5.4.0-131-generic #147-Ubuntu SMP Fri Oct 14 17:07:22 UTC 2022 x86_64
creation_time = 1668495975 # Tue Nov 15 07:06:15 2022
datavg {
    id = "biPpie-lyHv-R7Og-u9OH-Z5S2-jVyp-hhZb5e"
    seqno = 7
    format = "lvm2"              # informational
    status = ["RESIZEABLE", "READ", "WRITE"]
    flags = []
    extent_size = 8192           # 4 Megabytes
    max_lv = 0
    max_pv = 0
    metadata_copies = 0

    physical_volumes {

        pv0 {
            id = "5rcocK-1vg0-fzR3-R2dW-ZbVO-eupP-fVf7sB"
            device = "/dev/sdb"  # Hint only
            status = ["ALLOCATABLE"]
            flags = []
            dev_size = 41943040  # 20 Gigabytes
            pe_start = 2048
            pe_count = 5119      # 19.9961 Gigabytes
        }
    }

    logical_volumes {

        lv_data1 {
            id = "OQ3Kqs-tdLh-0ooG-mhhO-wKVU-ekP2-SecDaA"
            status = ["READ", "WRITE", "VISIBLE"]
            flags = []
            creation_time = 1668167654  # 2022-11-11 11:54:14 +0000
            creation_host = "ubuntuguest"
            segment_count = 1

            segment1 {
                start_extent = 0
                extent_count = 2560     # 10 Gigabytes

                type = "striped"
                stripe_count = 1        # linear

                stripes = [
                    "pv0", 0
                ]
            }
        }

        lv_data2 {
            id = "sFNqz2-e0qH-we8k-p5t7-j8yt-d5oT-GWX2QF"
            status = ["READ", "WRITE", "VISIBLE"]
            flags = []
            creation_time = 1668167667  # 2022-11-11 11:54:27 +0000
            creation_host = "ubuntuguest"
            segment_count = 1

            segment1 {
                start_extent = 0
                extent_count = 2304     # 9 Gigabytes

                type = "striped"
                stripe_count = 1        # linear

                stripes = [
                    "pv0", 2560
                ]
            }
        }
    }
}
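If /etc/lvm/backup/datavg were missing or stale, the archived copies could be searched for the same UUID instead. A short sketch; the datavg_*.vg file naming is LVM's default and may differ:

# List every backup/archive file LVM knows about for this VG.
vgcfgrestore --list datavg
# Find archive files that reference the pv0 UUID recorded above.
grep -l '5rcocK-1vg0-fzR3-R2dW-ZbVO-eupP-fVf7sB' /etc/lvm/archive/datavg_*.vg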
Restore the PV:
# /etc/lvm/backup/datavg is the most recent backup of datavg; older backups live in /etc/lvm/archive/
# pvcreate --uuid '5rcocK-1vg0-fzR3-R2dW-ZbVO-eupP-fVf7sB' --restorefile '/etc/lvm/backup/datavg' /dev/sdb
WARNING: Couldn't find device with uuid 5rcocK-1vg0-fzR3-R2dW-ZbVO-eupP-fVf7sB.
Physical volume "/dev/sdb" successfully created.
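At this point pvs should list /dev/sdb again; the VG column stays empty until the group metadata is restored in the next step:

# Sanity check after recreating the PV label.
pvs /dev/sdb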
Restore the VG:
# vgcfgrestore -f /etc/lvm/backup/datavg datavg
Restored volume group datavg.
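If the logical volumes do not come back active on their own, activating the whole VG by hand is an alternative to the service restart shown in the next step:

# Activate every LV in datavg.
vgchange -ay datavg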
Restart the lvm2-pvscan service:
# With multiple PVs there can be multiple lvm2-pvscan instances; check each unit's logs to see which PV it serves.
systemctl restart lvm2-pvscan@8:16.service
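The 8:16 instance name is the MAJ:MIN device number of the PV. lsblk shows the numbers, and listing the units confirms which instances exist:

# /dev/sdb is major 8, minor 16, hence lvm2-pvscan@8:16.service.
lsblk -o NAME,MAJ:MIN /dev/sdb
systemctl list-units 'lvm2-pvscan@*'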
Uncomment the entries in /etc/fstab and mount again:
# cat /etc/fstab
LABEL=cloudimg-rootfs / ext4 defaults 0 1
LABEL=UEFI /boot/efi vfat umask=0077 0 1
/dev/mapper/datavg-lv_data1 /data1 xfs defaults 0 0
/dev/mapper/datavg-lv_data2 /data2 xfs defaults 0 0
# mount -a
Verify that the PV, VG, and LVs have been fully recovered:
# pvs
PV VG Fmt Attr PSize PFree
/dev/sdb datavg lvm2 a-- <20.00g 1020.00m
# vgs
VG #PV #LV #SN Attr VSize VFree
datavg 1 2 0 wz--n- <20.00g 1020.00m
# lvs
LV VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
lv_data1 datavg -wi-ao---- 10.00g
lv_data2 datavg -wi-ao---- 9.00g
# df -h
Filesystem Size Used Avail Use% Mounted on
udev 472M 0 472M 0% /dev
tmpfs 98M 1.2M 97M 2% /run
/dev/sda1 9.6G 1.7G 7.9G 18% /
/dev/loop0 68M 68M 0 100% /snap/lxd/22753
/dev/sda15 105M 5.2M 100M 5% /boot/efi
/dev/mapper/datavg-lv_data1 10G 104M 9.9G 2% /data1
/dev/mapper/datavg-lv_data2 9.0G 97M 8.9G 2% /data2
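To make any future recovery easier, the VG metadata backup can also be refreshed by hand after layout changes (LVM already does this automatically on most metadata operations):

# Rewrite /etc/lvm/backup/datavg from the live metadata.
vgcfgbackup datavg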