Don't change resilience of raid1 LVs without --yes.
Adjust respective tests.
const uint32_t new_region_size, struct dm_list *allocate_pvs)
{
struct lv_segment *seg = first_seg(lv);
+ const char *level = seg->area_count == 1 ? "raid1 with " : "";
+ const char *resil = new_count == 1 ? " losing all" : (new_count < seg->area_count ? "s reducing" : "s enhancing");
+ if (!yes && yes_no_prompt("Are you sure you want to convert %s LV %s to %s%u image%s resilience? [y/n]: ",
+ lvseg_name(first_seg(lv)), display_lvname(lv), level, new_count, resil) == 'n') {
+ log_error("Logical volume %s NOT converted.", display_lvname(lv));
+ return 0;
+ }
if (new_region_size) {
seg->region_size = new_region_size;
_check_and_adjust_region_size(lv);
# Converting to linear should clear flags and writebehind
not lvconvert -m 0 $vg/$lv $d1
lvconvert -y -m 0 $vg/$lv $d1
- lvconvert --type raid1 -m 1 $vg/$lv $d1
+ lvconvert -y --type raid1 -m 1 $vg/$lv $d1
check lv_field $vg/$lv raid_write_behind ""
check lv_attr_bit health $vg/${lv}_rimage_0 "-"
check lv_attr_bit health $vg/${lv}_rimage_1 "-"
printf "#\n#\n# run_checks: RAID as thinpool data\n#\n#\n"
# Hey, specifying devices for thin allocation doesn't work
-# lvconvert --thinpool $1/$2 "$dev6"
+# lvconvert -y --thinpool $1/$2 "$dev6"
lvcreate -aey -L 2M -n ${2}_meta $1 "$dev6"
lvconvert --thinpool $1/$2 --poolmetadata ${2}_meta
lvcreate -T $1/$2 -V 1 -n thinlv
lvrename $1/$2 ${2}_meta
lvcreate -aey -L 2M -n $2 $1 "$dev6"
- lvconvert --thinpool $1/$2 --poolmetadata ${2}_meta
+ lvconvert -y --thinpool $1/$2 --poolmetadata ${2}_meta
lvcreate -T $1/$2 -V 1 -n thinlv
THIN_POSTFIX="_tmeta"
lvchange --syncaction repair $vg/cpool_cdata
aux wait_for_sync $vg cpool_cdata
-lvconvert --repair -y $vg/cpool_cmeta
-lvconvert --repair -y $vg/cpool_cdata
+lvconvert -y --repair $vg/cpool_cmeta
+lvconvert -y --repair $vg/cpool_cdata
# do not allow reserved names for *new* LVs
not lvconvert --splitmirrors 1 --name split_cmeta $vg/cpool_cmeta "$dev1"
lvcreate --type cache-pool $vg/cpool -l 10
lvcreate -n corigin -H $vg/cpool -l 20
-lvconvert -m+1 --type raid1 $vg/cpool_cmeta
+lvconvert -y -m +1 --type raid1 $vg/cpool_cmeta
check lv_field $vg/cpool_cmeta layout "raid,raid1"
check lv_field $vg/cpool_cmeta role "private,cache,pool,metadata"
-lvconvert -m+1 --type raid1 $vg/cpool_cdata
+lvconvert -y -m +1 --type raid1 $vg/cpool_cdata
check lv_field $vg/cpool_cdata layout "raid,raid1"
check lv_field $vg/cpool_cdata role "private,cache,pool,data"
-not lvconvert -m-1 $vg/cpool_cmeta
-lvconvert -y -m-1 $vg/cpool_cmeta
+not lvconvert -m -1 $vg/cpool_cmeta
+lvconvert -y -m -1 $vg/cpool_cmeta
check lv_field $vg/cpool_cmeta layout "linear"
-lvconvert -y -m-1 $vg/cpool_cdata
+lvconvert -y -m -1 $vg/cpool_cdata
check lv_field $vg/cpool_cdata layout "linear"
lvremove -f $vg
lvcreate -aey -l 4 -n $lv1 $vg "$dev1:0-1" "$dev2:0-1"
not lvconvert --type raid1 -m 1 $vg/$lv1 "$dev1" "$dev2"
not lvconvert --type raid1 -m 1 $vg/$lv1 "$dev1" "$dev3:0-2"
-lvconvert --type raid1 -m 1 $vg/$lv1 "$dev3"
+lvconvert -y --type raid1 -m 1 $vg/$lv1 "$dev3"
not lvconvert -m 0 $vg/$lv1
lvconvert -y -m 0 $vg/$lv1
# RAID conversions are not honoring allocation policy!
-# lvconvert --type raid1 -m 1 --alloc anywhere $vg/$lv1 "$dev1" "$dev2"
+# lvconvert -y --type raid1 -m 1 --alloc anywhere $vg/$lv1 "$dev1" "$dev2"
lvremove -ff $vg
# Should not be enough non-overlapping space.
not lvconvert -m +1 $vg/$lv1 \
"$dev5:0-1" "$dev1" "$dev2" "$dev3" "$dev4"
-lvconvert -m +1 $vg/$lv1 "$dev5"
+lvconvert -y -m +1 $vg/$lv1 "$dev5"
not lvconvert -m 0 $vg/$lv1
lvconvert -y -m 0 $vg/$lv1
# Should work due to '--alloc anywhere'
# RAID conversion not honoring allocation policy!
-#lvconvert -m +1 --alloc anywhere $vg/$lv1 \
+#lvconvert -y -m +1 --alloc anywhere $vg/$lv1 \
# "$dev5:0-1" "$dev1" "$dev2" "$dev3" "$dev4"
lvremove -ff $vg
"$dev1:0-1" "$dev2:0-1" "$dev3:0-1" "$dev4:0-1"
aux wait_for_sync $vg $lv1
aux disable_dev "$dev1"
-lvconvert --repair -y $vg/$lv1 "$dev2" "$dev3" "$dev4"
+lvconvert -y --repair $vg/$lv1 "$dev2" "$dev3" "$dev4"
#FIXME: ensure non-overlapping images (they should not share PVs)
aux enable_dev "$dev1"
lvremove -ff $vg
fsck -fn "$DM_DEV_DIR/$vg/$lv1"
# Convert 3-way to 4-way mirror
-lvconvert -m 3 $vg/$lv1
+lvconvert -y -m 3 $vg/$lv1
detect_error_leak_
check lv_field $vg/$lv1 segtype "mirror"
check lv_field $vg/$lv1 stripes 4
fsck -fn "$DM_DEV_DIR/$vg/$lv1"
## Convert 4-way raid1 to 5-way
-lvconvert -m 4 -R 128K $vg/$lv1
+lvconvert -y -m 4 -R 128K $vg/$lv1
detect_error_leak_
check lv_field $vg/$lv1 segtype "raid1"
check lv_field $vg/$lv1 stripes 5
mirrors=$((j - 1))
if [ $i -eq 1 ]
then
- [ $mirrors -eq 0 ] && lvconvert -m $mirrors $vg/$lv1
+ [ $mirrors -eq 0 ] && lvconvert -y -m $mirrors $vg/$lv1
else
if [ $mirrors -eq 0 ]
then
not lvconvert -m $mirrors $vg/$lv1
lvconvert -y -m $mirrors $vg/$lv1
else
- lvconvert -m $mirrors $vg/$lv1
+ lvconvert -y -m $mirrors $vg/$lv1
fi
fi
not lvconvert -m +1 $vg/$lv1
lvchange --resync -y $vg/$lv1
aux wait_for_sync $vg $lv1
-lvconvert -m +1 $vg/$lv1
+lvconvert -y -m +1 $vg/$lv1
lvremove -ff $vg
# 3-way to 2-way convert while specifying devices
lvcreate --type raid1 -m 2 -l 2 -n $lv1 $vg "$dev1" "$dev2" "$dev3"
aux wait_for_sync $vg $lv1
-lvconvert -m1 $vg/$lv1 "$dev2"
+lvconvert -y -m 1 $vg/$lv1 "$dev2"
lvremove -ff $vg
#
# Linear to RAID1 conversion ("raid1" default segtype)
###########################################
lvcreate -aey -l 2 -n $lv1 $vg
-lvconvert -m 1 $vg/$lv1 \
+lvconvert -y -m 1 $vg/$lv1 \
--config 'global { mirror_segtype_default = "raid1" }'
lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*r'
lvremove -ff $vg
# Linear to RAID1 conversion (override "mirror" default segtype)
###########################################
lvcreate -aey -l 2 -n $lv1 $vg
-lvconvert --type raid1 -m 1 $vg/$lv1 \
+lvconvert --yes --type raid1 -m 1 $vg/$lv1 \
--config 'global { mirror_segtype_default = "mirror" }'
lvs --noheadings -o attr $vg/$lv1 | grep '^[[:space:]]*r'
lvremove -ff $vg
###########################################
if [ -e LOCAL_CLVMD ]; then
lvcreate -l 2 -n $lv1 $vg
- not lvconvert --type raid1 -m 1 $vg/$lv1 \
+ not lvconvert -y --type raid1 -m 1 $vg/$lv1 \
--config 'global { mirror_segtype_default = "mirror" }'
lvremove -ff $vg
fi
for i in 1 2 3 ; do
lvcreate -aey --type mirror -m $i -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
- lvconvert --type raid1 $vg/$lv1
+ lvconvert -y --type raid1 $vg/$lv1
lvremove -ff $vg
done
lvconvert --merge $vg/${lv1}_tdata_rimage_1
lvconvert --merge $vg/${lv1}_tmeta_rimage_1
-lvconvert -m+1 $vg/${lv1}_tdata "$dev2"
-lvconvert -m+1 $vg/${lv1}_tmeta "$dev1"
+lvconvert -y -m +1 $vg/${lv1}_tdata "$dev2"
+lvconvert -y -m +1 $vg/${lv1}_tmeta "$dev1"
vgremove -ff $vg
# Convert large 200 TiB linear to RAID1 (belong in different test script?)
#
lvcreate -aey -L 200T -n $lv1 $vg1
-lvconvert --type raid1 -m 1 $vg1/$lv1
+lvconvert -y --type raid1 -m 1 $vg1/$lv1
check lv_field $vg1/$lv1 size "200.00t"
check raid_leg_status $vg1 $lv1 "aa"
lvremove -ff $vg1