]> sourceware.org Git - lvm2.git/commitdiff
vdo: enhance activation with layer -vpool
authorZdenek Kabelac <zkabelac@redhat.com>
Fri, 13 Sep 2019 23:13:33 +0000 (01:13 +0200)
committerZdenek Kabelac <zkabelac@redhat.com>
Tue, 17 Sep 2019 11:17:19 +0000 (13:17 +0200)
Enhance the 'activation' experience for a VDO pool to more closely match
what happens for thin-pools, where we use a 'fake' LV to keep the pool
running even when no thin LVs are active. This gives the user a choice
whether to keep the thin-pool running (without a possibly lengthy
activation/deactivation process).

As we plan to support multiple VDO LVs mapped into a single VDO pool,
we want to give the user the same experience and 'use-pattern' as with thin-pools.

This patch gives option to activate VDO pool only without activating
VDO LV.

Also, due to the 'fake' layering LV, we can protect the VDO pool from
commands like 'mkfs' which require exclusive access to the volume,
since such access is no longer possible.

Note: VDO pool contains 1024 initial sectors as 'empty' header - such
header is also exposed in layered LV (as read-only LV).
For blkid we are identified as an LV with a UUID suffix - thus a private DM
device of lvm2 - so we do not need to store any extra info in this
header space (i.e. zero is good enough).

WHATS_NEW
lib/activate/activate.c
lib/activate/dev_manager.c
lib/metadata/vdo_manip.c
lib/misc/lvm-string.c
tools/lvchange.c

index 81a73ef970cb59aec3af6c8ab477e028da51505c..a9bd7501751e80c6cbb28703674d02a05281129f 100644 (file)
--- a/WHATS_NEW
+++ b/WHATS_NEW
@@ -1,5 +1,6 @@
 Version 2.03.06 - 
 ================================
+  Allow standalone activation of VDO pool just like for thin-pools.
   Activate thin-pool layered volume as 'read-only' device.
   Ignore crypto devices with UUID signature CRYPT-SUBDEV.
   Enhance validation for thin and cache pool conversion and swapping.
index 38e21be6a9c243450d61ad49562ad7f08315fc78..29cd2d3aad66a5087ffd814b9854f07c82285141 100644 (file)
@@ -794,6 +794,18 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
                return 1;
        }
 
+       if (lv_is_vdo_pool(lv)) {
+               /* Always collect status for '-vpool' */
+               if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0) &&
+                   (status->seg_status.type == SEG_STATUS_VDO_POOL)) {
+                       /* There is -tpool device, but query 'active' state of 'fake' vdo-pool */
+                       if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0))
+                               status->info.exists = 0; /* So VDO pool LV is not active */
+               }
+
+               return 1;
+       }
+
        return _lv_info(cmd, lv, 0, &status->info, lv_seg, &status->seg_status,
                        with_open_count, with_read_ahead);
 }
@@ -1342,7 +1354,7 @@ int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
        int r = 0;
        struct dev_manager *dm;
 
-       if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
+       if (!lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
                return 0;
 
        log_debug_activation("Checking VDO pool status for LV %s.",
index 32fdcb94f790ca8c7f1a1261ad7b719ed183fb64..5ee5efe20524815647393ab9f9552157b4933d2c 100644 (file)
@@ -1991,7 +1991,7 @@ static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_vol
                /* New thin-pool is regular LV with -tpool UUID suffix. */
                udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
                              DM_UDEV_DISABLE_OTHER_RULES_FLAG;
-       else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv))
+       else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
                udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
                              DM_UDEV_DISABLE_DISK_RULES_FLAG |
                              DM_UDEV_DISABLE_OTHER_RULES_FLAG;
@@ -2611,6 +2611,15 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                }
        }
 
+       if (lv_is_vdo_pool(lv)) {
+               /*
+                * For both origin_only and !origin_only
+                * skips test for -vpool-real and vpool-cow
+                */
+               if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
+                       return_0;
+       }
+
        if (lv_is_cache(lv)) {
                if (!origin_only && !dm->activation && !dm->track_pending_delete) {
                        /* Setup callback for non-activation partial tree */
@@ -2682,7 +2691,8 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                        if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s) &&
                            /* origin only for cache without pending delete */
                            (!dm->track_pending_delete || !lv_is_cache(lv)) &&
-                           !_add_lv_to_dtree(dm, dtree, seg_lv(seg, s), 0))
+                           !_add_lv_to_dtree(dm, dtree, seg_lv(seg, s),
+                                             lv_is_vdo_pool(seg_lv(seg, s)) ? 1 : 0))
                                return_0;
                        if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
                            !_add_lv_to_dtree(dm, dtree, seg_metalv(seg, s), 0))
@@ -2908,8 +2918,11 @@ static int _add_layer_target_to_dtree(struct dev_manager *dm,
        if (!(layer_dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
                return_0;
 
+
        /* Add linear mapping over layered LV */
-       if (!add_linear_area_to_dtree(dnode, lv->size, lv->vg->extent_size,
+       /* From VDO layer expose ONLY vdo pool header, we would need to use virtual size otherwise */
+       if (!add_linear_area_to_dtree(dnode, lv_is_vdo_pool(lv) ? first_seg(lv)->vdo_pool_header_size : lv->size,
+                                     lv->vg->extent_size,
                                      lv->vg->cmd->use_linear_target,
                                      lv->vg->name, lv->name) ||
            !dm_tree_node_add_target_area(dnode, NULL, layer_dlid, 0))
@@ -3132,7 +3145,9 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
                    /* origin only for cache without pending delete */
                    (!dm->track_pending_delete || !seg_is_cache(seg)) &&
                    !_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
-                                         laopts, NULL))
+                                         laopts,
+                                         lv_is_vdo_pool(seg_lv(seg, s)) ?
+                                         lv_layer(seg_lv(seg, s)) : NULL))
                        return_0;
                if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
                    !lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
@@ -3424,8 +3439,9 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                if (!_add_snapshot_target_to_dtree(dm, dnode, lv, laopts))
                        return_0;
        } else if (!layer && ((lv_is_thin_pool(lv) && !lv_is_new_thin_pool(lv)) ||
+                              lv_is_vdo_pool(lv) ||
                              lv_is_external_origin(lv))) {
-               /* External origin or 'used' Thin pool is using layer */
+               /* External origin or 'used' Thin pool or VDO pool is using layer */
                if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, lv_layer(lv)))
                        return_0;
                if (!_add_layer_target_to_dtree(dm, dnode, lv))
@@ -3438,6 +3454,10 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                        if (max_stripe_size < seg->stripe_size * seg->area_count)
                                max_stripe_size = seg->stripe_size * seg->area_count;
                }
+
+               if (!layer && lv_is_vdo_pool(lv) &&
+                   !_add_layer_target_to_dtree(dm, dnode, lv))
+                       return_0;
        }
 
        /* Setup thin pool callback */
@@ -3705,7 +3725,10 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
                /* Add all required new devices to tree */
                if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts,
                                          (lv_is_origin(lv) && laopts->origin_only) ? "real" :
-                                         (lv_is_thin_pool(lv) && laopts->origin_only) ? "tpool" : NULL))
+                                         (laopts->origin_only &&
+                                          (lv_is_thin_pool(lv) ||
+                                           lv_is_vdo_pool(lv))) ?
+                                         lv_layer(lv) : NULL))
                        goto_out;
 
                /* Preload any devices required before any suspensions */
index 4be9d2bb3d3b714409f2071fbada879371d8016b..548b4ad8fcd19092444df1c6ce8d896bcedb71e2 100644 (file)
@@ -159,7 +159,7 @@ int parse_vdo_pool_status(struct dm_pool *mem, const struct logical_volume *vdo_
        status->data_usage = DM_PERCENT_INVALID;
 
        if (!(dm_name = dm_build_dm_name(mem, vdo_pool_lv->vg->name,
-                                        vdo_pool_lv->name, NULL))) {
+                                        vdo_pool_lv->name, lv_layer(vdo_pool_lv)))) {
                log_error("Failed to build VDO DM name %s.",
                          display_lvname(vdo_pool_lv));
                return 0;
index 901243c602d725f1a6f742b3e5e108e40cddee72..fe24f23d725cc1de9281acfc876667de31cf995d 100644 (file)
@@ -259,7 +259,7 @@ char *build_dm_uuid(struct dm_pool *mem, const struct logical_volume *lv,
                        lv_is_thin_pool(lv) ? "pool" :
                        lv_is_thin_pool_data(lv) ? "tdata" :
                        lv_is_thin_pool_metadata(lv) ? "tmeta" :
-                       lv_is_vdo_pool(lv) ? "vpool" :
+                       lv_is_vdo_pool(lv) ? "pool" :
                        lv_is_vdo_pool_data(lv) ? "vdata" :
                        NULL;
        }
index c28a7bb9c307a5dcfbdda9ab7c02d0c0c2dc76a7..03a7793f55f680e6abf97f4c14522f1295c7f670 100644 (file)
@@ -1400,9 +1400,6 @@ static int _lvchange_activate_check(struct cmd_context *cmd,
                return 0;
        }
 
-       if (lv_is_vdo_pool(lv) && !lv_is_named_arg)
-               return 0;       /* Skip VDO pool processing unless explicitely named */
-
        return 1;
 }
 
This page took 0.053526 seconds and 5 git commands to generate.