Code adds better support for monitoring of thin pool devices.
update_pool_lv uses DMEVENTD_MONITOR_IGNORE so that it does not manipulate monitoring.
vgchange & lvchange check the real thin pool device for existence,
as we are using the real _tpool device and the visible pool LV might not
even be active (_tpool is activated implicitly for any thin volume).
monitor_dev_for_events is another piece of _lv_postorder-like code; it might
be worth thinking about reusing _lv_postorder here - for now, update the code
to properly monitor thin volume dependencies.
For unmonitoring, add extra code to check whether the thin pool is still in
use - if it is, unmonitoring of the thin volume is skipped.
Version 2.02.96 -
================================
+ Update and fix monitoring of thin pool devices.
Check hash insert success in lock_vg clvmd.
Check for buffer overwrite in get_cluster_type() clvmd.
Fix global/detect_internal_vg_cache_corruption config check.
int (*monitor_fn) (struct lv_segment *s, int e);
uint32_t s;
static const struct lv_activate_opts zlaopts = { 0 };
+ static const struct lv_activate_opts thinopts = { .skip_in_use = 1 };
+ struct lvinfo info;
if (!laopts)
laopts = &zlaopts;
if (monitor && !dmeventd_monitor_mode())
return 1;
+ /*
+ * Allow to unmonitor thin pool via explicit pool unmonitor
+ * or unmonitor before the last thin pool user deactivation
+ * Skip unmonitor, if invoked via unmonitor of thin volume
+ * and there is another thin pool user (open_count > 1)
+ */
+ if (laopts->skip_in_use && lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) &&
+ (info.open_count != 1)) {
+ log_debug("Skipping unmonitor of opened %s (open:%d)",
+ lv->name, info.open_count);
+ return 1;
+ }
+
/*
* In case of a snapshot device, we monitor lv->snapshot->lv,
* not the actual LV itself.
}
}
+ /*
+ * If requested unmonitoring of thin volume, request test
+ * if there is no other thin pool user
+ *
+ * FIXME: code here looks like _lv_postorder()
+ */
+ if (seg->pool_lv &&
+ !monitor_dev_for_events(cmd, seg->pool_lv,
+ (!monitor) ? &thinopts : NULL, monitor))
+ r = 0;
+
+ if (seg->metadata_lv &&
+ !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor))
+ r = 0;
+
if (!seg_monitored(seg) || (seg->status & PVMOVE))
continue;
int no_merging;
int real_pool;
int is_activate;
+ int skip_in_use;
unsigned revert;
unsigned read_only;
};
int update_pool_lv(struct logical_volume *lv, int activate)
{
+ int monitored;
+
if (!lv_is_thin_pool(lv)) {
log_error(INTERNAL_ERROR "Updated LV %s is not pool.", lv->name);
return 0;
if (activate) {
/* If the pool is not active, do activate deactivate */
if (!lv_is_active(lv)) {
+ monitored = dmeventd_monitor_mode();
+ init_dmeventd_monitor(DMEVENTD_MONITOR_IGNORE);
if (!activate_lv_excl(lv->vg->cmd, lv))
return_0;
if (!deactivate_lv(lv->vg->cmd, lv))
return_0;
+ init_dmeventd_monitor(monitored);
}
/*
* Resume active pool to send thin messages.
{
struct lvinfo info;
- if (!lv_info(cmd, lv, 0, &info, 0, 0) || !info.exists) {
+ if (!lv_info(cmd, lv, lv_is_thin_pool(lv) ? 1 : 0,
+ &info, 0, 0) || !info.exists) {
log_error("Logical volume, %s, is not active", lv->name);
return 0;
}
struct lv_list *lvl;
struct logical_volume *lv;
struct lvinfo info;
- int lv_active;
int r = 1;
dm_list_iterate_items(lvl, &vg->lvs) {
lv = lvl->lv;
- if (!lv_info(cmd, lv, 0, &info, 0, 0))
- lv_active = 0;
- else
- lv_active = info.exists;
-
+ if (!lv_info(cmd, lv, lv_is_thin_pool(lv) ? 1 : 0,
+ &info, 0, 0) ||
+ !info.exists)
+ continue;
/*
* FIXME: Need to consider all cases... PVMOVE, etc
*/
- if ((lv->status & PVMOVE) || !lv_active)
+ if (lv->status & PVMOVE)
continue;
if (!monitor_dev_for_events(cmd, lv, 0, reg)) {