Version 2.02.89 -
=================
+ Introduce revert_lv for better pvmove cleanup.
Replace incomplete pvmove activation failure recovery code with a message.
Abort if _finish_pvmove suspend_lvs fails instead of cleaning up incompletely.
Change suspend_lvs to call vg_revert internally.
/* Resume the LV if it was active */
static int do_resume_lv(char *resource, unsigned char lock_flags)
{
- int oldmode, origin_only, exclusive;
+ int oldmode, origin_only, exclusive, revert;
/* Is it open ? */
oldmode = get_current_lock(resource);
}
origin_only = (lock_flags & LCK_ORIGIN_ONLY_MODE) ? 1 : 0;
exclusive = (oldmode == LCK_EXCL) ? 1 : 0;
+ revert = (lock_flags & LCK_REVERT_MODE) ? 1 : 0;
- if (!lv_resume_if_active(cmd, resource, origin_only, exclusive))
+ if (!lv_resume_if_active(cmd, resource, origin_only, exclusive, revert))
return EIO;
return 0;
return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
- unsigned origin_only, unsigned exclusive)
+ unsigned origin_only, unsigned exclusive, unsigned revert)
{
return 1;
}
laopts->origin_only = 0;
if (test_mode()) {
- _skip("Resuming %s%s.", lv->name, laopts->origin_only ? " without snapshots" : "");
+ _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
+ laopts->revert ? " (reverting)" : "");
r = 1;
goto out;
}
- log_debug("Resuming LV %s/%s%s%s.", lv->vg->name, lv->name,
+ log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
error_if_not_active ? "" : " if active",
- laopts->origin_only ? " without snapshots" : "");
+ laopts->origin_only ? " without snapshots" : "",
+ laopts->revert ? " (reverting)" : "");
if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
goto_out;
/* Returns success if the device is not active */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
- unsigned origin_only, unsigned exclusive)
+ unsigned origin_only, unsigned exclusive, unsigned revert)
{
struct lv_activate_opts laopts = {
.origin_only = origin_only,
* non-clustered target should be used. This only happens
* if exclusive is set.
*/
- .exclusive = exclusive
+ .exclusive = exclusive,
+ .revert = revert
};
return _lv_resume(cmd, lvid_s, &laopts, 0);
int exclusive;
int origin_only;
int no_merging;
+ unsigned revert;
};
/* target attribute flags */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only);
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only);
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
- unsigned origin_only, unsigned exclusive);
+ unsigned origin_only, unsigned exclusive, unsigned revert);
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive);
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s,
int exclusive);
layer ? UINT32_C(0) : (uint32_t) lv->major,
layer ? UINT32_C(0) : (uint32_t) lv->minor,
_read_only_lv(lv),
- (lv->vg->status & PRECOMMITTED) ? 1 : 0,
+ ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
lvlayer,
_get_udev_flags(dm, lv, layer))))
return_0;
if (flags & LCK_ORIGIN_ONLY)
args[1] |= LCK_ORIGIN_ONLY_MODE;
+ if (flags & LCK_REVERT)
+ args[1] |= LCK_REVERT_MODE;
+
if (mirror_in_sync())
args[1] |= LCK_MIRROR_NOSYNC_MODE;
{
char lockfile[PATH_MAX];
unsigned origin_only = (flags & LCK_ORIGIN_ONLY) ? 1 : 0;
+ unsigned revert = (flags & LCK_REVERT) ? 1 : 0;
switch (flags & LCK_SCOPE_MASK) {
case LCK_VG:
case LCK_LV:
switch (flags & LCK_TYPE_MASK) {
case LCK_UNLOCK:
- log_very_verbose("Unlocking LV %s%s", resource, origin_only ? " without snapshots" : "");
- if (!lv_resume_if_active(cmd, resource, origin_only, 0))
+ log_very_verbose("Unlocking LV %s%s%s", resource, origin_only ? " without snapshots" : "", revert ? " (reverting)" : "");
+ if (!lv_resume_if_active(cmd, resource, origin_only, 0, revert))
return 0;
break;
case LCK_NULL:
return r;
}
+/* Unlock and revert list of LVs */
+int revert_lvs(struct cmd_context *cmd, struct dm_list *lvs)
+{
+ struct lv_list *lvl;
+ int r = 1;
+
+ dm_list_iterate_items(lvl, lvs)
+ if (!revert_lv(cmd, lvl->lv)) {
+ r = 0;
+ stack;
+ }
+
+ return r;
+}
/*
* Lock a list of LVs.
* On failure to lock any LV, calls vg_revert() if vg_to_revert is set and
vg_revert(vg_to_revert);
dm_list_uniterate(lvh, lvs, &lvl->list) {
lvl = dm_list_item(lvh, struct lv_list);
- if (!resume_lv(cmd, lvl->lv))
+ if (!revert_lv(cmd, lvl->lv))
stack;
}
#define LCK_CLUSTER_VG 0x00000080U /* VG is clustered */
#define LCK_CACHE 0x00000100U /* Operation on cache only using P_ lock */
#define LCK_ORIGIN_ONLY 0x00000200U /* Operation should bypass any snapshots */
+#define LCK_REVERT 0x00000400U /* Revert any incomplete change */
/*
* Additional lock bits for cluster communication via args[1]
#define LCK_CONVERT 0x08 /* Convert existing lock */
#define LCK_ORIGIN_ONLY_MODE 0x20 /* Same as above */
#define LCK_TEST_MODE 0x10 /* Test mode: No activation */
+#define LCK_REVERT_MODE 0x40 /* Remove inactive tables */
/*
* Special cases of VG locks.
#define resume_lv(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_RESUME)
#define resume_lv_origin(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_RESUME | LCK_ORIGIN_ONLY)
+#define revert_lv(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_RESUME | LCK_REVERT)
#define suspend_lv(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_SUSPEND | LCK_HOLD)
#define suspend_lv_origin(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_SUSPEND | LCK_HOLD | LCK_ORIGIN_ONLY)
#define deactivate_lv(cmd, lv) lock_lv_vol(cmd, lv, LCK_LV_DEACTIVATE)
int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs,
struct volume_group *vg_to_revert);
int resume_lvs(struct cmd_context *cmd, struct dm_list *lvs);
+int revert_lvs(struct cmd_context *cmd, struct dm_list *lvs);
int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusive);
/* Interrupt handling */
case LCK_NULL:
return lv_deactivate(cmd, resource);
case LCK_UNLOCK:
- return lv_resume_if_active(cmd, resource, (flags & LCK_ORIGIN_ONLY) ? 1: 0, 0);
+ return lv_resume_if_active(cmd, resource, (flags & LCK_ORIGIN_ONLY) ? 1: 0, 0, (flags & LCK_REVERT) ? 1 : 0);
case LCK_READ:
return lv_activate_with_filter(cmd, resource, 0);
case LCK_WRITE:
if (!info->exists || !info->inactive_table)
return 1;
+// FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.
+
log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
name, info->major, info->minor);
if (!_suspend_lvs(cmd, first_time, lv_mirr, lvs_changed, vg)) {
log_error("ABORTING: Volume group metadata update failed.");
- goto out;
+ if (!first_time && !revert_lv(cmd, lv_mirr))
+ stack;
+ return 0;
}
/* Commit on-disk metadata */
if (!vg_commit(vg)) {
log_error("ABORTING: Volume group metadata update failed.");
- goto out;
+ if (!_resume_lvs(cmd, first_time, lv_mirr, lvs_changed))
+ stack;
+ if (!first_time && !revert_lv(cmd, lv_mirr))
+ stack;
+ return 0;
}
/* Activate the temporary mirror LV */
if (!_resume_lvs(cmd, first_time, lv_mirr, lvs_changed))
r = 0;
- backup(vg);
+ if (r)
+ backup(vg);
+
return r;
}
/* Suspend LVs changed (implicitly suspends lv_mirr) */
if (!suspend_lvs(cmd, lvs_changed, vg)) {
log_error("ABORTING: Locking LVs to remove temporary mirror failed");
+ if (!revert_lv(cmd, lv_mirr))
+ stack;
return 0;
}
if (!vg_commit(vg)) {
log_error("ABORTING: Failed to write new data locations "
"to disk.");
- if (!resume_lv(cmd, lv_mirr))
+ if (!revert_lv(cmd, lv_mirr))
stack;
- if (!resume_lvs(cmd, lvs_changed))
+ if (!revert_lvs(cmd, lvs_changed))
stack;
return 0;
}