const unsigned new_stripe_size,
const uint32_t new_region_size,
struct dm_list *allocate_pvs);
+int lv_raid_rebuild(struct logical_volume *lv, struct dm_list *rebuild_pvs);
int lv_raid_replace(struct logical_volume *lv, struct dm_list *remove_pvs,
struct dm_list *allocate_pvs);
int lv_raid_remove_missing(struct logical_volume *lv);
}
/*
- * lv_raid_replace
+ * Helper:
+ *
+ * _lv_raid_rebuild_or_replace
* @lv
* @remove_pvs
* @allocate_pvs
+ * @rebuild
+ *
+ * Rebuild the LV images on the PVs listed in @remove_pvs if @rebuild != 0;
+ * @allocate_pvs is not accessed for a rebuild.
*
- * Replace the specified PVs.
+ * Replace the LV images on the PVs listed in @remove_pvs if @rebuild == 0;
+ * new SubLVs are allocated on the PVs in list @allocate_pvs.
*/
-int lv_raid_replace(struct logical_volume *lv,
- struct dm_list *remove_pvs,
- struct dm_list *allocate_pvs)
+static int _lv_raid_rebuild_or_replace(struct logical_volume *lv,
+ struct dm_list *remove_pvs,
+ struct dm_list *allocate_pvs,
+ int rebuild)
{
int partial_segment_removed = 0;
uint32_t s, sd, match_count = 0;
struct lv_segment *raid_seg = first_seg(lv);
struct lv_list *lvl;
char *tmp_names[raid_seg->area_count * 2];
+ const char *action_str = rebuild ? "rebuild" : "replace";
if (seg_is_any_raid0(raid_seg)) {
log_error("Can't replace any devices in %s LV %s",
if (lv_is_virtual(seg_lv(raid_seg, s)) ||
lv_is_virtual(seg_metalv(raid_seg, s)) ||
lv_is_on_pvs(seg_lv(raid_seg, s), remove_pvs) ||
- lv_is_on_pvs(seg_metalv(raid_seg, s), remove_pvs))
+ lv_is_on_pvs(seg_metalv(raid_seg, s), remove_pvs)) {
match_count++;
+ if (rebuild) {
+ seg_lv(raid_seg, s)->status |= LV_REBUILD;
+ seg_metalv(raid_seg, s)->status |= LV_REBUILD;
+ }
+ }
}
if (!match_count) {
- log_verbose("%s/%s does not contain devices specified"
- " for replacement", lv->vg->name, lv->name);
+ log_print_unless_silent("%s does not contain devices specified"
+ " to %s.", display_lvname(lv), action_str);
return 1;
} else if (match_count == raid_seg->area_count) {
- log_error("Unable to remove all PVs from %s/%s at once.",
- lv->vg->name, lv->name);
+ log_error("Unable to %s all PVs from %s/%s at once.",
+ action_str, lv->vg->name, lv->name);
return 0;
} else if (raid_seg->segtype->parity_devs &&
(match_count > raid_seg->segtype->parity_devs)) {
- log_error("Unable to replace more than %u PVs from (%s) %s/%s",
- raid_seg->segtype->parity_devs,
+ log_error("Unable to %s more than %u PVs from (%s) %s/%s",
+ action_str, raid_seg->segtype->parity_devs,
lvseg_name(raid_seg),
lv->vg->name, lv->name);
return 0;
} else if (seg_is_raid10(raid_seg)) {
uint32_t i, rebuilds_per_group = 0;
- /* FIXME: We only support 2-way mirrors in RAID10 currently */
+ /* FIXME: We only support 2-way mirrors (i.e. 2 data copies) in RAID10 currently */
uint32_t copies = 2;
for (i = 0; i < raid_seg->area_count * copies; i++) {
lv_is_virtual(seg_metalv(raid_seg, s)))
rebuilds_per_group++;
if (rebuilds_per_group >= copies) {
- log_error("Unable to replace all the devices "
- "in a RAID10 mirror group.");
+ log_error("Unable to %s all the devices "
+ "in a RAID10 mirror group.", action_str);
return 0;
}
}
}
+ if (rebuild)
+ goto skip_alloc;
+
/* Prevent any PVs holding image components from being used for allocation */
if (!_avoid_pvs_with_other_images_of_lv(lv, allocate_pvs)) {
log_error("Failed to prevent PVs holding image components "
tmp_names[s] = tmp_names[sd] = NULL;
}
+skip_alloc:
if (!lv_update_and_reload_origin(lv))
return_0;
+ /* @old_lvs is empty in case of a rebuild */
dm_list_iterate_items(lvl, &old_lvs) {
if (!deactivate_lv(lv->vg->cmd, lvl->lv))
return_0;
return_0;
}
- /* Update new sub-LVs to correct name and clear REBUILD flag */
+ /* Clear REBUILD flag */
for (s = 0; s < raid_seg->area_count; s++) {
- sd = s + raid_seg->area_count;
- if (tmp_names[s] && tmp_names[sd]) {
- seg_metalv(raid_seg, s)->name = tmp_names[s];
- seg_lv(raid_seg, s)->name = tmp_names[sd];
- seg_metalv(raid_seg, s)->status &= ~LV_REBUILD;
- seg_lv(raid_seg, s)->status &= ~LV_REBUILD;
- }
+ seg_lv(raid_seg, s)->status &= ~LV_REBUILD;
+ seg_metalv(raid_seg, s)->status &= ~LV_REBUILD;
}
+ /* If replacing, set the correct SubLV name(s) */
+ if (!rebuild)
+ for (s = 0; s < raid_seg->area_count; s++) {
+ sd = s + raid_seg->area_count;
+ if (tmp_names[s] && tmp_names[sd]) {
+ seg_metalv(raid_seg, s)->name = tmp_names[s];
+ seg_lv(raid_seg, s)->name = tmp_names[sd];
+ }
+ }
+
if (!lv_update_and_reload_origin(lv))
return_0;
return 1;
}
+/*
+ * lv_raid_rebuild
+ * @lv
+ * @rebuild_pvs
+ *
+ * Rebuild the LV images of @lv on the PVs listed in @rebuild_pvs.
+ */
+int lv_raid_rebuild(struct logical_volume *lv,
+ struct dm_list *rebuild_pvs)
+{
+ return _lv_raid_rebuild_or_replace(lv, rebuild_pvs, NULL, 1);
+}
+
+/*
+ * lv_raid_replace
+ * @lv
+ * @remove_pvs
+ * @allocate_pvs
+ *
+ * Replace the LV images of @lv on the PVs listed in @remove_pvs,
+ * allocating new SubLVs from the PVs on list @allocate_pvs.
+ */
+int lv_raid_replace(struct logical_volume *lv,
+ struct dm_list *remove_pvs,
+ struct dm_list *allocate_pvs)
+{
+ return _lv_raid_rebuild_or_replace(lv, remove_pvs, allocate_pvs, 0);
+}
+
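+/*
+ * Illustrative usage sketch (not part of this patch; 'failed_pvh' and
+ * 'alloc_pvh' are placeholder dm_lists of pv_list entries):
+ *
+ *	// Reconstruct the data on the listed PVs in place:
+ *	if (!lv_raid_rebuild(lv, failed_pvh))
+ *		return_0;
+ *
+ *	// Or allocate new SubLVs on 'alloc_pvh' and swap them in:
+ *	if (!lv_raid_replace(lv, failed_pvh, alloc_pvh))
+ *		return_0;
+ */
+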
int lv_raid_remove_missing(struct logical_volume *lv)
{
uint32_t s;
.IR AllocationPolicy ]
.RB [ \-A | \-\-autobackup
.RB { y | n }]
+.RB [ \-\-rebuild
+.IR PhysicalVolume ]
.RB [ \-\-cachemode
.RB { passthrough | writeback | writethrough }]
.RB [ \-\-cachepolicy
\fB\-\-poll n\fP to defer and then \fB\-\-poll y\fP to restart the process.
.
.HP
+.BR \-\- [ raid ] rebuild
+.BR \fIPhysicalVolume
+.br
+This option can be repeated multiple times.
+It selects the PhysicalVolume(s) to be rebuilt in a RaidLV.
+Use this option instead of
+.BR \-\-resync
+or
+.BR \-\- [ raid ] syncaction
+\fBrepair\fP in case the PVs with corrupted data are known and their data
+should be reconstructed rather than relying on the default (rotating)
+reconstruction of data.
+.br
+E.g. in a raid1 mirror, the master leg on /dev/sda may hold corrupt data due
+to a known transient disk error, thus
+.br
+\fBlvchange --rebuild /dev/sda LV\fP
+.br
+will request the master leg to be rebuilt rather than rebuilding
+all the other legs from the master.
+On a raid5 with rotating data and parity
+.br
+\fBlvchange --rebuild /dev/sda LV\fP
+.br
+will rebuild all data and parity blocks in the stripe on /dev/sda.
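+.br
+As the option can be repeated, several PVs may be rebuilt at once, e.g.
+.br
+\fBlvchange --rebuild /dev/sda --rebuild /dev/sdb LV\fP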
+.HP
.BR \-\- [ raid ] maxrecoveryrate
.BR \fIRate [ b | B | s | S | k | K | m | M | g | G ]
.br
return 1
}
+# aux check_status_chars $vg $lv "Aaaaa"
+# Compares the per-image health characters reported by 'dmsetup status'
+# (field 6; 'A' = alive/in-sync, 'a' = alive/not in-sync, 'D' = dead/failed)
+# with the expected string.
+check_status_chars() {
+	[ "$(dmsetup status "$1-$2" | awk '{print $6}')" = "$3" ] && return
+	return 1
+}
+
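+# Hypothetical flow sketch ($dev2, $vg and $lv1 are placeholder test names):
+#   lvchange --yes --rebuild "$dev2" $vg/$lv1
+#   check_status_chars $vg $lv1 "aA"	# the rebuilt leg reports 'a' until in sync
+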
# Check if tests are running on 64bit architecture
can_use_16T() {
test "$(getconf LONG_BIT)" -eq 64
arg(poolmetadataspare_ARG, '\0', "poolmetadataspare", yes_no_arg, 0, 0)
arg(profile_ARG, '\0', "profile", string_arg, 0, 0)
arg(pvmetadatacopies_ARG, '\0', "pvmetadatacopies", int_arg, 0, 0)
+arg(raidrebuild_ARG, '\0', "raidrebuild", string_arg, ARG_GROUPABLE, 0)
arg(raidmaxrecoveryrate_ARG, '\0', "raidmaxrecoveryrate", size_kb_arg, 0, 0)
arg(raidminrecoveryrate_ARG, '\0', "raidminrecoveryrate", size_kb_arg, 0, 0)
arg(raidsyncaction_ARG, '\0', "raidsyncaction", string_arg, 0, 0)
arg(readonly_ARG, '\0', "readonly", NULL, 0, 0)
arg(refresh_ARG, '\0', "refresh", NULL, 0, 0)
arg(removemissing_ARG, '\0', "removemissing", NULL, 0, 0)
+arg(rebuild_ARG, '\0', "rebuild", string_arg, ARG_GROUPABLE, 0)
arg(repair_ARG, '\0', "repair", NULL, 0, 0)
arg(replace_ARG, '\0', "replace", string_arg, ARG_GROUPABLE, 0)
arg(reportformat_ARG, '\0', "reportformat", string_arg, 0, 0)
"\t[--activationmode {complete|degraded|partial}"
"\t[--addtag <Tag>]\n"
"\t[--alloc <AllocationPolicy>]\n"
+ "\t[--rebuild PhysicalVolume]\n"
"\t[-C|--contiguous {y|n}]\n"
"\t[--cachemode <CacheMode>]\n"
"\t[--cachepolicy <policyname>] [--cachesettings <parameter=value>]\n"
ignoreskippedcluster_ARG, major_ARG, metadataprofile_ARG, minor_ARG,
monitor_ARG, minrecoveryrate_ARG, maxrecoveryrate_ARG, noudevsync_ARG,
partial_ARG, permission_ARG, persistent_ARG, poll_ARG,
- raidminrecoveryrate_ARG, raidmaxrecoveryrate_ARG, raidsyncaction_ARG,
- raidwritebehind_ARG, raidwritemostly_ARG, readahead_ARG, reportformat_ARG,
- resync_ARG, refresh_ARG, select_ARG, setactivationskip_ARG, syncaction_ARG,
- sysinit_ARG, test_ARG, writebehind_ARG, writemostly_ARG, zero_ARG)
+ raidrebuild_ARG, raidminrecoveryrate_ARG, raidmaxrecoveryrate_ARG,
+ raidsyncaction_ARG, raidwritebehind_ARG, raidwritemostly_ARG, readahead_ARG,
+ reportformat_ARG, rebuild_ARG, resync_ARG, refresh_ARG, select_ARG, setactivationskip_ARG,
+ syncaction_ARG, sysinit_ARG, test_ARG, writebehind_ARG, writemostly_ARG, zero_ARG)
#define COMMON_OPTS \
"\t[--commandprofile <ProfileName>] [-d|--debug] [-h|-?|--help]\n" \
"lvconvert "
"[-m|--mirrors <Mirrors> [--mirrorlog {disk|core|mirrored}|--corelog]]\n"
"\t[--type <SegmentType>]\n"
+ "\t[--rebuild PhysicalVolume]\n"
"\t[--repair [--use-policies]]\n"
"\t[--replace PhysicalVolume]\n"
"\t[-R|--regionsize <MirrorLogRegionSize>]\n"
return 1;
}
+static int _lvchange_rebuild(struct logical_volume *lv)
+{
+ int pv_count, i = 0;
+ char **rebuild_pvs;
+ const char *tmp_str;
+ struct dm_list *rebuild_pvh = NULL;
+ struct arg_value_group_list *group;
+ struct volume_group *vg = lv->vg;
+ struct cmd_context *cmd = vg->cmd;
+ struct lv_segment *raid_seg = first_seg(lv);
+
+ if (!seg_is_raid(raid_seg) || seg_is_any_raid0(raid_seg)) {
+ log_error("--rebuild can only be used with 'raid4/5/6/10' segment types.");
+ return 0;
+ }
+
+ if (!(pv_count = arg_count(cmd, rebuild_ARG))) {
+ log_error("No --rebuild found!");
+ return 0;
+ }
+
+ if (!arg_is_set(cmd, yes_ARG) &&
+ yes_no_prompt("Do you really want to rebuild %u PVs "
+ "of logical volume %s [y/n]: ",
+ pv_count, display_lvname(lv)) == 'n') {
+ log_error("Logical volume %s not rebuild.",
+ display_lvname(lv));
+ return 0;
+ }
+
+ /* rebuild can be specified more than once */
+ if (!(rebuild_pvs = dm_pool_alloc(vg->vgmem, sizeof(char *) * pv_count)))
+ return_0;
+
+ dm_list_iterate_items(group, &cmd->arg_value_groups) {
+ if (!grouped_arg_is_set(group->arg_values, rebuild_ARG))
+ continue;
+
+ if (!(tmp_str = grouped_arg_str_value(group->arg_values,
+ rebuild_ARG, NULL)))
+ return_0;
+
+ if (!(rebuild_pvs[i++] = dm_pool_strdup(cmd->mem, tmp_str)))
+ return_0;
+ }
+
+ if (!(rebuild_pvh = create_pv_list(cmd->mem, vg,
+ pv_count, rebuild_pvs, 0)))
+ return_0;
+
+ /* Rebuild PVs listed on @rebuild_pvh */
+ return lv_raid_rebuild(lv, rebuild_pvh);
+}
+
static int _lvchange_writemostly(struct logical_volume *lv)
{
int s, pv_count, i = 0;
docmds++;
}
+ /* rebuild selected PVs */
+ if (arg_is_set(cmd, rebuild_ARG)) {
+ if (!archive(lv->vg))
+ return_ECMD_FAILED;
+ doit += _lvchange_rebuild(lv);
+ docmds++;
+ }
+
/* change writemostly/writebehind */
if (arg_is_set(cmd, writemostly_ARG) || arg_is_set(cmd, writebehind_ARG)) {
if (!archive(lv->vg))
errorwhenfull_ARG,
maxrecoveryrate_ARG,
minrecoveryrate_ARG,
+ rebuild_ARG,
resync_ARG,
syncaction_ARG,
writebehind_ARG,
!_merge_synonym(cmd, allocation_ARG, resizeable_ARG) ||
!_merge_synonym(cmd, virtualoriginsize_ARG, virtualsize_ARG) ||
!_merge_synonym(cmd, available_ARG, activate_ARG) ||
+ !_merge_synonym(cmd, raidrebuild_ARG, rebuild_ARG) ||
!_merge_synonym(cmd, raidsyncaction_ARG, syncaction_ARG) ||
!_merge_synonym(cmd, raidwritemostly_ARG, writemostly_ARG) ||
!_merge_synonym(cmd, raidminrecoveryrate_ARG, minrecoveryrate_ARG) ||