/*
 * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2018 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib/misc/lib.h"
#include "dev_manager.h"
#include "lib/misc/lvm-string.h"
#include "fs.h"
#include "lib/config/defaults.h"
#include "lib/metadata/segtype.h"
#include "lib/display/display.h"
#include "lib/commands/toolcontext.h"
#include "lib/activate/targets.h"
#include "lib/config/config.h"
#include "lib/activate/activate.h"
#include "lib/misc/lvm-exec.h"
#include "lib/datastruct/str_list.h"
#include "lib/misc/lvm-signal.h"

#include <limits.h>
#include <dirent.h>

#define MAX_TARGET_PARAMSIZE 50000
#define LVM_UDEV_NOSCAN_FLAG DM_SUBSYSTEM_UDEV_FLAG0
#define CRYPT_TEMP	"CRYPT-TEMP"
#define CRYPT_SUBDEV	"CRYPT-SUBDEV"
#define STRATIS		"stratis-"

typedef enum {
	PRELOAD,
	ACTIVATE,
	DEACTIVATE,
	SUSPEND,
	SUSPEND_WITH_LOCKFS,
	CLEAN
} action_t;

/* This list must match lib/misc/lvm-string.c:build_dm_uuid(). */
static const char * const _uuid_suffix_list[] = { "pool", "cdata", "cmeta", "cvol", "tdata", "tmeta", "vdata", "vpool", "imeta", NULL };

struct dlid_list {
	struct dm_list list;
	const char *dlid;
	const struct logical_volume *lv;
};

struct dev_manager {
	struct dm_pool *mem;

	struct cmd_context *cmd;

	void *target_state;
	uint32_t pvmove_mirror_count;
	int flush_required;
	int activation;		/* building activation tree */
	int suspend;		/* building suspend tree */
	unsigned track_pending_delete;
	unsigned track_pvmove_deps;

	const char *vg_name;
};

struct lv_layer {
	const struct logical_volume *lv;
	const char *old_name;
	int visible_component;
};

int read_only_lv(const struct logical_volume *lv, const struct lv_activate_opts *laopts, const char *layer)
{
	if (layer && lv_is_cow(lv))
		return 0; /* Keep snapshot's COW volume writable */

	if (lv_is_raid_image(lv) || lv_is_raid_metadata(lv))
		return 0; /* Keep RAID SubLVs writable */

	if (!layer) {
		if (lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
			return 1;
	}

	return (laopts->read_only || !(lv->status & LVM_WRITE));
}

/*
 * Low level device-layer operations.
 *
 * Unless task is DM_DEVICE_TARGET_MSG, also calls dm_task_run().
 */
static struct dm_task *_setup_task_run(int task, struct dm_info *info,
				       const char *name, const char *uuid,
				       uint32_t *event_nr,
				       uint32_t major, uint32_t minor,
				       int with_open_count,
				       int with_flush,
				       int query_inactive)
{
	char vsn[80];
	unsigned maj, min;
	struct dm_task *dmt;

	if (!(dmt = dm_task_create(task)))
		return_NULL;

	if (name && !dm_task_set_name(dmt, name))
		goto_out;

	if (uuid && *uuid && !dm_task_set_uuid(dmt, uuid))
		goto_out;

	if (event_nr && !dm_task_set_event_nr(dmt, *event_nr))
		goto_out;

	if (major && !dm_task_set_major_minor(dmt, major, minor, 1))
		goto_out;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (query_inactive && !dm_task_query_inactive_table(dmt)) {
		log_error("Failed to set query_inactive_table.");
		goto out;
	}

	if (!with_open_count && !dm_task_no_open_count(dmt))
		log_warn("WARNING: Failed to disable open_count.");

	if (!with_flush && !dm_task_no_flush(dmt))
		log_warn("WARNING: Failed to set no_flush.");

	switch (task) {
	case DM_DEVICE_TARGET_MSG:
		return dmt; /* TARGET_MSG needs more local tweaking before task_run() */
	case DM_DEVICE_LIST:
		/* Use 'newuuid' only with DM version that supports it */
		if (driver_version(vsn, sizeof(vsn)) &&
		    (sscanf(vsn, "%u.%u", &maj, &min) == 2) &&
		    (maj == 4 ? min >= 19 : maj > 4) &&
		    !dm_task_set_newuuid(dmt, " ")) /* new uuid has no meaning here */
			log_warn("WARNING: Failed to query uuid with LIST.");
		break;
	default:
		break;
	}

	if (!dm_task_run(dmt))
		goto_out;

	if (info && !dm_task_get_info(dmt, info))
		goto_out;

	return dmt;

out:
	dm_task_destroy(dmt);

	return NULL;
}

/* Read info from DM VDO 'stats' message */
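/*
 * The exact layout of the kernel's response is not assumed here: each
 * wanted property is located with strstr() and its numeric value read
 * from just after the following ':'.  An illustrative fragment of such
 * a response (values made up):
 *
 *	dataBlocksUsed : 163840
 *	logicalBlocksUsed : 327680
 *
 * Properties that cannot be parsed remain set to ULLONG_MAX ("unknown").
 */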
static int _vdo_pool_message_stats(struct dm_pool *mem,
				   const struct logical_volume *lv,
				   struct lv_status_vdo *status)
{
	const char *response;
	const char *dlid;
	struct dm_task *dmt = NULL;
	int r = 0;
	unsigned i;
	const char *p;
	struct vdo_msg_elem {
		const char *name;
		uint64_t *val;
	} const vme[] = { /* list of properties lvm2 wants to parse */
		{ "dataBlocksUsed", &status->data_blocks_used },
		{ "logicalBlocksUsed", &status->logical_blocks_used }
	};

	for (i = 0; i < DM_ARRAY_SIZE(vme); ++i)
		*vme[i].val = ULLONG_MAX;

	if (!(dlid = build_dm_uuid(mem, lv, lv_layer(lv))))
		return_0;

	if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 0, 0)))
		return_0;

	if (!dm_task_set_message(dmt, "stats"))
		goto_out;

	if (!dm_task_run(dmt))
		goto_out;

	log_debug_activation("Checking VDO pool stats message for LV %s.",
			     display_lvname(lv));

	if ((response = dm_task_get_message_response(dmt))) {
		for (i = 0; i < DM_ARRAY_SIZE(vme); ++i) {
			errno = 0;
			if (!(p = strstr(response, vme[i].name)) ||
			    !(p = strchr(p, ':')) ||
			    ((*vme[i].val = strtoul(p + 1, NULL, 10)) == ULLONG_MAX) || errno) {
				log_debug("Cannot parse %s in VDO DM stats message.", vme[i].name);
				*vme[i].val = ULLONG_MAX;
				goto out;
			}
			if (*vme[i].val != ULLONG_MAX)
				log_debug("VDO property %s = " FMTu64, vme[i].name, *vme[i].val);
		}
	}

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}

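/*
 * Parse the raw 'params' of a single kernel status line into the matching
 * lv_seg_status member, according to the segment type recorded in lvm2
 * metadata.  Sets seg_status->type: SEG_STATUS_UNKNOWN means parsing
 * failed, SEG_STATUS_NONE means status is not (yet) supported for the
 * segment type.
 */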
static int _get_segment_status_from_target_params(const char *target_name,
						  const char *params,
						  const struct dm_info *dminfo,
						  struct lv_seg_status *seg_status)
{
	const struct lv_segment *seg = seg_status->seg;
	const struct segment_type *segtype = seg->segtype;

	seg_status->type = SEG_STATUS_UNKNOWN; /* Parsing failed */

	/* Switch to snapshot segtype status logic for merging origin */
	/* This is 'dynamic' decision, both states are valid */
	if (lv_is_merging_origin(seg->lv)) {
		if (!strcmp(target_name, TARGET_NAME_SNAPSHOT_ORIGIN)) {
			seg_status->type = SEG_STATUS_NONE;
			return 1; /* Merge has not yet started */
		}
		if (!strcmp(target_name, TARGET_NAME_SNAPSHOT_MERGE) &&
		    !(segtype = get_segtype_from_string(seg->lv->vg->cmd, TARGET_NAME_SNAPSHOT)))
			return_0;
		/* Merging, parse 'snapshot' status of merge progress */
	}

	if (!params) {
		log_warn("WARNING: Cannot find matching %s segment for %s.",
			 segtype->name, display_lvname(seg_status->seg->lv));
		return 0;
	}

	/* Validate target_name segtype from DM table with lvm2 metadata segtype */
	if (!lv_is_locked(seg->lv) &&
	    strcmp(segtype->name, target_name) &&
	    /* If kernel's type isn't an exact match is it compatible? */
	    (!segtype->ops->target_status_compatible ||
	     !segtype->ops->target_status_compatible(target_name))) {
		log_warn("WARNING: Detected %s segment type does not match expected type %s for %s.",
			 target_name, segtype->name, display_lvname(seg_status->seg->lv));
		return 0;
	}

	/* TODO: move into segtype method */
	if (segtype_is_cache(segtype)) {
		if (!dm_get_status_cache(seg_status->mem, params, &(seg_status->cache)))
			return_0;
		seg_status->type = SEG_STATUS_CACHE;
	} else if (segtype_is_raid(segtype)) {
		if (!dm_get_status_raid(seg_status->mem, params, &seg_status->raid))
			return_0;
		seg_status->type = SEG_STATUS_RAID;
	} else if (segtype_is_thin_volume(segtype)) {
		if (!dm_get_status_thin(seg_status->mem, params, &seg_status->thin))
			return_0;
		seg_status->type = SEG_STATUS_THIN;
	} else if (segtype_is_thin_pool(segtype)) {
		if (!dm_get_status_thin_pool(seg_status->mem, params, &seg_status->thin_pool))
			return_0;
		seg_status->type = SEG_STATUS_THIN_POOL;
	} else if (segtype_is_snapshot(segtype)) {
		if (!dm_get_status_snapshot(seg_status->mem, params, &seg_status->snapshot))
			return_0;
		seg_status->type = SEG_STATUS_SNAPSHOT;
	} else if (segtype_is_vdo_pool(segtype)) {
		if (!_vdo_pool_message_stats(seg_status->mem, seg->lv, &seg_status->vdo_pool))
			stack;
		if (!parse_vdo_pool_status(seg_status->mem, seg->lv, params, dminfo, &seg_status->vdo_pool))
			return_0;
		seg_status->type = SEG_STATUS_VDO_POOL;
	} else if (segtype_is_writecache(segtype)) {
		if (!dm_get_status_writecache(seg_status->mem, params, &(seg_status->writecache)))
			return_0;
		seg_status->type = SEG_STATUS_WRITECACHE;
	} else if (segtype_is_integrity(segtype)) {
		if (!dm_get_status_integrity(seg_status->mem, params, &(seg_status->integrity)))
			return_0;
		seg_status->type = SEG_STATUS_INTEGRITY;
	} else
		/*
		 * TODO: Add support for other segment types too!
		 * Status not supported
		 */
		seg_status->type = SEG_STATUS_NONE;

	return 1;
}

typedef enum {
	INFO,	/* DM_DEVICE_INFO ioctl */
	STATUS, /* DM_DEVICE_STATUS ioctl */
} info_type_t;

/* Return length of segment depending on type and reshape_len */
static uint32_t _seg_len(const struct lv_segment *seg)
{
	uint32_t reshape_len = seg_is_raid(seg) ? ((seg->area_count - seg->segtype->parity_devs) * seg->reshape_len) : 0;

	return seg->len - reshape_len;
}

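/*
 * Run DM_DEVICE_INFO (or DM_DEVICE_STATUS when seg_status is requested)
 * on one device identified by dlid or major:minor.  Optionally fills in
 * read_ahead, verifies the device name against name_check, and for an
 * active device finds the kernel target matching seg_status->seg so its
 * params can be handed to _get_segment_status_from_target_params().
 */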
static int _info_run(const char *dlid, struct dm_info *dminfo,
		     uint32_t *read_ahead,
		     struct lv_seg_status *seg_status,
		     const char *name_check,
		     int with_open_count, int with_read_ahead,
		     uint32_t major, uint32_t minor)
{
	int r = 0;
	struct dm_task *dmt;
	int dmtask;
	int with_flush; /* TODO: arg for _info_run */
	void *target = NULL;
	uint64_t target_start, target_length, start, extent_size, length, length_crop = 0;
	char *target_name, *target_params;
	const char *devname;

	if (seg_status) {
		dmtask = DM_DEVICE_STATUS;
		with_flush = 0;
	} else {
		dmtask = DM_DEVICE_INFO;
		with_flush = 1; /* doesn't really matter */
	}

	if (!(dmt = _setup_task_run(dmtask, dminfo, NULL, dlid, 0, major, minor,
				    with_open_count, with_flush, 0)))
		return_0;

	if (name_check && dminfo->exists &&
	    (devname = dm_task_get_name(dmt)) &&
	    (strcmp(name_check, devname) != 0))
		dminfo->exists = 0; /* mismatching name -> device does not exist */

	if (with_read_ahead && read_ahead && dminfo->exists) {
		if (!dm_task_get_read_ahead(dmt, read_ahead))
			goto_out;
	} else if (read_ahead)
		*read_ahead = DM_READ_AHEAD_NONE;

	/* Query status only for active device */
	if (seg_status && dminfo->exists) {
		extent_size = length = seg_status->seg->lv->vg->extent_size;
		start = extent_size * seg_status->seg->le;
		length *= _seg_len(seg_status->seg);

		/* Uses max DM_THIN_MAX_METADATA_SIZE sectors for metadata device */
		if (lv_is_thin_pool_metadata(seg_status->seg->lv) &&
		    (length > DM_THIN_MAX_METADATA_SIZE))
			length_crop = DM_THIN_MAX_METADATA_SIZE;

		/* Uses virtual size with headers for VDO pool device */
		if (lv_is_vdo_pool(seg_status->seg->lv))
			length = get_vdo_pool_virtual_size(seg_status->seg);

		if (lv_is_integrity(seg_status->seg->lv))
			length = seg_status->seg->integrity_data_sectors;

		do {
			target = dm_get_next_target(dmt, target, &target_start,
						    &target_length, &target_name, &target_params);

			if ((start == target_start) &&
			    ((length == target_length) ||
			     ((lv_is_vdo_pool(seg_status->seg->lv)) && /* should fit within extent size */
			      (length < target_length) && ((length + extent_size) > target_length)) ||
			     (length_crop && (length_crop == target_length))))
				break; /* Keep target_params when matching segment is found */

			target_params = NULL; /* Marking this target_params unusable */
		} while (target);

		if (!target_name ||
		    !_get_segment_status_from_target_params(target_name, target_params, dminfo, seg_status))
			stack;
	}

	r = 1;

out:
	dm_task_destroy(dmt);

	return r;
}

/*
 * ignore_blocked_mirror_devices
 * @dev
 * @start
 * @length
 * @mirror_status_str
 *
 * When a DM 'mirror' target is created with 'block_on_error' or
 * 'handle_errors', it will block I/O if there is a device failure
 * until the mirror is reconfigured. Thus, LVM should never attempt
 * to read labels from a mirror that has a failed device. (LVM
 * commands are issued to repair mirrors; and if LVM is blocked
 * attempting to read a mirror, a circular dependency would be created.)
 *
 * This function is a slimmed-down version of lib/mirror/mirrored.c:
 * _mirrored_transient_status().
 *
 * If a failed device is detected in the status string, then it must be
 * determined if 'block_on_error' or 'handle_errors' was used when
 * creating the mirror. This info can only be determined from the mirror
 * table. The 'dev', 'start', 'length' trio allow us to correlate the
 * 'mirror_status_str' with the correct device table in order to check
 * for blocking.
 *
 * Returns: 1 if mirror should be ignored, 0 if safe to use
 */
static int _ignore_blocked_mirror_devices(struct cmd_context *cmd,
					  struct device *dev,
					  uint64_t start, uint64_t length,
					  char *mirror_status_str)
{
	struct dm_pool *mem;
	struct dm_status_mirror *sm;
	unsigned i, check_for_blocking = 0;
	uint64_t s, l;
	char *p, *params, *target_type = NULL;
	void *next = NULL;
	struct dm_task *dmt = NULL;
	int r = 0;
	char fake_dev_name[16];
	struct device fake_dev = { .fd = 0 };
	struct dm_str_list *alias;

	if (!(mem = dm_pool_create("blocked_mirrors", 128)))
		return_0;

	if (!dm_get_status_mirror(mem, mirror_status_str, &sm))
		goto_out;

	for (i = 0; i < sm->dev_count; ++i)
		if (sm->devs[i].health != DM_STATUS_MIRROR_ALIVE) {
			log_debug_activation("%s: Mirror image %d marked as failed.",
					     dev_name(dev), i);
			check_for_blocking = 1;
		}

	if (!check_for_blocking && sm->log_count) {
		if (sm->logs[0].health != DM_STATUS_MIRROR_ALIVE) {
			log_debug_activation("%s: Mirror log device marked as failed.",
					     dev_name(dev));
			check_for_blocking = 1;
		} else {
			dev_init(&fake_dev);
			if (dm_snprintf(fake_dev_name, sizeof(fake_dev_name), "%u:%u",
					sm->logs[0].major, sm->logs[0].minor) < 0)
				goto_out;

			if (!(alias = dm_pool_zalloc(mem, sizeof(*alias))))
				goto_out;
			if (!(alias->str = dm_pool_strdup(mem, fake_dev_name)))
				goto_out;
			dm_list_add(&fake_dev.aliases, &alias->list);
			fake_dev.flags = DEV_REGULAR;
			fake_dev.dev = MKDEV(sm->logs[0].major, sm->logs[0].minor);

			if (dm_device_is_usable(cmd, &fake_dev, (struct dev_usable_check_params)
						{ .check_empty = 1,
						  .check_blocked = 1,
						  .check_suspended = ignore_suspended_devices(),
						  .check_error_target = 1,
						  .check_reserved = 0 }, NULL))
				goto out; /* safe to use */
			stack;
		}
	}

	if (!check_for_blocking) {
		r = 1;
		goto out;
	}

	/*
	 * We avoid another system call if we can, but if a device is
	 * dead, we have no choice but to look up the table too.
	 */
	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL, NULL, NULL, NULL,
				    MAJOR(dev->dev), MINOR(dev->dev), 0, 1, 0)))
		goto_out;

	do {
		next = dm_get_next_target(dmt, next, &s, &l,
					  &target_type, &params);
		if ((s == start) && (l == length) &&
		    target_type && params) {
			if (strcmp(target_type, TARGET_NAME_MIRROR))
				goto_out;

			if (((p = strstr(params, " block_on_error")) &&
			     (p[15] == '\0' || p[15] == ' ')) ||
			    ((p = strstr(params, " handle_errors")) &&
			     (p[14] == '\0' || p[14] == ' '))) {
				log_debug_activation("%s: I/O blocked to mirror device.",
						     dev_name(dev));
				goto out;
			}
		}
	} while (next);

	r = 1;
out:
	if (dmt)
		dm_task_destroy(dmt);

	dm_pool_destroy(mem);

	return r;
}

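/* Check with a single INFO ioctl whether major:minor exists and is suspended. */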
static int _device_is_suspended(int major, int minor)
{
	struct dm_task *dmt;
	struct dm_info info;

	if (!(dmt = _setup_task_run(DM_DEVICE_INFO, &info,
				    NULL, NULL, NULL,
				    major, minor, 0, 0, 0)))
		return_0;

	dm_task_destroy(dmt);

	return (info.exists && info.suspended);
}

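/*
 * Walk the device's table and report whether any origin/COW device
 * referenced by a snapshot or snapshot-origin target is currently
 * suspended (see the longer explanation in dm_device_is_usable()).
 */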
static int _ignore_suspended_snapshot_component(struct device *dev)
{
	struct dm_task *dmt;
	void *next = NULL;
	char *params, *target_type = NULL;
	uint64_t start, length;
	int major1, minor1, major2, minor2;
	int r = 0;

	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL,
				    NULL, NULL, NULL,
				    MAJOR(dev->dev), MINOR(dev->dev), 0, 1, 0)))
		return_0;

	do {
		next = dm_get_next_target(dmt, next, &start, &length, &target_type, &params);

		if (!target_type)
			continue;

		if (!strcmp(target_type, TARGET_NAME_SNAPSHOT)) {
			if (!params || sscanf(params, "%d:%d %d:%d", &major1, &minor1, &major2, &minor2) != 4) {
				log_warn("WARNING: Incorrect snapshot table found for %u:%u.",
					 MAJOR(dev->dev), MINOR(dev->dev));
				goto out;
			}
			r = r || _device_is_suspended(major1, minor1) || _device_is_suspended(major2, minor2);
		} else if (!strcmp(target_type, TARGET_NAME_SNAPSHOT_ORIGIN)) {
			if (!params || sscanf(params, "%d:%d", &major1, &minor1) != 2) {
				log_warn("WARNING: Incorrect snapshot-origin table found for %u:%u.",
					 MAJOR(dev->dev), MINOR(dev->dev));
				goto out;
			}
			r = r || _device_is_suspended(major1, minor1);
		}
	} while (next);

out:
	dm_task_destroy(dmt);

	return r;
}

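/*
 * Look up the thin-pool behind a thin device and refuse to use the thin
 * device while the pool is read-only or out of data space.
 * Returns 1 when the thin device is usable.
 */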
static int _ignore_unusable_thins(struct device *dev)
{
	/* TODO make function for thin testing */
	struct dm_pool *mem;
	struct dm_status_thin_pool *status;
	struct dm_task *dmt = NULL;
	void *next = NULL;
	uint64_t start, length;
	char *target_type = NULL;
	char *params;
	int minor, major;
	int r = 0;

	if (!(mem = dm_pool_create("unusable_thins", 128)))
		return_0;

	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL, NULL, NULL, NULL,
				    MAJOR(dev->dev), MINOR(dev->dev), 0, 1, 0)))
		goto_out;

	dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
	if (!params || sscanf(params, "%d:%d", &major, &minor) != 2) {
		log_warn("WARNING: Cannot get thin-pool major:minor for thin device %u:%u.",
			 MAJOR(dev->dev), MINOR(dev->dev));
		goto out;
	}
	dm_task_destroy(dmt);

	if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, NULL, NULL, NULL, NULL,
				    major, minor, 0, 0, 0)))
		goto_out;

	dm_get_next_target(dmt, next, &start, &length, &target_type, &params);
	if (!dm_get_status_thin_pool(mem, params, &status))
		goto_out;

	if (status->read_only || status->out_of_data_space) {
		log_warn("WARNING: %s: Thin's thin-pool needs inspection.",
			 dev_name(dev));
		goto out;
	}

	r = 1;
out:
	if (dmt)
		dm_task_destroy(dmt);

	dm_pool_destroy(mem);

	return r;
}

static int _ignore_invalid_snapshot(const char *params)
{
	struct dm_status_snapshot *s;
	struct dm_pool *mem;
	int r = 0;

	if (!(mem = dm_pool_create("invalid snapshots", 128)))
		return_0;

	if (!dm_get_status_snapshot(mem, params, &s))
		stack;
	else
		r = s->invalid;

	dm_pool_destroy(mem);

	return r;
}

static int _ignore_frozen_raid(struct device *dev, const char *params)
{
	struct dm_status_raid *s;
	struct dm_pool *mem;
	int r = 0;

	if (!(mem = dm_pool_create("frozen raid", 128)))
		return_0;

	if (!dm_get_status_raid(mem, params, &s))
		stack;
	else if (s->sync_action && !strcmp(s->sync_action, "frozen")) {
		log_warn("WARNING: %s frozen raid device (%u:%u) needs inspection.",
			 dev_name(dev), MAJOR(dev->dev), MINOR(dev->dev));
		r = 1;
	}

	dm_pool_destroy(mem);

	return r;
}

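/*
 * Filter devices by their DM uuid: internal lvm devices (uuid carrying
 * a private suffix), reserved LV names (snapshot, pvmove, ...), LVs in
 * general when check_lv is set, and private crypto/stratis devices are
 * all rejected.  Returns 1 when the uuid raises no objection.
 */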
static int _is_usable_uuid(const struct device *dev, const char *name, const char *uuid, int check_reserved, int check_lv, int *is_lv)
{
	char *vgname, *lvname, *layer;
	char vg_name[NAME_LEN];

	if (!check_reserved && !check_lv)
		return 1;

	if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1)) { /* with LVM- prefix */
		if (check_reserved) {
			/* Check internal lvm devices */
			if (strlen(uuid) > (sizeof(UUID_PREFIX) + 2 * ID_LEN)) { /* 68 with suffix */
				log_debug_activation("%s: Reserved uuid %s on internal LV device %s not usable.",
						     dev_name(dev), uuid, name);
				return 0;
			}

			/* Recognize some older reserved LVs just from the LV name (snapshot, pvmove...) */
			vgname = vg_name;
			if (!_dm_strncpy(vg_name, name, sizeof(vg_name)) ||
			    !dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &layer))
				return_0;

			/* FIXME: fails to handle dev aliases i.e. /dev/dm-5, replace with UUID suffix */
			if (lvname && (is_reserved_lvname(lvname) || *layer)) {
				log_debug_activation("%s: Reserved internal LV device %s/%s%s%s not usable.",
						     dev_name(dev), vgname, lvname, *layer ? "-" : "", layer);
				return 0;
			}
		}

		if (check_lv) {
			/* Skip LVs */
			if (is_lv)
				*is_lv = 1;
			return 0;
		}
	}

	if (check_reserved &&
	    (!strncmp(uuid, CRYPT_TEMP, sizeof(CRYPT_TEMP) - 1) ||
	     !strncmp(uuid, CRYPT_SUBDEV, sizeof(CRYPT_SUBDEV) - 1) ||
	     !strncmp(uuid, STRATIS, sizeof(STRATIS) - 1))) {
		/* Skip private crypto devices */
		log_debug_activation("%s: Reserved uuid %s on %s device %s not usable.",
				     dev_name(dev), uuid,
				     uuid[0] == 'C' ? "crypto" : "stratis",
				     name);
		return 0;
	}

	return 1;
}

/*
 * dm_device_is_usable
 * @cmd
 * @dev
 * @check
 * @is_lv
 *
 * A device is considered not usable if it is:
 *     1) An empty device (no targets)
 *     2) A blocked mirror (i.e. a mirror with a failure and block_on_error set)
 *     3) ignore_suspended_devices is set and
 *        a) the device is suspended
 *        b) it is a snapshot origin
 *     4) an error target
 *     5) the LV name is a reserved name.
 *
 * Returns: 1 if usable, 0 otherwise
 */
int dm_device_is_usable(struct cmd_context *cmd, struct device *dev, struct dev_usable_check_params check, int *is_lv)
{
	struct dm_task *dmt;
	struct dm_info info;
	const char *name, *uuid;
	const struct dm_active_device *dm_dev;
	uint64_t start, length;
	char *target_type = NULL;
	char *params;
	void *next = NULL;
	int only_error_or_zero_target = 1;
	int r = 0;

	if (dm_devs_cache_use() &&
	    /* With cache we can avoid status calls for unusable UUIDs */
	    (dm_dev = dm_devs_cache_get_by_devno(cmd, dev->dev)) &&
	    !_is_usable_uuid(dev, dm_dev->name, dm_dev->uuid, check.check_reserved, check.check_lv, is_lv))
		return 0;

	if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, NULL, NULL,
				    MAJOR(dev->dev), MINOR(dev->dev), 0, 0, 0)))
		return_0;

	if (!info.exists)
		goto out;

	name = dm_task_get_name(dmt);
	uuid = dm_task_get_uuid(dmt);

	if (check.check_empty && !info.target_count) {
		log_debug_activation("%s: Empty device %s not usable.", dev_name(dev), name);
		goto out;
	}

	if (check.check_suspended && info.suspended) {
		log_debug_activation("%s: Suspended device %s not usable.", dev_name(dev), name);
		goto out;
	}

	if (uuid &&
	    !_is_usable_uuid(dev, name, uuid, check.check_reserved, check.check_lv, is_lv))
		goto out;

	/* FIXME Also check for mpath no paths */
	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &target_type, &params);

		if (!target_type)
			continue;

		if (check.check_blocked && !strcmp(target_type, TARGET_NAME_MIRROR)) {
			if (ignore_lvm_mirrors()) {
				log_debug_activation("%s: Scanning mirror devices is disabled.", dev_name(dev));
				goto out;
			}
			if (!_ignore_blocked_mirror_devices(cmd, dev, start,
							    length, params)) {
				log_debug_activation("%s: Mirror device %s not usable.",
						     dev_name(dev), name);
				goto out;
			}
		}

		/*
		 * FIXME: Snapshot origin could be sitting on top of a mirror
		 * which could be blocking I/O. We should add a check for the
		 * stack here and see if there's blocked mirror underneath.
		 * Currently, mirrors used as origin or snapshot is not
		 * supported anymore and in general using mirrors in a stack
		 * is disabled by default (with a warning that if enabled,
		 * it could cause various deadlocks).
		 * Similar situation can happen with RAID devices where
		 * a RAID device can be snapshotted.
		 * If one of the RAID legs are down and we're doing
		 * lvconvert --repair, there's a time period in which
		 * snapshot components are (besides other devs) suspended.
		 * See also https://bugzilla.redhat.com/show_bug.cgi?id=1219222
		 * for an example where this causes problems.
		 *
		 * This is a quick check for now, but replace it with more
		 * robust and better check that would check the stack
		 * correctly, not just snapshots but any combination possible
		 * in a stack - use proper dm tree to check this instead.
		 */
		if (check.check_suspended &&
		    (!strcmp(target_type, TARGET_NAME_SNAPSHOT) || !strcmp(target_type, TARGET_NAME_SNAPSHOT_ORIGIN)) &&
		    _ignore_suspended_snapshot_component(dev)) {
			log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
			goto out;
		}

		if (!strcmp(target_type, TARGET_NAME_SNAPSHOT) &&
		    _ignore_invalid_snapshot(params)) {
			log_debug_activation("%s: Invalid %s device %s not usable.", dev_name(dev), target_type, name);
			goto out;
		}

		if (!strncmp(target_type, TARGET_NAME_RAID, 4) && _ignore_frozen_raid(dev, params)) {
			log_debug_activation("%s: Frozen %s device %s not usable.",
					     dev_name(dev), target_type, name);
			goto out;
		}

		/* TODO: extend check struct ? */
		if (!strcmp(target_type, TARGET_NAME_THIN) &&
		    !_ignore_unusable_thins(dev)) {
			log_debug_activation("%s: %s device %s not usable.", dev_name(dev), target_type, name);
			goto out;
		}

		if (only_error_or_zero_target &&
		    strcmp(target_type, TARGET_NAME_ERROR) &&
		    strcmp(target_type, TARGET_NAME_ZERO))
			only_error_or_zero_target = 0;
	} while (next);

	/* Skip devices consisting entirely of error or zero targets. */
	/* FIXME Deal with device stacked above error targets? */
	if (check.check_error_target && only_error_or_zero_target) {
		log_debug_activation("%s: Error device %s not usable.",
				     dev_name(dev), name);
		goto out;
	}

	/* FIXME Also check dependencies? */

	r = 1;

out:
	dm_task_destroy(dmt);
	return r;
}

/* Read UUID from a given DM device into uuid_buf */
int devno_dm_uuid(struct cmd_context *cmd, int major, int minor,
		  char *uuid_buf, size_t uuid_buf_size)
{
	struct dm_task *dmt;
	struct dm_info info;
	const struct dm_active_device *dm_dev;
	const char *uuid;
	int r = 0;

	if (major != cmd->dev_types->device_mapper_major)
		return 0;

	if (dm_devs_cache_use()) {
		if ((dm_dev = dm_devs_cache_get_by_devno(cmd, MKDEV(major, minor)))) {
			dm_strncpy(uuid_buf, dm_dev->uuid, uuid_buf_size);
			return 1;
		}
		uuid_buf[0] = 0;
		return 0;
	}

	if (!(dmt = _setup_task_run(DM_DEVICE_INFO, &info, NULL, NULL, NULL,
				    major, minor, 0, 0, 0)))
		return_0;

	if (info.exists && (uuid = dm_task_get_uuid(dmt)))
		r = dm_strncpy(uuid_buf, uuid, uuid_buf_size);

	dm_task_destroy(dmt);

	return r;
}

int dev_dm_uuid(struct cmd_context *cmd, struct device *dev,
		char *uuid_buf, size_t uuid_buf_size)
{
	return devno_dm_uuid(cmd, MAJOR(dev->dev), MINOR(dev->dev),
			     uuid_buf, uuid_buf_size);
}

/*
 * If active LVs were activated by a version of LVM2 before 2.02.00 we must
 * perform additional checks to find them because they do not have the LVM-
 * prefix on their dm uuids.
 * As of 2.02.150, we've chosen to disable this compatibility arbitrarily if
 * we're running kernel version 3 or above.
 */
#define MIN_KERNEL_MAJOR 3

static int _original_uuid_format_check_required(struct cmd_context *cmd)
{
	static int _kernel_major = 0;

	if (!_kernel_major) {
		if ((sscanf(cmd->kernel_vsn, "%d", &_kernel_major) == 1) &&
		    (_kernel_major >= MIN_KERNEL_MAJOR))
			log_debug_activation("Skipping checks for old devices without " UUID_PREFIX
					     " dm uuid prefix (kernel vsn %d >= %d).", _kernel_major, MIN_KERNEL_MAJOR);
		else
			_kernel_major = -1;
	}

	return (_kernel_major == -1);
}

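/*
 * Device info lookup coping with all historic dm uuid formats: first the
 * current dlid, then the dlid without the suffixes introduced in
 * 2.02.106, and finally (only when running on an old kernel) the bare
 * uuid used before the LVM- prefix was added in 2.02.00.
 */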
static int _info(struct cmd_context *cmd,
		 const char *name, const char *dlid,
		 int with_open_count, int with_read_ahead, int with_name_check,
		 struct dm_info *dminfo, uint32_t *read_ahead,
		 struct lv_seg_status *seg_status)
{
	char old_style_dlid[sizeof(UUID_PREFIX) + 2 * ID_LEN];
	const char *suffix, *suffix_position;
	const char *name_check = (with_name_check) ? name : NULL;
	unsigned i = 0;

	log_debug_activation("Getting device info for %s [%s].", name, dlid);

	/* Check for dlid */
	if (!_info_run(dlid, dminfo, read_ahead, seg_status, name_check,
		       with_open_count, with_read_ahead, 0, 0))
		return_0;

	if (dminfo->exists)
		return 1;

	/* Check for original version of dlid before the suffixes got added in 2.02.106 */
	if ((suffix_position = strrchr(dlid, '-'))) {
		while ((suffix = _uuid_suffix_list[i++])) {
			if (strcmp(suffix_position + 1, suffix))
				continue;

			dm_strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));
			if (!_info_run(old_style_dlid, dminfo, read_ahead, seg_status,
				       name_check, with_open_count, with_read_ahead,
				       0, 0))
				return_0;
			if (dminfo->exists)
				return 1;
		}
	}

	/* Must we still check for the pre-2.02.00 dm uuid format? */
	if (!_original_uuid_format_check_required(cmd))
		return 1;

	/* Check for dlid before UUID_PREFIX was added */
	if (!_info_run(dlid + sizeof(UUID_PREFIX) - 1, dminfo, read_ahead, seg_status,
		       name_check, with_open_count, with_read_ahead, 0, 0))
		return_0;

	return 1;
}

int dev_manager_remove_dm_major_minor(uint32_t major, uint32_t minor)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing dm dev %u:%u", major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
		return_0;

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for remove %u:%u", major, minor);
		goto out;
	}

	r = dm_task_run(dmt);
out:
	dm_task_destroy(dmt);

	return r;
}

static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info)
{
	return _info_run(NULL, info, NULL, NULL, NULL, 0, 0, major, minor);
}

int dev_manager_check_prefix_dm_major_minor(uint32_t major, uint32_t minor, const char *prefix)
{
	struct dm_task *dmt;
	const char *uuid;
	int r = 1;

	if (!(dmt = _setup_task_run(DM_DEVICE_INFO, NULL, NULL, NULL, 0, major, minor, 0, 0, 0)))
		return_0;

	if (!(uuid = dm_task_get_uuid(dmt)) || strncasecmp(uuid, prefix, strlen(prefix)))
		r = 0;

	dm_task_destroy(dmt);

	return r;
}

/*
 * Get a list of active dm devices from the kernel.
 * The 'devs' list contains a struct dm_active_device.
 */
int dev_manager_get_dm_active_devices(const char *prefix, struct dm_list **devs, unsigned *devs_features)
{
	struct dm_task *dmt;
	int r = 1;

	if (!(dmt = _setup_task_run(DM_DEVICE_LIST, NULL, NULL, NULL, 0, 0, 0, 0, 0, 0)))
		return_0;

	if (!dm_task_get_device_list(dmt, devs, devs_features)) {
		r = 0;
		goto_out;
	}

out:
	dm_task_destroy(dmt);

	return r;
}

int dev_manager_info(struct cmd_context *cmd,
		     const struct logical_volume *lv, const char *layer,
		     int with_open_count, int with_read_ahead, int with_name_check,
		     struct dm_info *dminfo, uint32_t *read_ahead,
		     struct lv_seg_status *seg_status)
{
	char old_style_dlid[sizeof(UUID_PREFIX) + 2 * ID_LEN];
	char *dlid, *name;
	int r = 0;

	if (!(name = dm_build_dm_name(cmd->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(cmd->mem, lv, layer)))
		goto_out;

	dm_strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));

	if (dm_devs_cache_use() &&
	    !dm_devs_cache_get_by_uuid(cmd, dlid) &&
	    !dm_devs_cache_get_by_uuid(cmd, old_style_dlid)) {
		log_debug("Cached as inactive %s.", name);
		if (dminfo)
			memset(dminfo, 0, sizeof(*dminfo));
		r = 1;
		goto out;
	}

	if (!(r = _info(cmd, name, dlid,
			with_open_count, with_read_ahead, with_name_check,
			dminfo, read_ahead, seg_status)))
		stack;
out:
	dm_pool_free(cmd->mem, name);

	return r;
}

static struct dm_tree_node *_cached_dm_tree_node(struct dm_pool *mem,
						 struct dm_tree *dtree,
						 const struct logical_volume *lv,
						 const char *layer)
{
	struct dm_tree_node *dnode;
	char *dlid;

	if (!(dlid = build_dm_uuid(mem, lv, layer)))
		return_NULL;

	dnode = dm_tree_find_node_by_uuid(dtree, dlid);

	dm_pool_free(mem, dlid);

	return dnode;
}

static const struct dm_info *_cached_dm_info(struct dm_pool *mem,
					     struct dm_tree *dtree,
					     const struct logical_volume *lv,
					     const char *layer)
{
	const struct dm_tree_node *dnode;
	const struct dm_info *dinfo = NULL;

	if (!(dnode = _cached_dm_tree_node(mem, dtree, lv, layer)))
		return NULL;

	if (!(dinfo = dm_tree_node_get_info(dnode))) {
		log_warn("WARNING: Cannot get info from tree node for %s.",
			 display_lvname(lv));
		return NULL;
	}

	if (!dinfo->exists)
		dinfo = NULL;

	return dinfo;
}

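/*
 * Ask the kernel whether the LV's table - preferring a preloaded
 * (inactive) one - contains a target matching target_type.
 */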
int lv_has_target_type(struct dm_pool *mem, const struct logical_volume *lv,
		       const char *layer, const char *target_type)
{
	int r = 0;
	char *dlid;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;

	if (!(dlid = build_dm_uuid(mem, lv, layer)))
		return_0;

	if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
		goto_bad;

	if (!info.exists)
		goto_out;

	/* If there is a preloaded table, use that in preference. */
	if (info.inactive_table) {
		dm_task_destroy(dmt);

		if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 1)))
			goto_bad;

		if (!info.exists || !info.inactive_table)
			goto_out;
	}

	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &type, &params);
		if (type && !strncmp(type, target_type, strlen(target_type))) {
			r = 1;
			break;
		}
	} while (next);

out:
	dm_task_destroy(dmt);
bad:
	dm_pool_free(mem, dlid);

	return r;
}

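/*
 * Compare the thin device_id from the (preferably preloaded) kernel
 * table with the one recorded in metadata; returns 1 only on a match.
 */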
static int _lv_has_thin_device_id(struct dm_pool *mem, const struct logical_volume *lv,
				  const char *layer, unsigned device_id)
{
	char *dlid;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;
	unsigned id = ~0;

	if (!(dlid = build_dm_uuid(mem, lv, layer)))
		return_0;

	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 0)))
		goto_bad;

	if (!info.exists)
		goto_out;

	/* If there is a preloaded table, use that in preference. */
	if (info.inactive_table) {
		dm_task_destroy(dmt);

		if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 1)))
			goto_bad;

		if (!info.exists || !info.inactive_table)
			goto_out;
	}

	(void) dm_get_next_target(dmt, next, &start, &length, &type, &params);

	if (!type || strcmp(type, TARGET_NAME_THIN))
		goto_out;

	if (!params || sscanf(params, "%*u:%*u %u", &id) != 1)
		goto_out;

	log_debug_activation("%soaded thin volume %s with id %u is %smatching id %u.",
			     info.inactive_table ? "Prel" : "L",
			     display_lvname(lv), id,
			     (device_id != id) ? "not " : "", device_id);
out:
	dm_task_destroy(dmt);
bad:
	dm_pool_free(mem, dlid);

	return (device_id == id);
}

int add_linear_area_to_dtree(struct dm_tree_node *node, uint64_t size, uint32_t extent_size,
			     int use_linear_target, const char *vgname, const char *lvname)
{
	uint32_t page_size;

	/*
	 * Use striped or linear target?
	 */
	if (!use_linear_target) {
		page_size = lvm_getpagesize() >> SECTOR_SHIFT;

		/*
		 * We'll use the extent size as the stripe size.
		 * Extent size and page size are always powers of 2.
		 * The striped target requires that the stripe size is
		 * divisible by the page size.
		 */
		if (extent_size >= page_size) {
			/* Use striped target */
			if (!dm_tree_node_add_striped_target(node, size, extent_size))
				return_0;
			return 1;
		}

		/* Some exotic cases are unsupported by striped. */
		log_warn("WARNING: Using linear target for %s/%s: Striped requires extent size "
			 "(" FMTu32 " sectors) >= page size (" FMTu32 ").",
			 vgname, lvname, extent_size, page_size);
	}

	/*
	 * Use linear target.
	 */
	if (!dm_tree_node_add_linear_target(node, size))
		return_0;

	return 1;
}

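/*
 * Combine percentages of two segments into one overall value:
 * merge-failure and invalidity are sticky, 0% and 100% are kept only
 * when both sides agree, and anything else is recomputed from the
 * accumulated numerator/denominator.
 */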
static dm_percent_range_t _combine_percent(dm_percent_t a, dm_percent_t b,
					   uint32_t numerator, uint32_t denominator)
{
	if (a == LVM_PERCENT_MERGE_FAILED || b == LVM_PERCENT_MERGE_FAILED)
		return LVM_PERCENT_MERGE_FAILED;

	if (a == DM_PERCENT_INVALID || b == DM_PERCENT_INVALID)
		return DM_PERCENT_INVALID;

	if (a == DM_PERCENT_100 && b == DM_PERCENT_100)
		return DM_PERCENT_100;

	if (a == DM_PERCENT_0 && b == DM_PERCENT_0)
		return DM_PERCENT_0;

	return (dm_percent_range_t) dm_make_percent(numerator, denominator);
}

1314
b9e67d4f 1315static int _percent_run(struct dev_manager *dm, const char *name,
03b49fe1 1316 const char *dlid,
b9e67d4f 1317 const char *target_type, int wait,
cfed0d09 1318 const struct logical_volume *lv, dm_percent_t *overall_percent,
ff8aaade
ZK
1319 uint32_t *event_nr, int fail_if_percent_unsupported,
1320 int *interrupted)
a9953411
AK
1321{
1322 int r = 0;
1323 struct dm_task *dmt;
10b29b8d 1324 struct dm_info info;
a9953411
AK
1325 void *next = NULL;
1326 uint64_t start, length;
1327 char *type = NULL;
1328 char *params = NULL;
d3b4a0f3 1329 const struct dm_list *segh = lv ? &lv->segments : NULL;
b9e67d4f 1330 struct lv_segment *seg = NULL;
78ad1549 1331 int first_time = 1;
cfed0d09 1332 dm_percent_t percent = DM_PERCENT_INVALID;
a9953411 1333 uint64_t total_numerator = 0, total_denominator = 0;
8bbec41b 1334 struct segment_type *segtype;
a9953411 1335
eb08f865 1336 *overall_percent = percent;
a9953411 1337
8bbec41b
ZK
1338 if (!(segtype = get_segtype_from_string(dm->cmd, target_type)))
1339 return_0;
1340
ff8aaade
ZK
1341 if (wait)
1342 sigint_allow();
1343
5163b8f6
ZK
1344 if (!(dmt = _setup_task_run(wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, &info,
1345 name, dlid, event_nr, 0, 0, 0, 0, 0)))
ff8aaade 1346 goto_bad;
a9953411 1347
5163b8f6 1348 if (!info.exists)
5f4b2acf 1349 goto_out;
10b29b8d
AK
1350
1351 if (event_nr)
1352 *event_nr = info.event_nr;
1353
a9953411
AK
1354 do {
1355 next = dm_get_next_target(dmt, next, &start, &length, &type,
1356 &params);
b9e67d4f 1357 if (lv) {
2c44337b 1358 if (!(segh = dm_list_next(&lv->segments, segh))) {
b9e67d4f 1359 log_error("Number of segments in active LV %s "
922fccc6
ZK
1360 "does not match metadata.",
1361 display_lvname(lv));
b9e67d4f
AK
1362 goto out;
1363 }
2c44337b 1364 seg = dm_list_item(segh, struct lv_segment);
b9e67d4f 1365 }
a9953411 1366
e47a591d 1367 if (!type || !params)
a9953411
AK
1368 continue;
1369
e47a591d
MS
1370 if (strcmp(type, target_type)) {
1371 /* If kernel's type isn't an exact match is it compatible? */
1372 if (!segtype->ops->target_status_compatible ||
1373 !segtype->ops->target_status_compatible(type))
1374 continue;
1375 }
1376
e8bed35d
AK
1377 if (!segtype->ops->target_percent)
1378 continue;
1379
1380 if (!segtype->ops->target_percent(&dm->target_state,
8191fe4f 1381 &percent, dm->mem,
aba30ebc 1382 dm->cmd, seg, params,
4922197a 1383 &total_numerator,
82185ada 1384 &total_denominator))
5f4b2acf 1385 goto_out;
4922197a 1386
78ad1549 1387 if (first_time) {
8191fe4f 1388 *overall_percent = percent;
78ad1549
AK
1389 first_time = 0;
1390 } else
8191fe4f
PR
1391 *overall_percent =
1392 _combine_percent(*overall_percent, percent,
1393 total_numerator, total_denominator);
a9953411
AK
1394 } while (next);
1395
f2554b9d 1396 if (lv && dm_list_next(&lv->segments, segh)) {
b9e67d4f 1397 log_error("Number of segments in active LV %s does not "
922fccc6 1398 "match metadata.", display_lvname(lv));
b9e67d4f
AK
1399 goto out;
1400 }
1401
8191fe4f
PR
1402 if (first_time) {
1403 /* above ->target_percent() was not executed! */
1404 /* FIXME why return PERCENT_100 et. al. in this case? */
cfed0d09 1405 *overall_percent = DM_PERCENT_100;
8191fe4f
PR
1406 if (fail_if_percent_unsupported)
1407 goto_out;
78ad1549 1408 }
a9953411 1409
1bd4b005
ZK
1410 log_debug_activation("LV percent: %s",
1411 display_percent(dm->cmd, *overall_percent));
a9953411
AK
1412 r = 1;
1413
ff8aaade 1414 out:
a9953411 1415 dm_task_destroy(dmt);
ff8aaade
ZK
1416
1417 bad:
1418 if (wait) {
1419 sigint_restore();
1420
1421 if (sigint_caught()) {
1422 *interrupted = 1;
1423 return_0;
1424 }
1425 }
1426
a9953411
AK
1427 return r;
1428}
1429
static int _percent(struct dev_manager *dm, const char *name, const char *dlid,
		    const char *target_type, int wait,
		    const struct logical_volume *lv, dm_percent_t *percent,
		    uint32_t *event_nr, int fail_if_percent_unsupported)
{
	int interrupted = 0;

	if (dlid && *dlid) {
		if (_percent_run(dm, NULL, dlid, target_type, wait, lv, percent,
				 event_nr, fail_if_percent_unsupported, &interrupted))
			return 1;

		if (!interrupted &&
		    _original_uuid_format_check_required(dm->cmd) &&
		    _percent_run(dm, NULL, dlid + sizeof(UUID_PREFIX) - 1,
				 target_type, wait, lv, percent,
				 event_nr, fail_if_percent_unsupported, &interrupted))
			return 1;
	}

	if (!interrupted && name &&
	    _percent_run(dm, name, NULL, target_type, wait, lv, percent,
			 event_nr, fail_if_percent_unsupported, &interrupted))
		return 1;

	return_0;
}

1457
85ed4030 1458/* FIXME Merge with the percent function */
84cdf85b 1459int dev_manager_transient(struct dev_manager *dm, const struct logical_volume *lv)
d345bf2c
PR
1460{
1461 int r = 0;
1462 struct dm_task *dmt;
1463 struct dm_info info;
1464 void *next = NULL;
1465 uint64_t start, length;
1466 char *type = NULL;
1467 char *params = NULL;
1468 char *dlid = NULL;
4af4241b 1469 const char *layer = lv_layer(lv);
d345bf2c
PR
1470 const struct dm_list *segh = &lv->segments;
1471 struct lv_segment *seg = NULL;
1472
6a0d97a6 1473 if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
d345bf2c
PR
1474 return_0;
1475
5163b8f6 1476 if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, NULL, 0, 0, 0, 0, 0)))
d345bf2c
PR
1477 return_0;
1478
5163b8f6 1479 if (!info.exists)
d345bf2c
PR
1480 goto_out;
1481
1482 do {
1483 next = dm_get_next_target(dmt, next, &start, &length, &type,
1484 &params);
81e606ab
ZK
1485
1486 if (!(segh = dm_list_next(&lv->segments, segh))) {
1487 log_error("Number of segments in active LV %s "
922fccc6 1488 "does not match metadata.", display_lvname(lv));
81e606ab 1489 goto out;
d345bf2c 1490 }
81e606ab 1491 seg = dm_list_item(segh, struct lv_segment);
d345bf2c
PR
1492
1493 if (!type || !params)
1494 continue;
1495
fd417db2
ZK
1496 if (!seg) {
1497 log_error(INTERNAL_ERROR "Segment is not selected.");
1498 goto out;
1499 }
1500
d345bf2c 1501 if (seg->segtype->ops->check_transient_status &&
6336ef98 1502 !seg->segtype->ops->check_transient_status(dm->mem, seg, params))
d345bf2c
PR
1503 goto_out;
1504
1505 } while (next);
1506
f2554b9d 1507 if (dm_list_next(&lv->segments, segh)) {
d345bf2c 1508 log_error("Number of segments in active LV %s does not "
922fccc6 1509 "match metadata.", display_lvname(lv));
d345bf2c
PR
1510 goto out;
1511 }
1512
1513 r = 1;
1514
1515 out:
1516 dm_task_destroy(dmt);
1517 return r;
1518}
1519
/*
 * dev_manager implementation.
 */
struct dev_manager *dev_manager_create(struct cmd_context *cmd,
				       const char *vg_name,
				       unsigned track_pvmove_deps)
{
	struct dm_pool *mem;
	struct dev_manager *dm;

	if (!(mem = dm_pool_create("dev_manager", 16 * 1024)))
		return_NULL;

	if (!(dm = dm_pool_zalloc(mem, sizeof(*dm))))
		goto_bad;

	dm->cmd = cmd;
	dm->mem = mem;
	dm->vg_name = vg_name;

	/*
	 * When we manipulate (normally suspend/resume) the PVMOVE
	 * device directly, there's no need to touch the LVs above.
	 */
	dm->track_pvmove_deps = track_pvmove_deps;

	dm->target_state = NULL;

	dm_udev_set_sync_support(cmd->current_settings.udev_sync);

	return dm;

bad:
	dm_pool_destroy(mem);

	return NULL;
}

void dev_manager_destroy(struct dev_manager *dm)
{
	dm_pool_destroy(dm->mem);
}

void dev_manager_release(void)
{
	dm_lib_release();
}

void dev_manager_exit(void)
{
	dm_lib_exit();
}

int dev_manager_snapshot_percent(struct dev_manager *dm,
				 const struct logical_volume *lv,
				 dm_percent_t *percent)
{
	const struct logical_volume *snap_lv;
	char *name;
	const char *dlid;
	int fail_if_percent_unsupported = 0;

	if (lv_is_merging_origin(lv)) {
		/*
		 * Set 'fail_if_percent_unsupported', otherwise passing
		 * unsupported LV types to _percent will lead to a default
		 * successful return with percent_range as PERCENT_100.
		 * - For a merging origin, this will result in a polldaemon
		 *   that runs infinitely (because completion is PERCENT_0)
		 * - We unfortunately don't yet _know_ if a snapshot-merge
		 *   target is active (activation is deferred if dev is open);
		 *   so we can't short-circuit origin devices based purely on
		 *   existing LVM LV attributes.
		 */
		fail_if_percent_unsupported = 1;
	}

	if (lv_is_merging_cow(lv)) {
		/* must check percent of origin for a merging snapshot */
		snap_lv = origin_from_cow(lv);
	} else
		snap_lv = lv;

	/*
	 * Build a name for the top layer.
	 */
	if (!(name = dm_build_dm_name(dm->mem, snap_lv->vg->name, snap_lv->name, NULL)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, snap_lv, NULL)))
		return_0;

	/*
	 * Try and get some info on this device.
	 */
	if (!_percent(dm, name, dlid, TARGET_NAME_SNAPSHOT, 0, NULL, percent,
		      NULL, fail_if_percent_unsupported))
		return_0;

	/* If the snapshot isn't available, percent will be -1 */
	return 1;
}

/* FIXME Merge with snapshot_percent, auto-detecting target type */
/* FIXME Cope with more than one target */
int dev_manager_mirror_percent(struct dev_manager *dm,
			       const struct logical_volume *lv, int wait,
			       dm_percent_t *percent, uint32_t *event_nr)
{
	char *name;
	const char *dlid;
	const char *target_type = first_seg(lv)->segtype->name;
	const char *layer = lv_layer(lv);

	/*
	 * Build a name for the top layer.
	 */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
		return_0;

	log_debug_activation("Getting device %s status percentage for %s.",
			     target_type, name);

	if (!_percent(dm, name, dlid, target_type, wait, lv, percent, event_nr, 0))
		return_0;

	return 1;
}

c8242e5c
JB
1652int dev_manager_raid_status(struct dev_manager *dm,
1653 const struct logical_volume *lv,
a9b4acd5 1654 struct lv_status_raid **status, int *exists)
c8242e5c
JB
1655{
1656 int r = 0;
1657 const char *dlid;
1658 struct dm_task *dmt;
1659 struct dm_info info;
1660 uint64_t start, length;
1661 char *type = NULL;
1662 char *params = NULL;
f5cd9c35 1663 const char *layer = lv_layer(lv);
a9b4acd5
ZK
1664 struct dm_status_raid *sr;
1665
1666 *exists = -1;
1667 if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_cache))))
1668 return_0;
c8242e5c 1669
6a0d97a6 1670 if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
c8242e5c
JB
1671 return_0;
1672
5163b8f6 1673 if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
c8242e5c
JB
1674 return_0;
1675
a9b4acd5
ZK
1676 if (!(*exists = info.exists))
1677 goto out;
1678
1679 log_debug_activation("Checking raid status for volume %s.",
1680 display_lvname(lv));
c8242e5c
JB
1681
1682 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1683
81ef4eb4
ZK
1684 if (!type || strcmp(type, TARGET_NAME_RAID)) {
1685 log_error("Expected %s segment type but got %s instead.",
1686 TARGET_NAME_RAID, type ? type : "NULL");
38f8f4a9
JB
1687 goto out;
1688 }
1689
c2dc21d8
AK
1690 /* FIXME Check there's only one target */
1691
a9b4acd5 1692 if (!dm_get_status_raid(dm->mem, params, &sr))
c8242e5c
JB
1693 goto_out;
1694
a9b4acd5
ZK
1695 (*status)->mem = dm->mem; /* User has to destroy this mem pool later */
1696 (*status)->raid = sr;
1697 (*status)->in_sync = dm_make_percent(sr->insync_regions, sr->total_regions);
1698
c8242e5c
JB
1699 r = 1;
1700out:
1701 dm_task_destroy(dmt);
1702
1703 return r;
1704}
1705
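/*
 * Illustrative note (invented numbers): a raid status line reporting
 * 512 of 1024 regions in sync makes the code above set
 * (*status)->in_sync = dm_make_percent(512, 1024), i.e. 50%.
 */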
ff64e350
JB
1706int dev_manager_raid_message(struct dev_manager *dm,
1707 const struct logical_volume *lv,
1708 const char *msg)
1709{
1710 int r = 0;
1711 const char *dlid;
1712 struct dm_task *dmt;
1713 const char *layer = lv_layer(lv);
1714
2360ce35 1715 if (!lv_is_raid(lv)) {
81ef4eb4 1716 log_error(INTERNAL_ERROR "%s is not a RAID logical volume.",
922fccc6 1717 display_lvname(lv));
ff64e350
JB
1718 return 0;
1719 }
1720
547bdb63 1721 /* These are the supported RAID messages for dm-raid v1.9.0 */
a45cc0fe
ZK
1722 if (strcmp(msg, "idle") &&
1723 strcmp(msg, "frozen") &&
1724 strcmp(msg, "resync") &&
1725 strcmp(msg, "recover") &&
1726 strcmp(msg, "check") &&
547bdb63 1727 strcmp(msg, "repair")) {
81ef4eb4 1728 log_error(INTERNAL_ERROR "Unknown RAID message: %s.", msg);
ff64e350
JB
1729 return 0;
1730 }
1731
6a0d97a6 1732 if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
ff64e350
JB
1733 return_0;
1734
5163b8f6 1735 if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 1, 0)))
ff64e350
JB
1736 return_0;
1737
ff64e350
JB
1738 if (!dm_task_set_message(dmt, msg))
1739 goto_out;
1740
1741 if (!dm_task_run(dmt))
1742 goto_out;
1743
1744 r = 1;
1745out:
1746 dm_task_destroy(dmt);
1747
1748 return r;
1749}
1750
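/*
 * Rough equivalent for debugging (assuming dmsetup is installed):
 * dev_manager_raid_message(dm, lv, "check") behaves much like
 * 'dmsetup message <vg>-<lv> 0 check' addressed to the raid target.
 */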
3ae55695
DT
1751int dev_manager_writecache_message(struct dev_manager *dm,
1752 const struct logical_volume *lv,
1753 const char *msg)
1754{
1755 int r = 0;
1756 const char *dlid;
1757 struct dm_task *dmt;
1758 const char *layer = lv_layer(lv);
1759
1760 if (!lv_is_writecache(lv)) {
1761 log_error(INTERNAL_ERROR "%s is not a writecache logical volume.",
1762 display_lvname(lv));
1763 return 0;
1764 }
1765
1766 if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
1767 return_0;
1768
1769 if (!(dmt = _setup_task_run(DM_DEVICE_TARGET_MSG, NULL, NULL, dlid, 0, 0, 0, 0, 1, 0)))
1770 return_0;
1771
1772 if (!dm_task_set_message(dmt, msg))
1773 goto_out;
1774
1775 if (!dm_task_run(dmt))
1776 goto_out;
1777
1778 r = 1;
1779out:
1780 dm_task_destroy(dmt);
1781
1782 return r;
1783}
1784
75b8ea19
JB
1785int dev_manager_cache_status(struct dev_manager *dm,
1786 const struct logical_volume *lv,
e5a60086 1787 struct lv_status_cache **status, int *exists)
75b8ea19
JB
1788{
1789 int r = 0;
1790 const char *dlid;
1791 struct dm_task *dmt;
1792 struct dm_info info;
1793 uint64_t start, length;
1794 char *type = NULL;
1795 char *params = NULL;
ab491204 1796 struct dm_status_cache *c;
75b8ea19 1797
e5a60086 1798 *exists = -1;
ab491204
ZK
1799 if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
1800 return_0;
1801
5163b8f6 1802 if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
75b8ea19
JB
1803 return_0;
1804
e5a60086
ZK
1805 if (!(*exists = info.exists))
1806 goto out;
1807
1808 log_debug_activation("Checking status for cache volume %s.",
1809 display_lvname(lv));
75b8ea19
JB
1810
1811 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1812
1216efdf 1813 if (!type || strcmp(type, TARGET_NAME_CACHE)) {
81ef4eb4
ZK
1814 log_error("Expected %s segment type but got %s instead.",
1815 TARGET_NAME_CACHE, type ? type : "NULL");
75b8ea19
JB
1816 goto out;
1817 }
1818
ab491204
ZK
1819 /*
1820 * FIXME:
1821 * ->target_percent() API is able to transfer only a single value.
1822 * Needs to be able to pass whole structure.
1823 */
5034bb8d 1824 if (!dm_get_status_cache(dm->mem, params, &c))
75b8ea19
JB
1825 goto_out;
1826
4de6f580
ZK
1827 if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_cache))))
1828 goto_out;
1829
e5d3f812 1830 (*status)->mem = dm->mem; /* User has to destroy this mem pool later */
4de6f580 1831 (*status)->cache = c;
5c415afd
ZK
1832 if (c->fail || c->error) {
1833 (*status)->data_usage =
1834 (*status)->metadata_usage =
1835 (*status)->dirty_usage = DM_PERCENT_INVALID;
1836 } else {
1837 (*status)->data_usage = dm_make_percent(c->used_blocks,
1838 c->total_blocks);
1839 (*status)->metadata_usage = dm_make_percent(c->metadata_used_blocks,
1840 c->metadata_total_blocks);
05dc70e2 1841 (*status)->dirty_usage = (c->used_blocks) ?
454b891f
ZK
1842 dm_make_percent(c->dirty_blocks,
1843 c->used_blocks) : DM_PERCENT_0;
5c415afd 1844 }
75b8ea19
JB
1845 r = 1;
1846out:
1847 dm_task_destroy(dmt);
1848
1849 return r;
1850}
1851
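/*
 * Worked example (invented numbers): used_blocks=600, total_blocks=1000
 * and dirty_blocks=150 give data_usage=60% and dirty_usage=25%; note the
 * dirty percentage above is relative to the used blocks, not to the
 * whole cache.
 */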
bdba904d 1852int dev_manager_thin_pool_status(struct dev_manager *dm,
4de6f580 1853 const struct logical_volume *lv, int flush,
e5a60086 1854 struct lv_status_thin_pool **status, int *exists)
bdba904d 1855{
4de6f580 1856 struct dm_status_thin_pool *dm_status;
bdba904d
ZK
1857 const char *dlid;
1858 struct dm_task *dmt;
1859 struct dm_info info;
1860 uint64_t start, length;
1861 char *type = NULL;
1862 char *params = NULL;
1863 int r = 0;
1864
e5a60086
ZK
1865 *exists = -1;
1866 if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_thin_pool))))
1867 return_0;
1868
bdba904d 1869 /* Build dlid for the thin pool layer */
6a0d97a6 1870 if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
bdba904d
ZK
1871 return_0;
1872
5163b8f6 1873 if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
bdba904d
ZK
1874 return_0;
1875
e5a60086
ZK
1876 if (!(*exists = info.exists))
1877 goto out;
1878
1879 log_debug_activation("Checking thin pool status for LV %s.",
1880 display_lvname(lv));
bdba904d
ZK
1881
1882 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1883
4de6f580
ZK
1884 if (!type || strcmp(type, TARGET_NAME_THIN_POOL)) {
1885 log_error("Expected %s segment type but got %s instead.",
1886 TARGET_NAME_THIN_POOL, type ? type : "NULL");
1887 goto out;
1888 }
c2dc21d8 1889
4de6f580 1890 if (!dm_get_status_thin_pool(dm->mem, params, &dm_status))
bdba904d
ZK
1891 goto_out;
1892
4de6f580
ZK
1893 (*status)->mem = dm->mem;
1894 (*status)->thin_pool = dm_status;
1895
1896 if (dm_status->fail || dm_status->error) {
1897 (*status)->data_usage =
1898 (*status)->metadata_usage = DM_PERCENT_INVALID;
1899 } else {
1900 (*status)->data_usage = dm_make_percent(dm_status->used_data_blocks,
1901 dm_status->total_data_blocks);
1902 (*status)->metadata_usage = dm_make_percent(dm_status->used_metadata_blocks,
1903 dm_status->total_metadata_blocks);
1904 }
1905
bdba904d
ZK
1906 r = 1;
1907out:
1908 dm_task_destroy(dmt);
1909
1910 return r;
1911}
1912
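/*
 * Illustrative example (invented numbers): 4000 of 10000 data blocks
 * and 50 of 1000 metadata blocks in use yield data_usage=40% and
 * metadata_usage=5%; both become DM_PERCENT_INVALID once the kernel
 * reports the pool as failed or in error mode.
 */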
4de6f580
ZK
1913int dev_manager_thin_status(struct dev_manager *dm,
1914 const struct logical_volume *lv, int flush,
e5a60086 1915 struct lv_status_thin **status, int *exists)
c0fcaacb 1916{
4de6f580 1917 struct dm_status_thin *dm_status;
c0fcaacb 1918 const char *dlid;
4de6f580
ZK
1919 struct dm_task *dmt;
1920 struct dm_info info;
1921 uint64_t start, length;
1922 char *type = NULL;
1923 char *params = NULL;
1924 uint64_t csize;
1925 int r = 0;
c0fcaacb 1926
e5a60086
ZK
1927 *exists = -1;
1928 if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_thin))))
1929 return_0;
1930
4de6f580 1931 if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
c0fcaacb
ZK
1932 return_0;
1933
4de6f580 1934 if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
c0fcaacb
ZK
1935 return_0;
1936
e5a60086
ZK
1937 if (!(*exists = info.exists))
1938 goto out;
1939
1940 log_debug_activation("Checking thin status for LV %s.",
1941 display_lvname(lv));
c0fcaacb 1942
4de6f580 1943 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
c0fcaacb 1944
4de6f580
ZK
1945 if (!type || strcmp(type, TARGET_NAME_THIN)) {
1946 log_error("Expected %s segment type but got %s instead.",
1947 TARGET_NAME_THIN, type ? type : "NULL");
1948 goto out;
1949 }
76ee0899 1950
4de6f580
ZK
1951 if (!dm_get_status_thin(dm->mem, params, &dm_status))
1952 goto_out;
76ee0899 1953
4de6f580
ZK
1954 (*status)->mem = dm->mem;
1955 (*status)->thin = dm_status;
1956
1957 if (dm_status->fail)
1958 (*status)->usage = DM_PERCENT_INVALID;
1959 else {
1960 /* Pool allocates whole chunks, so round the size up to the nearest one */
1961 csize = first_seg(first_seg(lv)->pool_lv)->chunk_size;
1962 csize = ((lv->size + csize - 1) / csize) * csize;
1963 if (dm_status->mapped_sectors > csize) {
1964 log_warn("WARNING: LV %s maps %s while the size is only %s.",
1965 display_lvname(lv),
1966 display_size(dm->cmd, dm_status->mapped_sectors),
1967 display_size(dm->cmd, csize));
1968 /* Don't show nonsense numbers like i.e. 1000% full */
1969 dm_status->mapped_sectors = csize;
1970 }
1971 (*status)->usage = dm_make_percent(dm_status->mapped_sectors, csize);
1972 }
81ef4eb4 1973
4de6f580
ZK
1974 r = 1;
1975out:
1976 dm_task_destroy(dmt);
76ee0899 1977
4de6f580 1978 return r;
76ee0899
ZK
1979}
1980
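/*
 * Chunk rounding sketch (invented numbers): with chunk_size=128 sectors
 * and lv->size=1000 sectors, csize above becomes
 * ((1000 + 127) / 128) * 128 = 1024, so a fully mapped thin LV shows
 * 100% usage instead of a nonsensical value above 100%.
 */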
9968be55
ZK
1981/*
1982 * Explore the state of the running DM table to obtain the currently used device id.
1983 */
572983d7
ZK
1984int dev_manager_thin_device_id(struct dev_manager *dm,
1985 const struct logical_volume *lv,
e5a60086 1986 uint32_t *device_id, int *exists)
572983d7
ZK
1987{
1988 const char *dlid;
1989 struct dm_task *dmt;
1990 struct dm_info info;
1991 uint64_t start, length;
1992 char *params, *target_type = NULL;
9968be55 1993 const char *layer = lv_layer(lv);
572983d7
ZK
1994 int r = 0;
1995
e5a60086 1996 *exists = -1;
9968be55
ZK
1997 if (lv_is_merging_origin(lv) && !lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
1998 /* If the merge has already happened, that table
1999 * can already be using correct LV without -real layer */
2000 layer = NULL;
2001
572983d7 2002 /* Build dlid for the thin layer */
9968be55 2003 if (!(dlid = build_dm_uuid(dm->mem, lv, layer)))
572983d7
ZK
2004 return_0;
2005
5163b8f6 2006 if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 1, 0)))
572983d7
ZK
2007 return_0;
2008
e5a60086
ZK
2009 if (!(*exists = info.exists))
2010 goto out;
2011
2012 log_debug_activation("Checking device id for LV %s.",
2013 display_lvname(lv));
572983d7
ZK
2014
2015 if (dm_get_next_target(dmt, NULL, &start, &length,
2016 &target_type, &params)) {
922fccc6
ZK
2017 log_error("More than one table line found for %s.",
2018 display_lvname(lv));
572983d7
ZK
2019 goto out;
2020 }
2021
1216efdf 2022 if (!target_type || strcmp(target_type, TARGET_NAME_THIN)) {
922fccc6
ZK
2023 log_error("Unexpected target type %s found for thin %s.",
2024 target_type, display_lvname(lv));
572983d7
ZK
2025 goto out;
2026 }
2027
164d7e72 2028 if (!params || sscanf(params, "%*u:%*u %u", device_id) != 1) {
922fccc6
ZK
2029 log_error("Cannot parse table parameters %s for %s.",
2030 params, display_lvname(lv));
572983d7
ZK
2031 goto out;
2032 }
2033
2034 r = 1;
2035out:
2036 dm_task_destroy(dmt);
2037
2038 return r;
2039}
2040
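/*
 * Parsing note (illustrative values): the thin target params string has
 * the form "<pool_major>:<pool_minor> <device_id>", e.g. "253:2 42",
 * so the sscanf() format "%*u:%*u %u" above extracts 42 as the device id.
 */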
4f708e87 2041int dev_manager_vdo_pool_status(struct dev_manager *dm,
e5a60086
ZK
2042 const struct logical_volume *lv, int flush,
2043 struct lv_status_vdo **status, int *exists)
4f708e87 2044{
4f708e87
ZK
2045 const char *dlid;
2046 struct dm_info info;
2047 uint64_t start, length;
2048 struct dm_task *dmt = NULL;
2049 char *type = NULL;
2050 char *params = NULL;
2051 int r = 0;
2052
e5a60086
ZK
2053 *exists = -1;
2054 if (!(*status = dm_pool_zalloc(dm->mem, sizeof(struct lv_status_vdo))))
2055 return_0;
4f708e87
ZK
2056
2057 if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
2058 return_0;
2059
2060 if (!(dmt = _setup_task_run(DM_DEVICE_STATUS, &info, NULL, dlid, 0, 0, 0, 0, flush, 0)))
2061 return_0;
2062
e5a60086
ZK
2063 if (!(*exists = info.exists))
2064 goto out;
2065
2066 log_debug_activation("Checking VDO pool status for LV %s.",
2067 display_lvname(lv));
4f708e87
ZK
2068
2069 if (dm_get_next_target(dmt, NULL, &start, &length, &type, &params)) {
2070 log_error("More than one table line found for %s.",
2071 display_lvname(lv));
2072 goto out;
2073 }
2074
2075 if (!type || strcmp(type, TARGET_NAME_VDO)) {
2076 log_error("Expected %s segment type but got %s instead.",
2077 TARGET_NAME_VDO, type ? type : "NULL");
2078 goto out;
2079 }
2080
dda85902 2081 if (!_vdo_pool_message_stats(dm->mem, lv, *status))
55937f9c
ZK
2082 stack;
2083
e6f735d4 2084 if (!parse_vdo_pool_status(dm->mem, lv, params, &info, *status))
e689bfb5 2085 goto_out;
4f708e87 2086
e5a60086 2087 (*status)->mem = dm->mem;
4f708e87
ZK
2088
2089 r = 1;
2090out:
2091 dm_task_destroy(dmt);
2092
2093 return r;
2094}
2095
1bed2caf
ZK
2096int dev_manager_vdo_pool_size_config(struct dev_manager *dm,
2097 const struct logical_volume *lv,
2098 struct vdo_pool_size_config *cfg)
2099{
2100 const char *dlid;
2101 struct dm_info info;
2102 uint64_t start, length;
2103 struct dm_task *dmt = NULL;
2104 char *type = NULL;
2105 char *params = NULL;
2106 int r = 0;
2107 unsigned version = 0;
2108
2109 memset(cfg, 0, sizeof(*cfg));
2110
2111 if (!(dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
2112 return_0;
2113
2114 if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, &info, NULL, dlid, 0, 0, 0, 0, 0, 0)))
2115 return_0;
2116
2117 if (!info.exists)
2118 goto inactive; /* VDO device is not active, should not happen here... */
2119
2120 log_debug_activation("Checking VDO pool table line for LV %s.",
2121 display_lvname(lv));
2122
2123 if (dm_get_next_target(dmt, NULL, &start, &length, &type, &params)) {
2124 log_error("More than one table line found for %s.",
2125 display_lvname(lv));
2126 goto out;
2127 }
2128
2129 if (!type || strcmp(type, TARGET_NAME_VDO)) {
2130 log_error("Expected %s segment type but got %s instead.",
2131 TARGET_NAME_VDO, type ? type : "NULL");
2132 goto out;
2133 }
2134
2135 if (sscanf(params, "V%u %*s " FMTu64 " %*u " FMTu32,
2136 &version, &cfg->physical_size, &cfg->block_map_cache_size_mb) != 3) {
2137 log_error("Failed to parse VDO parameters %s for LV %s.",
2138 params, display_lvname(lv));
2139 goto out;
2140 }
2141
2142 switch (version) {
2143 case 2: break;
2144 case 4: break;
2145 default: log_warn("WARNING: Unknown VDO table line version %u.", version);
2146 }
2147
2148 cfg->virtual_size = length;
2149 cfg->physical_size *= 8; // From 4K unit to 512B
2150 cfg->block_map_cache_size_mb /= 256; // From 4K unit to MiB
2151 cfg->index_memory_size_mb = first_seg(lv)->vdo_params.index_memory_size_mb; // Preserved
2152
2153inactive:
2154 r = 1;
2155out:
2156 dm_task_destroy(dmt);
2157
2158 return r;
2159}
2160
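/*
 * Unit-conversion sketch (illustrative values): a table line such as
 * "V4 /dev/dm-5 1048576 4096 32768 ..." yields physical_size=1048576
 * and block_map_cache_size_mb=32768, both counted in 4KiB blocks;
 * the code above turns them into 8388608 512B sectors (*8) and
 * 128 MiB (/256, since 256 * 4KiB = 1 MiB).
 */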
572983d7 2161
5f4b2acf
AK
2162/*************************/
2163/* NEW CODE STARTS HERE */
2164/*************************/
2165
ab9663f3 2166static int _dev_manager_lv_mknodes(const struct logical_volume *lv)
5f4b2acf
AK
2167{
2168 char *name;
2169
e59e2f7c 2170 if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name,
922fccc6 2171 lv->name, NULL)))
5f4b2acf
AK
2172 return_0;
2173
2174 return fs_add_lv(lv, name);
a76ba817
AK
2175}
2176
ab9663f3 2177static int _dev_manager_lv_rmnodes(const struct logical_volume *lv)
a9953411 2178{
5f4b2acf
AK
2179 return fs_del_lv(lv);
2180}
2181
458918b3
ZK
2182static int _lv_has_mknode(const struct logical_volume *lv)
2183{
2184 return (lv_is_visible(lv) &&
2185 (!lv_is_thin_pool(lv) || lv_is_new_thin_pool(lv)));
2186}
2187
ab9663f3
MB
2188int dev_manager_mknodes(const struct logical_volume *lv)
2189{
2190 struct dm_info dminfo;
114f7e62 2191 struct dm_task *dmt;
3eadbbeb 2192 char *name;
ab9663f3
MB
2193 int r = 0;
2194
e59e2f7c 2195 if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name, lv->name, NULL)))
ab9663f3
MB
2196 return_0;
2197
114f7e62
ZK
2198 if (!(dmt = _setup_task_run(DM_DEVICE_MKNODES, &dminfo, name, NULL, 0, 0, 0, 0, 0, 0)))
2199 return_0;
2200
2201 if (dminfo.exists) {
eb3597ac
ZK
2202 /* read-only component LV is also made visible */
2203 if (_lv_has_mknode(lv) || (dminfo.read_only && lv_is_component(lv)))
114f7e62 2204 r = _dev_manager_lv_mknodes(lv);
e3cc3e55
PGTL
2205 else
2206 r = 1;
114f7e62
ZK
2207 } else
2208 r = _dev_manager_lv_rmnodes(lv);
2209
2210 dm_task_destroy(dmt);
ab9663f3 2211
ab9663f3
MB
2212 return r;
2213}
2214
44071331
PR
2215#ifdef UDEV_SYNC_SUPPORT
2216/*
2217 * Until the DM_UEVENT_GENERATED_FLAG was introduced in kernel patch
2218 * 856a6f1dbd8940e72755af145ebcd806408ecedd
2219 * some operations could not be performed by udev, requiring our fallback code.
2220 */
2221static int _dm_driver_has_stable_udev_support(void)
2222{
2223 char vsn[80];
2224 unsigned maj, min, patchlevel;
2225
2226 return driver_version(vsn, sizeof(vsn)) &&
2227 (sscanf(vsn, "%u.%u.%u", &maj, &min, &patchlevel) == 3) &&
2228 (maj == 4 ? min >= 18 : maj > 4);
2229}
2230
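/*
 * Version-check sketch: driver 4.17.0 -> 0 (fallback still needed),
 * 4.18.0 -> 1 and 5.1.0 -> 1, matching the
 * (maj == 4 ? min >= 18 : maj > 4) test above.
 */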
2231static int _check_udev_fallback(struct cmd_context *cmd)
2232{
2233 struct config_info *settings = &cmd->current_settings;
2234
2235 if (settings->udev_fallback != -1)
2236 goto out;
2237
2238 /*
2239 * Use udev fallback automatically in case udev
2240 * is disabled via DM_DISABLE_UDEV environment
2241 * variable or udev rules are switched off.
2242 */
2243 settings->udev_fallback = !settings->udev_rules ? 1 :
d6a91da4 2244 find_config_tree_bool(cmd, activation_verify_udev_operations_CFG, NULL);
44071331
PR
2245
2246 /* Do not rely fully on udev if the udev support is known to be incomplete. */
2247 if (!settings->udev_fallback && !_dm_driver_has_stable_udev_support()) {
2248 log_very_verbose("Kernel driver has incomplete udev support so "
2249 "LVM will check and perform some operations itself.");
2250 settings->udev_fallback = 1;
2251 }
2252out:
2253 return settings->udev_fallback;
2254}
2255
2256#else /* UDEV_SYNC_SUPPORT */
2257
2258static int _check_udev_fallback(struct cmd_context *cmd)
2259{
2260 /* We must use old node/symlink creation code if not compiled with udev support at all! */
2261 return cmd->current_settings.udev_fallback = 1;
2262}
2263
2264#endif /* UDEV_SYNC_SUPPORT */
2265
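/*
 * Configuration note: udev_fallback mirrors the lvm.conf setting
 * activation/verify_udev_operations; it is forced to 1 when udev rules
 * are disabled or when the kernel driver lacks stable udev support.
 */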
84cdf85b 2266static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_volume *lv,
eb3597ac
ZK
2267 const char *layer, int noscan, int temporary,
2268 int visible_component)
6ddb5ecd
PR
2269{
2270 uint16_t udev_flags = 0;
2271
418663b6
PR
2272 /*
2273 * Instruct also libdevmapper to disable udev
2274 * fallback in accordance to LVM2 settings.
2275 */
44071331 2276 if (!_check_udev_fallback(dm->cmd))
418663b6
PR
2277 udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;
2278
6ddb5ecd
PR
2279 /*
2280 * Is this a top-level, visible device?
2281 * If not, create just the /dev/mapper content.
2282 */
5cc2f9a2 2283 /* FIXME: add target's method for this */
eb3597ac 2284 if (lv_is_new_thin_pool(lv) || visible_component)
00a45ca4
ZK
2285 /* New thin-pool is regular LV with -tpool UUID suffix. */
2286 udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
2287 DM_UDEV_DISABLE_OTHER_RULES_FLAG;
6612d8dd 2288 else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
6ddb5ecd
PR
2289 udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
2290 DM_UDEV_DISABLE_DISK_RULES_FLAG |
2291 DM_UDEV_DISABLE_OTHER_RULES_FLAG;
2292 /*
2293 * There's no need for other udev rules to touch special LVs with
2294 * reserved names. We don't need to populate /dev/disk here either,
2295 * even if they happen to be visible and top-level.
2296 */
2297 else if (is_reserved_lvname(lv->name))
2298 udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
2299 DM_UDEV_DISABLE_OTHER_RULES_FLAG;
2300
2301 /*
2302 * Snapshots and origins could have the same rule applied that will
2303 * give symlinks exactly the same name (e.g. a name based on
2304 * filesystem UUID). We give preference to origins to make such
2305 * naming deterministic (e.g. symlinks in /dev/disk/by-uuid).
2306 */
2307 if (lv_is_cow(lv))
2308 udev_flags |= DM_UDEV_LOW_PRIORITY_FLAG;
2309
2310 /*
2311 * Finally, add flags to disable /dev/mapper and /dev/<vgname> content
2312 * to be created by udev if it is requested by user's configuration.
2313 * This is basically an explicit fallback to old node/symlink creation
2314 * without udev.
2315 */
2316 if (!dm->cmd->current_settings.udev_rules)
2317 udev_flags |= DM_UDEV_DISABLE_DM_RULES_FLAG |
2318 DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG;
2319
ce7489ed 2320 /*
039bdad7 2321 * LVM subsystem specific flags.
ce7489ed 2322 */
039bdad7
PR
2323 if (noscan)
2324 udev_flags |= DM_SUBSYSTEM_UDEV_FLAG0;
2325
2326 if (temporary)
2327 udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
2328 DM_UDEV_DISABLE_OTHER_RULES_FLAG;
ce7489ed 2329
6ddb5ecd
PR
2330 return udev_flags;
2331}
2332
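/*
 * Example (hypothetical LV): a hidden layered device such as a "-real"
 * sub-LV typically collects DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
 * DM_UDEV_DISABLE_DISK_RULES_FLAG | DM_UDEV_DISABLE_OTHER_RULES_FLAG,
 * so udev creates only its /dev/mapper node.
 */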
2a6981a6
ZK
2333static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
2334 const struct logical_volume *lv, int origin_only);
0451225c
ZK
2335static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
2336 const struct logical_volume *lv,
2337 struct lv_activate_opts *laopts,
2338 const char *layer);
2339/*
2340 * Check for device holders (ATM used only for removed pvmove targets)
2341 * and add them into dtree structures.
2342 * When 'laopts != NULL' add them as new nodes - which also corrects READ_AHEAD.
39b7d1ba 2343 * Note: correct tables are already explicitly PRELOADED.
0451225c 2344 */
8c6fd093 2345static int _check_holder(struct dev_manager *dm, struct dm_tree *dtree,
0451225c
ZK
2346 const struct logical_volume *lv,
2347 struct lv_activate_opts *laopts,
2348 uint32_t major, const char *d_name)
2a6981a6
ZK
2349{
2350 const char *default_uuid_prefix = dm_uuid_prefix();
2351 const size_t default_uuid_prefix_len = strlen(default_uuid_prefix);
8c6fd093
ZK
2352 const char *name;
2353 const char *uuid;
2354 struct dm_info info;
2355 struct dm_task *dmt;
2a6981a6 2356 struct logical_volume *lv_det;
8c6fd093
ZK
2357 union lvid id;
2358 int dev, r = 0;
2a6981a6 2359
8c6fd093
ZK
2360 errno = 0;
2361 dev = strtoll(d_name + 3, NULL, 10);
2362 if (errno) {
2363 log_error("Failed to parse dm device minor number from %s.", d_name);
2a6981a6
ZK
2364 return 0;
2365 }
2366
8c6fd093
ZK
2367 if (!(dmt = _setup_task_run(DM_DEVICE_INFO, &info, NULL, NULL, NULL,
2368 major, dev, 0, 0, 0)))
2369 return_0;
2a6981a6 2370
8c6fd093
ZK
2371 if (info.exists) {
2372 uuid = dm_task_get_uuid(dmt);
2373 name = dm_task_get_name(dmt);
2a6981a6 2374
8c6fd093
ZK
2375 log_debug_activation("Checking holder of %s %s (" FMTu32 ":" FMTu32 ") %s.",
2376 display_lvname(lv), uuid, info.major, info.minor,
2377 name);
2a6981a6 2378
8c6fd093 2379 /* Skip common uuid prefix */
2a6981a6
ZK
2380 if (!strncmp(default_uuid_prefix, uuid, default_uuid_prefix_len))
2381 uuid += default_uuid_prefix_len;
2382
96b77716 2383 if (!memcmp(uuid, &lv->vg->id, ID_LEN) &&
2a6981a6 2384 !dm_tree_find_node_by_uuid(dtree, uuid)) {
c1703845 2385 /* trims any UUID suffix (e.g. -cow) */
995ff589 2386 dm_strncpy((char*)&id, uuid, 2 * sizeof(struct id) + 1);
8c6fd093 2387
2a6981a6 2388 /* If UUID is not yet in dtree, look for matching LV */
8c6fd093
ZK
2389 if (!(lv_det = find_lv_in_vg_by_lvid(lv->vg, &id))) {
2390 log_error("Cannot find holder with device name %s in VG %s.",
2391 name, lv->vg->name);
2a6981a6
ZK
2392 goto out;
2393 }
2394
2395 if (lv_is_cow(lv_det))
2396 lv_det = origin_from_cow(lv_det);
2397 log_debug_activation("Found holder %s of %s.",
2398 display_lvname(lv_det),
2399 display_lvname(lv));
0451225c
ZK
2400 if (!laopts) {
2401 if (!_add_lv_to_dtree(dm, dtree, lv_det, 0))
2402 goto_out;
2403 } else if (!_add_new_lv_to_dtree(dm, dtree, lv_det, laopts, 0))
2404 goto_out;
2a6981a6
ZK
2405 }
2406 }
2407
8c6fd093
ZK
2408 r = 1;
2409out:
2410 dm_task_destroy(dmt);
2411
2412 return r;
2413}
2414
2415/*
2416 * Add existing devices which hold the given LV device open.
2417 * This is used when the metadata no longer contains this information,
2418 * i.e. PVMOVE is being finished and the final table is about to be resumed.
2419 */
2420static int _add_holders_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
0451225c
ZK
2421 const struct logical_volume *lv,
2422 struct lv_activate_opts *laopts,
2423 const struct dm_info *info)
8c6fd093
ZK
2424{
2425 const char *sysfs_dir = dm_sysfs_dir();
2426 char sysfs_path[PATH_MAX];
2427 struct dirent *dirent;
2428 DIR *d;
2429 int r = 0;
2430
2431 /* Sysfs path of holders */
2432 if (dm_snprintf(sysfs_path, sizeof(sysfs_path), "%sblock/dm-" FMTu32
2433 "/holders", sysfs_dir, info->minor) < 0) {
2434 log_error("sysfs_path dm_snprintf failed.");
2435 return 0;
2436 }
2437
2438 if (!(d = opendir(sysfs_path))) {
2439 log_sys_error("opendir", sysfs_path);
2440 return 0;
2441 }
2442
2443 while ((dirent = readdir(d)))
2444 /* Expects minor is added to 'dm-' prefix */
2445 if (!strncmp(dirent->d_name, "dm-", 3) &&
0451225c 2446 !_check_holder(dm, dtree, lv, laopts, info->major, dirent->d_name))
8c6fd093
ZK
2447 goto_out;
2448
2a6981a6
ZK
2449 r = 1;
2450out:
2451 if (closedir(d))
8c6fd093 2452 log_sys_debug("closedir", "holders");
2a6981a6
ZK
2453
2454 return r;
2455}
2456
ad6b0ebb 2457static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
84cdf85b 2458 const struct logical_volume *lv, const char *layer)
5f4b2acf
AK
2459{
2460 char *dlid, *name;
0c8bdaf3 2461 struct dm_info info, info2;
e30bc9b1 2462 const struct dm_active_device *dm_dev;
a9953411 2463
e59e2f7c 2464 if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
5f4b2acf 2465 return_0;
a9953411 2466
b2885b71 2467 if (!(dlid = build_dm_uuid(dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem, lv, layer)))
5f4b2acf
AK
2468 return_0;
2469
f8aa073a 2470 if (dm_devs_cache_use()) {
ad1d6887 2471 if (!(dm_dev = dm_devs_cache_get_by_uuid(dm->cmd, dlid))) {
d4a0816a
ZK
2472 log_debug("Cached as not present %s.", name);
2473 return 1;
2474 }
2475 info = (struct dm_info) {
2476 .exists = 1,
e30bc9b1
ZK
2477 .major = MAJOR(dm_dev->devno),
2478 .minor = MINOR(dm_dev->devno),
d4a0816a
ZK
2479 };
2480 log_debug("Cached as present %s %s (%d:%d).",
2481 name, dlid, info.major, info.minor);
2482 } else if (!_info(dm->cmd, name, dlid, 0, 0, 0, &info, NULL, NULL))
034931f6 2483 return_0;
0c8bdaf3
MB
2484 /*
2485 * For top level volumes verify that the existing device matches the
2486 * requested major/minor and that the major/minor pair is available for use.
2487 */
2488 if (!layer && lv->major != -1 && lv->minor != -1) {
0f817d38
AK
2489 /*
2490 * FIXME compare info.major with lv->major if multiple major support
2491 */
1be74cfd 2492 if (info.exists && ((int) info.minor != lv->minor)) {
0c8bdaf3
MB
2493 log_error("Volume %s (%" PRIu32 ":%" PRIu32")"
2494 " differs from already active device "
81ef4eb4 2495 "(%" PRIu32 ":%" PRIu32").",
922fccc6
ZK
2496 display_lvname(lv), lv->major, lv->minor,
2497 info.major, info.minor);
0c8bdaf3
MB
2498 return 0;
2499 }
2500 if (!info.exists && _info_by_dev(lv->major, lv->minor, &info2) &&
2501 info2.exists) {
2502 log_error("The requested major:minor pair "
81ef4eb4 2503 "(%" PRIu32 ":%" PRIu32") is already used.",
0c8bdaf3
MB
2504 lv->major, lv->minor);
2505 return 0;
2506 }
2507 }
2508
6ddb5ecd 2509 if (info.exists && !dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
eb3597ac
ZK
2510 _get_udev_flags(dm, lv, layer,
2511 0, 0, 0))) {
81ef4eb4 2512 log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.",
5f4b2acf
AK
2513 info.major, info.minor);
2514 return 0;
a9953411
AK
2515 }
2516
fba86dd4 2517 if (info.exists && dm->track_pending_delete) {
9718fd24
ZK
2518 log_debug_activation("Tracking pending delete for %s%s%s (%s).",
2519 display_lvname(lv), layer ? "-" : "", layer ? : "", dlid);
b2885b71 2520 if (!str_list_add(dm->cmd->pending_delete_mem, &dm->cmd->pending_delete, dlid))
fba86dd4
ZK
2521 return_0;
2522 }
2523
2a6981a6
ZK
2524 /*
2525 * Find holders of an existing active LV whose name starts with 'pvmove',
2526 * but which is no longer a PVMOVE LV and is not a PVMOVE _mimage either.
2527 */
2528 if (info.exists && !lv_is_pvmove(lv) &&
2529 !strchr(lv->name, '_') && !strncmp(lv->name, "pvmove", 6))
0451225c 2530 if (!_add_holders_to_dtree(dm, dtree, lv, NULL, &info))
2a6981a6
ZK
2531 return_0;
2532
5f4b2acf
AK
2533 return 1;
2534}
2535
c0c1ada8 2536struct pool_cb_data {
6c7a6c07 2537 struct dev_manager *dm;
c0c1ada8
ZK
2538 const struct logical_volume *pool_lv;
2539
2540 int skip_zero; /* to skip zeroed device header (check first 64B) */
2541 int exec; /* which binary to call */
2542 int opts;
689af323
ZK
2543 struct {
2544 unsigned maj;
2545 unsigned min;
2546 unsigned patch;
2547 } version;
c0c1ada8 2548 const char *global;
6c7a6c07
ZK
2549};
2550
689af323
ZK
2551/*
2552 * Simple version of check function calling 'tool -V'
2553 *
2554 * Returns 1 if the tool's version is equal to or newer than the given one.
2555 * Otherwise it returns 0.
2556 */
2557static int _check_tool_version(struct cmd_context *cmd, const char *tool,
2558 unsigned maj, unsigned min, unsigned patch)
2559{
2560 const char *argv[] = { tool, "-V", NULL };
2561 struct pipe_data pdata;
2562 FILE *f;
2563 char buf[128] = { 0 };
2564 char *nl;
2565 unsigned v_maj, v_min, v_patch;
2566 int ret = 0;
2567
2568 if (!(f = pipe_open(cmd, argv, 0, &pdata))) {
2569 log_warn("WARNING: Cannot read output from %s.", argv[0]);
2570 } else {
2571 if (fgets(buf, sizeof(buf) - 1, f) &&
2572 (sscanf(buf, "%u.%u.%u", &v_maj, &v_min, &v_patch) == 3)) {
2573 if ((v_maj > maj) ||
2574 ((v_maj == maj) &&
2575 ((v_min > min) ||
2576 (v_min == min && v_patch >= patch))))
2577 ret = 1;
2578
2579 if ((nl = strchr(buf, '\n')))
2580 nl[0] = 0; /* cut newline away */
2581
2582 log_verbose("Found version of %s %s is %s than requested %u.%u.%u.",
2583 argv[0], buf, ret ? "newer" : "older", maj, min, patch);
2584 } else
2585 log_warn("WARNING: Cannot parse output '%s' from %s.", buf, argv[0]);
2586
2587 (void) pipe_close(&pdata);
2588 }
2589
2590 return ret;
2591}
2592
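/*
 * Illustrative run: if "cache_check -V" prints "0.8.5", a requirement
 * of maj=0, min=7, patch=0 passes (0.8.5 >= 0.7.0) and the function
 * returns 1 after logging the parsed version.
 */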
c0c1ada8
ZK
2593static int _pool_callback(struct dm_tree_node *node,
2594 dm_node_callback_t type, void *cb_data)
6c7a6c07 2595{
0c9e3e8d 2596 int ret, status = 0, fd;
c0c1ada8
ZK
2597 const struct pool_cb_data *data = cb_data;
2598 const struct logical_volume *pool_lv = data->pool_lv;
2599 const struct logical_volume *mlv = first_seg(pool_lv)->metadata_lv;
9fe7aba2 2600 struct cmd_context *cmd = pool_lv->vg->cmd;
c0c1ada8 2601 long buf[64 / sizeof(long)]; /* buffer for short disk header (64B) */
e8669311 2602 int args = 0;
f173274f 2603 char *mpath;
4d231165 2604 const char *argv[DEFAULT_MAX_EXEC_ARGS + 7] = { /* Max supported 15 args */
9fe7aba2 2605 find_config_tree_str_allow_empty(cmd, data->exec, NULL)
71485ebf
ZK
2606 };
2607
4d231165 2608 if (!argv[0] || !*argv[0]) /* *_check tool is unconfigured/disabled with "" setting */
71485ebf 2609 return 1;
6c7a6c07 2610
9fe7aba2
DT
2611 if (lv_is_cache_vol(pool_lv)) {
2612 if (!(mpath = lv_dmpath_suffix_dup(data->dm->mem, pool_lv, "-cmeta"))) {
2613 log_error("Failed to build device path for checking cachevol metadata %s.",
2614 display_lvname(pool_lv));
2615 return 0;
2616 }
2617 } else {
2618 if (!(mpath = lv_dmpath_dup(data->dm->mem, mlv))) {
2619 log_error("Failed to build device path for checking pool metadata %s.",
2620 display_lvname(mlv));
2621 return 0;
2622 }
f173274f 2623 }
04fbffb1 2624
f8aa073a 2625 dm_devs_cache_destroy();
04fbffb1 2626
9fe7aba2 2627 log_debug("Running check command on %s", mpath);
f173274f
ZK
2628
2629 if (data->skip_zero) {
2630 if ((fd = open(mpath, O_RDONLY)) < 0) {
2631 log_sys_error("open", mpath);
2632 return 0;
2633 }
2634 /* let's assume there is no problem to read 64 bytes */
2635 if (read(fd, buf, sizeof(buf)) < (int)sizeof(buf)) {
2636 log_sys_error("read", mpath);
2637 if (close(fd))
2638 log_sys_error("close", mpath);
2639 return 0;
2640 }
2641 for (ret = 0; ret < (int) DM_ARRAY_SIZE(buf); ++ret)
2642 if (buf[ret])
2643 break;
2644
2645 if (close(fd))
2646 log_sys_error("close", mpath);
2647
2648 if (ret == (int) DM_ARRAY_SIZE(buf)) {
2649 log_debug_activation("Metadata checking skipped, detected empty disk header on %s.",
2650 mpath);
2651 return 1;
2652 }
2653 }
2654
4d231165
ZK
2655 if (!prepare_exec_args(cmd, argv, &args, data->opts))
2656 return_0;
e8669311 2657
f173274f 2658 argv[++args] = mpath;
c0c1ada8 2659
9fe7aba2 2660 if (!(ret = exec_cmd(cmd, (const char * const *)argv,
6c7a6c07 2661 &status, 0))) {
689af323 2662 if (status == ENOENT) {
4e0c0417 2663 log_warn("WARNING: Check is skipped, please install the missing recommended binary %s!",
689af323
ZK
2664 argv[0]);
2665 return 1;
2666 }
2667
2668 if ((data->version.maj || data->version.min || data->version.patch) &&
9fe7aba2 2669 !_check_tool_version(cmd, argv[0],
689af323
ZK
2670 data->version.maj, data->version.min, data->version.patch)) {
2671 log_warn("WARNING: Check is skipped, please upgrade the installed version of %s!",
2672 argv[0]);
2673 return 1;
2674 }
975b5b42
ZK
2675 switch (type) {
2676 case DM_NODE_CALLBACK_PRELOADED:
c0c1ada8
ZK
2677 log_err_once("Check of pool %s failed (status:%d). "
2678 "Manual repair required!",
2679 display_lvname(pool_lv), status);
975b5b42
ZK
2680 break;
2681 default:
c0c1ada8
ZK
2682 log_warn("WARNING: Integrity check of metadata for pool "
2683 "%s failed.", display_lvname(pool_lv));
975b5b42 2684 }
6c7a6c07
ZK
2685 /*
2686 * FIXME: What should we do here??
2687 *
2688 * Maybe mark the node, so it's not activating
c0c1ada8 2689 * as pool but as error/linear and let the
6c7a6c07
ZK
2690 * dm tree resolve the issue.
2691 */
2692 }
2693
6c7a6c07
ZK
2694 return ret;
2695}
2696
c0c1ada8
ZK
2697static int _pool_register_callback(struct dev_manager *dm,
2698 struct dm_tree_node *node,
2699 const struct logical_volume *lv)
6c7a6c07 2700{
c0c1ada8 2701 struct pool_cb_data *data;
6c7a6c07 2702
ee627884
ZK
2703 /* Do not skip metadata of testing even for unused thin pools */
2704#if 0
c0c1ada8
ZK
2705 /* Skip metadata testing for unused thin pool. */
2706 if (lv_is_thin_pool(lv) &&
2707 (!first_seg(lv)->transaction_id ||
2708 ((first_seg(lv)->transaction_id == 1) &&
2709 pool_has_message(first_seg(lv), NULL, 0))))
6c7a6c07 2710 return 1;
ee627884 2711#endif
d6df31fb
ZK
2712 /* Skip validation of metadata for lvremove and vgremove */
2713 if (!dm->activation &&
2714 (!strcmp(dm->cmd->name, "lvremove") ||
2715 !strcmp(dm->cmd->name, "vgremove"))) {
2716 log_debug("Skipping %s callback registration for command %s.",
2717 display_lvname(lv), dm->cmd->name);
2718 return 1;
2719 }
6c7a6c07 2720
c0c1ada8 2721 if (!(data = dm_pool_zalloc(dm->mem, sizeof(*data)))) {
6c7a6c07
ZK
2722 log_error("Failed to allocate data for callback.");
2723 return 0;
2724 }
2725
2726 data->dm = dm;
6c7a6c07 2727
c0c1ada8
ZK
2728 if (lv_is_thin_pool(lv)) {
2729 data->pool_lv = lv;
2730 data->skip_zero = 1;
2731 data->exec = global_thin_check_executable_CFG;
2732 data->opts = global_thin_check_options_CFG;
c0c1ada8
ZK
2733 data->global = "thin";
2734 } else if (lv_is_cache(lv)) { /* cache pool */
2735 data->pool_lv = first_seg(lv)->pool_lv;
293aabe4 2736 data->skip_zero = 1; /* cheap read-error detection */
c0c1ada8
ZK
2737 data->exec = global_cache_check_executable_CFG;
2738 data->opts = global_cache_check_options_CFG;
c0c1ada8 2739 data->global = "cache";
689af323
ZK
2740 if (first_seg(first_seg(lv)->pool_lv)->cache_metadata_format > 1) {
2741 data->version.maj = 0;
2742 data->version.min = 7;
2743 }
c0c1ada8
ZK
2744 } else {
2745 log_error(INTERNAL_ERROR "Registering unsupported pool callback.");
2746 return 0;
2747 }
2748
2749 dm_tree_node_set_callback(node, _pool_callback, data);
6c7a6c07
ZK
2750
2751 return 1;
2752}
2753
50b188ee
ZK
2754static struct id _get_id_for_meta_or_data(const struct lv_segment *lvseg, int meta_or_data)
2755{
2756 /* When ID is provided in form of metadata_id or data_id, otherwise use CVOL ID */
2757 if (meta_or_data && lvseg->metadata_id)
2758 return *lvseg->metadata_id;
2759
2760 if (!meta_or_data && lvseg->data_id)
2761 return *lvseg->data_id;
2762
2763 return lvseg->pool_lv->lvid.id[1];
2764}
2765
2825ad9d
ZK
2766/* Add special devices _cmeta & _cdata on top of CacheVol to dm tree */
2767static int _add_cvol_subdev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
2768 const struct logical_volume *lv, int meta_or_data)
2769{
2770 const char *layer = meta_or_data ? "cmeta" : "cdata";
2771 struct dm_pool *mem = dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem;
2825ad9d 2772 struct lv_segment *lvseg = first_seg(lv);
50b188ee 2773 const struct logical_volume *pool_lv = lvseg->pool_lv;
2825ad9d
ZK
2774 struct dm_info info;
2775 char *name ,*dlid;
50b188ee 2776 union lvid lvid = { { lv->vg->id, _get_id_for_meta_or_data(lvseg, meta_or_data) } };
2825ad9d
ZK
2777
2778 if (!(dlid = dm_build_dm_uuid(mem, UUID_PREFIX, (const char *)&lvid.s, layer)))
2779 return_0;
2780
2781 /* Name is actually not really needed here, but aids debugging... */
2782 if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, layer)))
2783 return_0;
2784
622884d6 2785 if (!_info(dm->cmd, name, dlid, 0, 0, 0, &info, NULL, NULL))
2825ad9d
ZK
2786 return_0;
2787
2788 if (info.exists) {
2789 if (!dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
2790 _get_udev_flags(dm, lv, layer, 0, 0, 0))) {
2791 log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.", info.major, info.minor);
2792 return 0;
2793 }
2794 if (dm->track_pending_delete) {
9718fd24
ZK
2795 log_debug_activation("Tracking pending delete for %s-%s (%s).",
2796 display_lvname(pool_lv), layer, dlid);
2825ad9d
ZK
2797 if (!str_list_add(mem, &dm->cmd->pending_delete, dlid))
2798 return_0;
2799 }
2800 }
2801
2802 return 1;
2803}
2804
a900d150
ZK
2805/* Declaration to resolve suspend tree and message passing for thin-pool */
2806static int _add_target_to_dtree(struct dev_manager *dm,
2807 struct dm_tree_node *dnode,
2808 struct lv_segment *seg,
2809 struct lv_activate_opts *laopts);
5f4b2acf
AK
2810/*
2811 * Add LV and any known dependencies
2812 */
ff58e019 2813static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
84cdf85b 2814 const struct logical_volume *lv, int origin_only)
5f4b2acf 2815{
ff58e019 2816 uint32_t s;
df390f17 2817 struct seg_list *sl;
3679bb1c 2818 struct dm_list *snh;
dd4fdce1 2819 struct lv_segment *seg;
c0c1ada8 2820 struct dm_tree_node *node;
91974437 2821 const struct logical_volume *plv;
df390f17 2822
7e794b77
ZK
2823 if (lv_is_pvmove(lv) && (dm->track_pvmove_deps == 2))
2824 return 1; /* Avoid rechecking of already seen pvmove LV */
2825
a018c57f 2826 if (lv_is_cache_pool(lv)) {
29bd3ccc 2827 if (!dm_list_empty(&lv->segs_using_this_lv)) {
29bd3ccc
ZK
2828 if (!_add_lv_to_dtree(dm, dtree, seg_lv(first_seg(lv), 0), 0))
2829 return_0;
2830 if (!_add_lv_to_dtree(dm, dtree, first_seg(lv)->metadata_lv, 0))
2831 return_0;
8121074f
ZK
2832 /* Cache pool does not have a real device node */
2833 return 1;
2834 }
2835 /* Unused cache pool is activated as metadata */
a018c57f
ZK
2836 }
2837
9f433e6e 2838 if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, NULL))
5f4b2acf
AK
2839 return_0;
2840
2841 /* FIXME Can we avoid doing this every time? */
87331dc4 2842 /* Reused also for lv_is_external_origin(lv) */
ad6b0ebb 2843 if (!_add_dev_to_dtree(dm, dtree, lv, "real"))
5f4b2acf
AK
2844 return_0;
2845
2d6fcbf6 2846 if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, "cow"))
5f4b2acf 2847 return_0;
a9953411 2848
3679bb1c 2849 if (origin_only && lv_is_thin_volume(lv)) {
97d36d57 2850 if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
3679bb1c
ZK
2851 return_0;
2852#if 0
2853 /* ? Use origin_only to avoid 'deep' thin pool suspend ? */
55d90b64 2854 /* FIXME Implement dm_tree_node_skip_childrens optimisation */
6a0d97a6 2855 if (!(uuid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
3679bb1c 2856 return_0;
c0c1ada8
ZK
2857 if ((node = dm_tree_find_node_by_uuid(dtree, uuid)))
2858 dm_tree_node_skip_childrens(node, 1);
3679bb1c
ZK
2859#endif
2860 }
2861
457bd139 2862 if (dm->activation && lv_is_external_origin(lv)) {
9a060948 2863 /* Find possible users of external origin lv */
9a060948 2864 dm_list_iterate_items(sl, &lv->segs_using_this_lv)
457bd139 2865 if (!_add_dev_to_dtree(dm, dtree, sl->seg->lv, lv_layer(sl->seg->lv)))
9a060948 2866 return_0;
9a060948
ZK
2867 }
2868
3679bb1c 2869 if (lv_is_thin_pool(lv)) {
9a060948
ZK
2870 /*
2871 * For both origin_only and !origin_only
2872 * skips test for -tpool-real and tpool-cow
2873 */
3679bb1c
ZK
2874 if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
2875 return_0;
a900d150
ZK
2876
2877 /*
2878 * TODO: change API and move this code
2879 * It could be easier to handle this in _add_dev_to_dtree()
2880 * and base it on info.exists?
2881 */
9a060948 2882 if (!dm->activation) {
363e8888 2883 if ((node = _cached_dm_tree_node(dm->mem, dtree, lv, lv_layer(lv)))) {
a900d150
ZK
2884 if (origin_only) {
2885 struct lv_activate_opts laopts = {
2886 .origin_only = 1,
2887 .send_messages = 1 /* Node with messages */
2888 };
2889 /*
39b7d1ba 2890 * Add messages only when the right node exists in the table,
a900d150
ZK
2891 * i.e. when building the SUSPEND tree for an origin-only thin-pool.
2892 *
2893 * TODO: Fix call of '_add_target_to_dtree()' to add message
2894 * to thin-pool node as we already know the pool node exists
2895 * in the table. Any better/cleaner API way ?
2896 *
2897 * Probably some 'new' target method to add messages for any node?
2898 */
2899 if (dm->suspend &&
2900 !dm_list_empty(&(first_seg(lv)->thin_messages)) &&
2901 !_add_target_to_dtree(dm, node, first_seg(lv), &laopts))
2902 return_0;
2903 } else {
2904 /* Setup callback for non-activation partial tree */
2905 /* Activation gets own callback when needed */
2906 /* TODO: extend _cached_dm_info() to return dnode */
2907 if (!_pool_register_callback(dm, node, lv))
2908 return_0;
2909 }
2910 }
c0c1ada8
ZK
2911 }
2912 }
2913
6612d8dd
ZK
2914 if (lv_is_vdo_pool(lv)) {
2915 /*
2916 * For both origin_only and !origin_only
2917 * skips test for -vpool-real and vpool-cow
2918 */
2919 if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
2920 return_0;
2921 }
2922
f5e265a0 2923 if (lv_is_cache(lv)) {
fba86dd4 2924 if (!origin_only && !dm->activation && !dm->track_pending_delete) {
c0c1ada8
ZK
2925 /* Setup callback for non-activation partial tree */
2926 /* Activation gets own callback when needed */
363e8888 2927 if ((node = _cached_dm_tree_node(dm->mem, dtree, lv, lv_layer(lv))) &&
c0c1ada8 2928 !_pool_register_callback(dm, node, lv))
9a060948
ZK
2929 return_0;
2930 }
3679bb1c 2931 }
2f1489a6 2932
ad582da3
ZK
2933 if (lv_is_cache_vol(lv))
2934 /* Cachevol with cache LV spans some extra layers -cdata, -cmeta */
2935 dm_list_iterate_items(sl, &lv->segs_using_this_lv)
2936 if (lv_is_cache(sl->seg->lv) &&
2937 (!_add_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, 0) ||
2938 !_add_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, 1) ||
2939 !_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv))))
2940 return_0;
2941
3679bb1c 2942 /* Add any snapshots of this LV */
9a060948 2943 if (!origin_only && lv_is_origin(lv))
3679bb1c
ZK
2944 dm_list_iterate(snh, &lv->snapshot_segs)
2945 if (!_add_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow, 0))
ff58e019 2946 return_0;
9c083d34 2947
664a6955
ZK
2948 if (dm->activation && !origin_only && lv_is_merging_origin(lv) &&
2949 !_add_lv_to_dtree(dm, dtree, find_snapshot(lv)->lv, 1))
2950 return_0;
ff58e019 2951
df390f17 2952 /* Add any LVs referencing a PVMOVE LV unless told not to. */
7e794b77
ZK
2953 if ((dm->track_pvmove_deps == 1) && lv_is_pvmove(lv)) {
2954 dm->track_pvmove_deps = 2; /* Mark as already seen */
91974437
ZK
2955 dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
2956 /* If LV is snapshot COW - whole snapshot needs reload */
2957 plv = lv_is_cow(sl->seg->lv) ? origin_from_cow(sl->seg->lv) : sl->seg->lv;
2958 if (!_add_lv_to_dtree(dm, dtree, plv, 0))
df390f17 2959 return_0;
91974437 2960 }
c0f98794
AK
2961 dm->track_pvmove_deps = 1;
2962 }
df390f17 2963
fba86dd4
ZK
2964 if (!dm->track_pending_delete)
2965 dm_list_iterate_items(sl, &lv->segs_using_this_lv) {
2966 if (lv_is_pending_delete(sl->seg->lv)) {
2967 /* LV is referenced by a cache 'pending delete' LV */
2968 dm->track_pending_delete = 1;
b47b66eb
ZK
2969 if (!_cached_dm_tree_node(dm->mem, dtree, sl->seg->lv, lv_layer(sl->seg->lv)) &&
2970 !_add_lv_to_dtree(dm, dtree, sl->seg->lv, 0))
fba86dd4
ZK
2971 return_0;
2972 dm->track_pending_delete = 0;
2973 }
f5e265a0 2974 }
f5e265a0 2975
3679bb1c
ZK
2976 /* Add any LVs used by segments in this LV */
2977 dm_list_iterate_items(seg, &lv->segments) {
457bd139
ZK
2978 if (seg->external_lv &&
2979 !_add_lv_to_dtree(dm, dtree, seg->external_lv,
2980 /* For origin LV check for complete device tree */
2981 lv_is_origin(seg->external_lv) ? 0 : 1)) /* stack */
87331dc4 2982 return_0;
3679bb1c 2983 if (seg->log_lv &&
0443c42e 2984 !_add_lv_to_dtree(dm, dtree, seg->log_lv, 0))
a0c4e85c 2985 return_0;
3679bb1c 2986 if (seg->metadata_lv &&
0443c42e 2987 !_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
6c7a6c07 2988 return_0;
3ae55695
DT
2989 if (seg->writecache && seg_is_writecache(seg)) {
2990 if (!_add_lv_to_dtree(dm, dtree, seg->writecache, dm->activation ? origin_only : 1))
2991 return_0;
2992 }
d9e8895a
DT
2993 if (seg->integrity_meta_dev && seg_is_integrity(seg)) {
2994 if (!_add_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, dm->activation ? origin_only : 1))
2995 return_0;
2996 }
a018c57f 2997 if (seg->pool_lv &&
0285066e
ZK
2998 /* When activating and not origin_only detect linear 'overlay' over pool */
2999 !_add_lv_to_dtree(dm, dtree, seg->pool_lv, dm->activation ? origin_only : 1))
6c7a6c07 3000 return_0;
3679bb1c
ZK
3001
3002 for (s = 0; s < seg->area_count; s++) {
3679bb1c 3003 if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s) &&
fba86dd4
ZK
3004 /* origin only for cache without pending delete */
3005 (!dm->track_pending_delete || !lv_is_cache(lv)) &&
6612d8dd 3006 !_add_lv_to_dtree(dm, dtree, seg_lv(seg, s),
af33a008 3007 lv_is_vdo_pool(seg_lv(seg, s)) ? 1 : 0))
3679bb1c 3008 return_0;
b896f7de 3009 if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
af33a008 3010 !_add_lv_to_dtree(dm, dtree, seg_metalv(seg, s), 0))
3679bb1c
ZK
3011 return_0;
3012 }
664a6955
ZK
3013
3014 /* When activating, detect merging LV presence */
3015 if (dm->activation && seg->merge_lv &&
3016 !_add_lv_to_dtree(dm, dtree, seg->merge_lv, 1))
3017 return_0;
efc8ca10 3018 }
87663d5f 3019
a9953411
AK
3020 return 1;
3021}
3022
84cdf85b 3023static struct dm_tree *_create_partial_dtree(struct dev_manager *dm, const struct logical_volume *lv, int origin_only)
22456547 3024{
e88f56d9 3025 struct dm_tree *dtree;
22456547 3026
e88f56d9 3027 if (!(dtree = dm_tree_create())) {
922fccc6
ZK
3028 log_debug_activation("Partial dtree creation failed for %s.",
3029 display_lvname(lv));
5f4b2acf
AK
3030 return NULL;
3031 }
22456547 3032
eae0314b 3033 dm_tree_set_optional_uuid_suffixes(dtree, (const char**)_uuid_suffix_list);
7cff640d 3034
a900d150 3035 if (!_add_lv_to_dtree(dm, dtree, lv, (lv_is_origin(lv) || lv_is_thin_volume(lv) || lv_is_thin_pool(lv)) ? origin_only : 0))
c51b9fff 3036 goto_bad;
22456547 3037
5f4b2acf 3038 return dtree;
22456547 3039
c51b9fff 3040bad:
e88f56d9 3041 dm_tree_free(dtree);
5f4b2acf 3042 return NULL;
22456547
AK
3043}
3044
d8fc4d09
ZK
3045static char *_add_error_or_zero_device(struct dev_manager *dm, struct dm_tree *dtree,
3046 struct lv_segment *seg, int s, int use_zero)
8c5bcdab 3047{
067184f3 3048 char *dlid, *name;
8c5bcdab
AK
3049 char errid[32];
3050 struct dm_tree_node *node;
3051 struct lv_segment *seg_i;
067184f3 3052 struct dm_info info;
aaf92617 3053 int segno = -1, i = 0;
e2354ea3 3054 uint64_t size = (uint64_t) _seg_len(seg) * seg->lv->vg->extent_size;
8c5bcdab 3055
2c44337b 3056 dm_list_iterate_items(seg_i, &seg->lv->segments) {
d8fc4d09 3057 if (seg == seg_i) {
8c5bcdab 3058 segno = i;
d8fc4d09
ZK
3059 break;
3060 }
8c5bcdab
AK
3061 ++i;
3062 }
3063
3064 if (segno < 0) {
d8fc4d09 3065 log_error(INTERNAL_ERROR "_add_error_or_zero_device called with bad segment.");
3df790d9 3066 return NULL;
8c5bcdab
AK
3067 }
3068
3069 sprintf(errid, "missing_%d_%d", segno, s);
3070
6a0d97a6 3071 if (!(dlid = build_dm_uuid(dm->mem, seg->lv, errid)))
8c5bcdab
AK
3072 return_NULL;
3073
e59e2f7c 3074 if (!(name = dm_build_dm_name(dm->mem, seg->lv->vg->name,
922fccc6 3075 seg->lv->name, errid)))
8c5bcdab 3076 return_NULL;
8c5bcdab 3077
622884d6 3078 if (!_info(dm->cmd, name, dlid, 0, 0, 0, &info, NULL, NULL))
034931f6 3079 return_NULL;
067184f3
AK
3080
3081 if (!info.exists) {
3082 /* Create new node */
3083 if (!(node = dm_tree_add_new_dev(dtree, name, dlid, 0, 0, 0, 0, 0)))
3084 return_NULL;
d8fc4d09
ZK
3085
3086 if (use_zero) {
3087 if (!dm_tree_node_add_zero_target(node, size))
3088 return_NULL;
3089 } else
3090 if (!dm_tree_node_add_error_target(node, size))
3091 return_NULL;
067184f3
AK
3092 } else {
3093 /* Already exists */
3094 if (!dm_tree_add_dev(dtree, info.major, info.minor)) {
1bdcb01f 3095 log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree.",
067184f3 3096 info.major, info.minor);
a7691cde 3097 return NULL;
067184f3
AK
3098 }
3099 }
3100
3101 return dlid;
8c5bcdab
AK
3102}
3103
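/*
 * Naming sketch (hypothetical LV): for segment 2, area 0 of vg0/lvol1,
 * the code above creates a device named "vg0-lvol1-missing_2_0" whose
 * single target is "zero" or "error" depending on the configured
 * stripe_filler.
 */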
3104static int _add_error_area(struct dev_manager *dm, struct dm_tree_node *node,
3105 struct lv_segment *seg, int s)
3106{
3107 char *dlid;
3108 uint64_t extent_size = seg->lv->vg->extent_size;
d8fc4d09 3109 int use_zero = !strcmp(dm->cmd->stripe_filler, TARGET_NAME_ZERO) ? 1 : 0;
8c5bcdab 3110
d8fc4d09 3111 if (!strcmp(dm->cmd->stripe_filler, TARGET_NAME_ERROR) || use_zero) {
8c5bcdab
AK
3112 /*
3113 * FIXME, the tree pointer is first field of dm_tree_node, but
3114 * we don't have the struct definition available.
3115 */
3116 struct dm_tree **tree = (struct dm_tree **) node;
d8fc4d09 3117 if (!(dlid = _add_error_or_zero_device(dm, *tree, seg, s, use_zero)))
8c5bcdab 3118 return_0;
86b15c7c
AK
3119 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
3120 return_0;
87ec9484 3121 } else
86b15c7c
AK
3122 if (!dm_tree_node_add_target_area(node, dm->cmd->stripe_filler, NULL, UINT64_C(0)))
3123 return_0;
87ec9484 3124
8c5bcdab
AK
3125 return 1;
3126}
3127
31cfcf7c
DT
3128static int _bad_pv_area(struct lv_segment *seg, uint32_t s)
3129{
3130 struct stat info;
3131 const char *name;
3132 struct device *dev;
3133
3134 if (!seg_pvseg(seg, s))
3135 return 1;
3136 if (!seg_pv(seg, s))
3137 return 1;
3138 if (!(dev = seg_dev(seg, s)))
3139 return 1;
3140 if (dm_list_empty(&dev->aliases))
3141 return 1;
3142 /* FIXME Avoid repeating identical stat in dm_tree_node_add_target_area */
3143 name = dev_name(dev);
3144 if (stat(name, &info) < 0)
3145 return 1;
3146 if (!S_ISBLK(info.st_mode))
3147 return 1;
3148 return 0;
3149}
3150
5f4b2acf 3151int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
72b2cb61
AK
3152 struct dm_tree_node *node, uint32_t start_area,
3153 uint32_t areas)
323a167b 3154{
31cfcf7c 3155 struct cmd_context *cmd = seg->lv->vg->cmd;
5f4b2acf
AK
3156 uint64_t extent_size = seg->lv->vg->extent_size;
3157 uint32_t s;
2ed2a724 3158 char *dlid;
f5f3defc 3159 const char *name;
ab852ffe
ZK
3160 unsigned num_error_areas = 0;
3161 unsigned num_existing_areas = 0;
323a167b 3162
5f4b2acf 3163 for (s = start_area; s < areas; s++) {
31cfcf7c
DT
3164 if (((seg_type(seg, s) == AREA_PV) && _bad_pv_area(seg, s)) ||
3165 ((seg_type(seg, s) == AREA_LV) && !seg_lv(seg, s))) {
3166 if (!cmd->partial_activation) {
fd6e113b
DT
3167 if (!cmd->degraded_activation ||
3168 (!lv_is_raid_type(seg->lv) &&
3169 !lv_is_integrity(seg->lv) &&
3170 !lv_is_integrity_metadata(seg->lv) &&
3171 !lv_is_integrity_origin(seg->lv))) {
31cfcf7c 3172 log_error("Aborting. LV %s is incomplete and --activationmode partial was not specified.",
922fccc6 3173 display_lvname(seg->lv));
be75076d
JB
3174 return 0;
3175 }
f5f3defc 3176 }
8c5bcdab
AK
3177 if (!_add_error_area(dm, node, seg, s))
3178 return_0;
ab852ffe 3179 num_error_areas++;
86b15c7c 3180 } else if (seg_type(seg, s) == AREA_PV) {
4eb04c8c
DT
3181 struct device *dev = seg_dev(seg, s);
3182 name = dm_list_empty(&dev->aliases) ? NULL : dev_name(dev);
3183
3184 if (!dm_tree_node_add_target_area(node, name, NULL,
86b15c7c
AK
3185 (seg_pv(seg, s)->pe_start + (extent_size * seg_pe(seg, s)))))
3186 return_0;
ab852ffe 3187 num_existing_areas++;
6d04311e
JEB
3188 } else if (seg_is_raid(seg)) {
3189 /*
3190 * RAID can handle unassigned areas. It simply puts
3191 * '- -' in for the metadata/data device pair. This
3192 * is a valid way to indicate to the RAID target that
3193 * the device is missing.
3194 *
3195 * If an image is marked as VISIBLE_LV and !LVM_WRITE,
3196 * it means the device has temporarily been extracted
3197 * from the array. It may come back at a future date,
3198 * so the bitmap must track differences. Again, '- -'
3199 * is used in the CTR table.
3200 */
3201 if ((seg_type(seg, s) == AREA_UNASSIGNED) ||
d2d3f0d7 3202 (lv_is_visible(seg_lv(seg, s)) &&
6d04311e
JEB
3203 !(seg_lv(seg, s)->status & LVM_WRITE))) {
3204 /* One each for metadata area and data area */
3205 if (!dm_tree_node_add_null_area(node, 0) ||
3206 !dm_tree_node_add_null_area(node, 0))
2100c90d 3207 return_0;
6d04311e 3208 continue;
cac52ca4 3209 }
bf8d0098
AK
3210
3211 if (seg->meta_areas && seg_metalv(seg, s)) {
3212 if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s), NULL)))
3213 return_0;
3214 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
3215 return_0;
3216 } else if (!dm_tree_node_add_null_area(node, 0))
6d04311e
JEB
3217 return_0;
3218
6a0d97a6 3219 if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s), NULL)))
6d04311e
JEB
3220 return_0;
3221 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
3222 return_0;
3223 } else if (seg_type(seg, s) == AREA_LV) {
3224
6a0d97a6 3225 if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s), NULL)))
86b15c7c
AK
3226 return_0;
3227 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
5f4b2acf 3228 return_0;
5f4b2acf 3229 } else {
550cae23 3230 log_error(INTERNAL_ERROR "Unassigned area found in LV %s.",
922fccc6 3231 display_lvname(seg->lv));
323a167b
JT
3232 return 0;
3233 }
5f4b2acf 3234 }
323a167b 3235
ab852ffe
ZK
3236 if (num_error_areas) {
3237 /* Thins currently do not support partial activation */
3238 if (lv_is_thin_type(seg->lv)) {
922fccc6
ZK
3239 log_error("Cannot activate %s: pool incomplete.",
3240 display_lvname(seg->lv));
ab852ffe
ZK
3241 return 0;
3242 }
ab852ffe
ZK
3243 }
3244
5f4b2acf
AK
3245 return 1;
3246}
323a167b 3247
0631d233
ZK
3248static int _add_layer_target_to_dtree(struct dev_manager *dm,
3249 struct dm_tree_node *dnode,
84cdf85b 3250 const struct logical_volume *lv)
0631d233
ZK
3251{
3252 const char *layer_dlid;
3253
6a0d97a6 3254 if (!(layer_dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
0631d233
ZK
3255 return_0;
3256
6612d8dd 3257
0631d233 3258 /* Add linear mapping over layered LV */
6612d8dd 3259 /* From VDO layer expose ONLY vdo pool header, we would need to use virtual size otherwise */
e8e6347b 3260 if (!add_linear_area_to_dtree(dnode, lv_is_vdo_pool(lv) ? 8 : lv->size,
6612d8dd 3261 lv->vg->extent_size,
0631d233
ZK
3262 lv->vg->cmd->use_linear_target,
3263 lv->vg->name, lv->name) ||
3264 !dm_tree_node_add_target_area(dnode, NULL, layer_dlid, 0))
3265 return_0;
3266
3267 return 1;
3268}
3269
ad6b0ebb 3270static int _add_origin_target_to_dtree(struct dev_manager *dm,
84cdf85b
ZK
3271 struct dm_tree_node *dnode,
3272 const struct logical_volume *lv)
5f4b2acf
AK
3273{
3274 const char *real_dlid;
2ec94d4d 3275
6a0d97a6 3276 if (!(real_dlid = build_dm_uuid(dm->mem, lv, "real")))
5f4b2acf 3277 return_0;
a9953411 3278
e88f56d9 3279 if (!dm_tree_node_add_snapshot_origin_target(dnode, lv->size, real_dlid))
5f4b2acf 3280 return_0;
323a167b
JT
3281
3282 return 1;
3283}
3284
static int _add_snapshot_merge_target_to_dtree(struct dev_manager *dm,
					       struct dm_tree_node *dnode,
					       const struct logical_volume *lv)
{
	const char *origin_dlid, *cow_dlid, *merge_dlid;
	struct lv_segment *merging_snap_seg = find_snapshot(lv);

	if (!lv_is_merging_origin(lv)) {
		log_error(INTERNAL_ERROR "LV %s is not a merging origin.",
			  display_lvname(lv));
		return 0;
	}

	if (!(origin_dlid = build_dm_uuid(dm->mem, lv, "real")))
		return_0;

	if (!(cow_dlid = build_dm_uuid(dm->mem, merging_snap_seg->cow, "cow")))
		return_0;

	if (!(merge_dlid = build_dm_uuid(dm->mem, merging_snap_seg->cow, NULL)))
		return_0;

	if (!dm_tree_node_add_snapshot_merge_target(dnode, lv->size, origin_dlid,
						    cow_dlid, merge_dlid,
						    merging_snap_seg->chunk_size))
		return_0;

	return 1;
}

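/*
 * Load the table for a COW snapshot device: either a regular snapshot
 * target, or an error target while the COW is being merged away.
 */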
ad6b0ebb 3315static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
81beded3 3316 struct dm_tree_node *dnode,
84cdf85b 3317 const struct logical_volume *lv,
81beded3 3318 struct lv_activate_opts *laopts)
323a167b 3319{
5f4b2acf
AK
3320 const char *origin_dlid;
3321 const char *cow_dlid;
3322 struct lv_segment *snap_seg;
3323 uint64_t size;
323a167b 3324
f9e0adcc 3325 if (!(snap_seg = find_snapshot(lv))) {
922fccc6
ZK
3326 log_error("Couldn't find snapshot for '%s'.",
3327 display_lvname(lv));
7a419a5d 3328 return 0;
323a167b
JT
3329 }
3330
6a0d97a6 3331 if (!(origin_dlid = build_dm_uuid(dm->mem, snap_seg->origin, "real")))
5f4b2acf 3332 return_0;
a9953411 3333
6a0d97a6 3334 if (!(cow_dlid = build_dm_uuid(dm->mem, snap_seg->cow, "cow")))
5f4b2acf 3335 return_0;
7bb6856a 3336
5f4b2acf 3337 size = (uint64_t) snap_seg->len * snap_seg->origin->vg->extent_size;
22456547 3338
c6168a14 3339 if (!laopts->no_merging && lv_is_merging_cow(lv)) {
a5ec3e38
MS
3340 /* cow is to be merged so load the error target */
3341 if (!dm_tree_node_add_error_target(dnode, size))
3342 return_0;
3343 }
3344 else if (!dm_tree_node_add_snapshot_target(dnode, size, origin_dlid,
3345 cow_dlid, 1, snap_seg->chunk_size))
5f4b2acf 3346 return_0;
323a167b
JT
3347
3348 return 1;
3349}
3350
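/* Emit one table line for a segment via its segtype's add_target_line hook. */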
ad6b0ebb 3351static int _add_target_to_dtree(struct dev_manager *dm,
81beded3
ZK
3352 struct dm_tree_node *dnode,
3353 struct lv_segment *seg,
3354 struct lv_activate_opts *laopts)
323a167b 3355{
5f4b2acf
AK
3356 uint64_t extent_size = seg->lv->vg->extent_size;
3357
3358 if (!seg->segtype->ops->add_target_line) {
550cae23 3359 log_error(INTERNAL_ERROR "_emit_target cannot handle "
1f5dde38 3360 "segment type %s.", lvseg_name(seg));
5f4b2acf
AK
3361 return 0;
3362 }
3363
2293567c 3364 return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
5f4b2acf 3365 &dm->target_state, seg,
81beded3 3366 laopts, dnode,
e2354ea3 3367 extent_size * _seg_len(seg),
25fe716b 3368 &dm->pvmove_mirror_count);
a69de491 3369}
323a167b 3370
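/*
 * Add a thin external origin LV and every active LV layered on top of
 * it, so the whole stack gets reloaded consistently.
 */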
static int _add_new_external_lv_to_dtree(struct dev_manager *dm,
					 struct dm_tree *dtree,
					 struct logical_volume *external_lv,
					 struct lv_activate_opts *laopts)
{
	struct seg_list *sl;
	struct dm_tree_node *dnode;

	/* We've already processed this node if it already has a context ptr */
	if ((dnode = _cached_dm_tree_node(dm->mem, dtree, external_lv, lv_layer(external_lv))) &&
	    dm_tree_node_get_context(dnode)) {
		/* Skip repeated invocation of external lv processing */
		log_debug_activation("Skipping users for already added external origin LV %s.",
				     display_lvname(external_lv));
		return 1;
	}

	log_debug_activation("Adding external origin LV %s and all active users.",
			     display_lvname(external_lv));

	/* If there is an active origin LV, add the whole origin device, otherwise only the -layer */
	if (!_add_new_lv_to_dtree(dm, dtree, external_lv, laopts,
				  (lv_is_origin(external_lv) &&
				   _cached_dm_info(dm->mem, dtree, external_lv, NULL))
				  ? NULL : lv_layer(external_lv)))
		return_0;

	/*
	 * Add all ACTIVE LVs using this external origin LV. This is
	 * needed because a thin LV converted to use an external origin
	 * could also have been an old-style snapshot.
	 */
	dm_list_iterate_items(sl, &external_lv->segs_using_this_lv)
		/* Add only active layered devices (also avoids loop) */
		if (_cached_dm_info(dm->mem, dtree, sl->seg->lv,
				    lv_layer(sl->seg->lv)) &&
		    !_add_new_lv_to_dtree(dm, dtree, sl->seg->lv,
					  laopts, lv_layer(sl->seg->lv)))
			return_0;

	log_debug_activation("Finished adding external origin LV %s and all active users.",
			     display_lvname(external_lv));

	return 1;
}

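/*
 * Expose the cdata/cmeta sub-device of a cachevol as an extra dm layer:
 * a linear (or error, when a delete is pending) mapping over the
 * cachevol, with cmeta at offset 0 and cdata following the metadata area.
 */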
static int _add_new_cvol_subdev_to_dtree(struct dev_manager *dm,
					 struct dm_tree *dtree,
					 const struct logical_volume *lv,
					 struct lv_activate_opts *laopts,
					 struct lv_layer *lvlayer,
					 int meta_or_data)
{
	const char *layer = meta_or_data ? "cmeta" : "cdata";
	const struct lv_segment *lvseg = first_seg(lv);
	uint64_t size = meta_or_data ? lvseg->metadata_len : lvseg->data_len;
	const struct logical_volume *pool_lv = lvseg->pool_lv;
	struct dm_tree_node *dnode;
	char *dlid, *dlid_pool, *name;
	union lvid lvid = { { lv->vg->id, _get_id_for_meta_or_data(lvseg, meta_or_data) } };

	if (!(dlid = dm_build_dm_uuid(dm->mem, UUID_PREFIX, (const char *) &lvid.s, layer)))
		return_0;

	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, pool_lv->name, layer)))
		return_0;

	if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid, -1, -1,
							  read_only_lv(lv, laopts, layer),
							  ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
							  lvlayer,
							  _get_udev_flags(dm, lv, layer, laopts->noscan,
									  laopts->temporary, 0))))
		return_0;

	if (dm->track_pending_delete) {
		log_debug_activation("Using error for pending delete of %s-%s.",
				     display_lvname(lv), layer);
		if (!dm_tree_node_add_error_target(dnode, size))
			return_0;
	} else {
		/* add load_segment to meta dnode: linear, size of meta area */
		if (!add_linear_area_to_dtree(dnode, size, lv->vg->extent_size,
					      lv->vg->cmd->use_linear_target,
					      lv->vg->name, lv->name))
			return_0;

		if (!(dlid_pool = build_dm_uuid(dm->mem, pool_lv, NULL)))
			return_0;

		/* add seg_area to prev load_seg: offset 0 maps to cachevol lv offset 0 */
		if (!dm_tree_node_add_target_area(dnode, NULL, dlid_pool,
						  meta_or_data ? 0 : lvseg->metadata_len))
			return_0;
	}

	return 1;
}

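/*
 * Add one LV segment to the dtree: verify the kernel target exists,
 * pull in every LV the segment depends on (external origin, mirror log,
 * pool metadata, pools, writecache, integrity metadata, data/metadata
 * areas) and finally emit the table line itself.
 */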
static int _add_segment_to_dtree(struct dev_manager *dm,
				 struct dm_tree *dtree,
				 struct dm_tree_node *dnode,
				 struct lv_segment *seg,
				 struct lv_activate_opts *laopts,
				 const char *layer)
{
	uint32_t s;
	struct lv_segment *seg_present;
	const struct segment_type *segtype;
	const char *target_name;

	/* Ensure required device-mapper targets are loaded */
	seg_present = find_snapshot(seg->lv) ? : seg;
	segtype = seg_present->segtype;

	target_name = (segtype->ops->target_name ?
		       segtype->ops->target_name(seg_present, laopts) :
		       segtype->name);

	log_debug_activation("Checking kernel supports %s segment type for %s%s%s",
			     target_name, display_lvname(seg->lv),
			     layer ? "-" : "", layer ? : "");

	if (segtype->ops->target_present &&
	    !segtype->ops->target_present(seg_present->lv->vg->cmd,
					  seg_present, NULL)) {
		log_error("Can't process LV %s: %s target support missing "
			  "from kernel?", display_lvname(seg->lv), target_name);
		return 0;
	}

	/* Add external origin layer */
	if (seg->external_lv &&
	    !_add_new_external_lv_to_dtree(dm, dtree, seg->external_lv, laopts))
		return_0;

	/* Add mirror log */
	if (seg->log_lv &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->log_lv, laopts, NULL))
		return_0;

	/* Add pool metadata */
	if (seg->metadata_lv &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->metadata_lv, laopts, NULL))
		return_0;

	/* Add pool layer */
	if (seg->pool_lv && !laopts->origin_only &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->pool_lv, laopts,
				  lv_layer(seg->pool_lv)))
		return_0;

	if (seg->writecache && !laopts->origin_only &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->writecache, laopts,
				  lv_layer(seg->writecache)))
		return_0;

	if (seg->integrity_meta_dev && !laopts->origin_only &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->integrity_meta_dev, laopts,
				  lv_layer(seg->integrity_meta_dev)))
		return_0;

	/* Add any LVs used by this segment */
	for (s = 0; s < seg->area_count; ++s) {
		if ((seg_type(seg, s) == AREA_LV) &&
		    /* do not bring up tracked image */
		    !lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
		    /* origin only for cache without pending delete */
		    (!dm->track_pending_delete || !seg_is_cache(seg)) &&
		    !_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
					  laopts,
					  lv_is_vdo_pool(seg_lv(seg, s)) ?
					  lv_layer(seg_lv(seg, s)) : NULL))
			return_0;
		if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
		    !lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
		    !_add_new_lv_to_dtree(dm, dtree, seg_metalv(seg, s),
					  laopts, NULL))
			return_0;
	}

	if (dm->track_pending_delete) {
		/* Replace target and all its used devs with error mapping */
		log_debug_activation("Using error for pending delete %s.",
				     display_lvname(seg->lv));
		if (!dm_tree_node_add_error_target(dnode, (uint64_t) seg->lv->vg->extent_size * _seg_len(seg)))
			return_0;
	} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
		return_0;

	return 1;
}

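/*
 * Recursively add an LV (optionally one -layer of it) and all of its
 * dependencies to the dtree, choosing between snapshot origin, COW,
 * layered pool and plain segment layouts, and setting up pool callbacks
 * and read-ahead on the new node.
 */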
ad6b0ebb 3563static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
84cdf85b 3564 const struct logical_volume *lv, struct lv_activate_opts *laopts,
81beded3 3565 const char *layer)
3e8479bd 3566{
5f4b2acf
AK
3567 struct lv_segment *seg;
3568 struct lv_layer *lvlayer;
df390f17 3569 struct seg_list *sl;
3679bb1c 3570 struct dm_list *snh;
e88f56d9 3571 struct dm_tree_node *dnode;
537f7456 3572 const struct dm_info *dinfo;
a5ec3e38 3573 char *name, *dlid;
a6b22cf3
AK
3574 uint32_t max_stripe_size = UINT32_C(0);
3575 uint32_t read_ahead = lv->read_ahead;
69506f1d 3576 uint32_t read_ahead_flags = UINT32_C(0);
fba86dd4 3577 int save_pending_delete = dm->track_pending_delete;
7e5881dd 3578 int merge_in_progress = 0;
3e8479bd 3579
cac4a974
DT
3580 if (!(lvlayer = dm_pool_alloc(dm->mem, sizeof(*lvlayer)))) {
3581 log_error("_add_new_lv_to_dtree: pool alloc failed for %s %s.",
3582 display_lvname(lv), layer);
3583 return 0;
3584 }
3585 lvlayer->lv = lv;
3586 lvlayer->visible_component = (laopts->component_lv == lv) ? 1 : 0;
3587
8c17233a
ZK
3588 log_debug_activation("Adding new LV %s%s%s to dtree", display_lvname(lv),
3589 layer ? "-" : "", layer ? : "");
9a6e3683
ZK
3590 /* LV with pending delete is never put new into a table */
3591 if (lv_is_pending_delete(lv) && !_cached_dm_info(dm->mem, dtree, lv, NULL))
3592 return 1; /* Replace with error only when already exists */
3593
29bd3ccc
ZK
3594 if (lv_is_cache_pool(lv) &&
3595 !dm_list_empty(&lv->segs_using_this_lv)) {
a018c57f
ZK
3596 /* cache pool is 'meta' LV and does not have a real device node */
3597 if (!_add_new_lv_to_dtree(dm, dtree, seg_lv(first_seg(lv), 0), laopts, NULL))
3598 return_0;
3599 if (!_add_new_lv_to_dtree(dm, dtree, first_seg(lv)->metadata_lv, laopts, NULL))
3600 return_0;
3601 return 1;
3602 }
3603
8dc351e8
AK
3604 /* FIXME Seek a simpler way to lay out the snapshot-merge tree. */
3605
79991aa7
ZK
3606 if (!layer && lv_is_merging_origin(lv)) {
3607 seg = find_snapshot(lv);
c582e3c0 3608 /*
7e5881dd 3609 * Prevent merge if merge isn't currently possible:
c582e3c0 3610 * either origin or merging snapshot are open
9cccf524 3611 * - for old snaps use "snapshot-merge" if it is already in use
537f7456
MS
3612 * - open_count is always retrieved (as of dm-ioctl 4.7.0)
3613 * so just use the tree's existing nodes' info
c582e3c0 3614 */
7e5881dd 3615 if ((dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL))) {
39b7d1ba 3616 /* Merging origin LV is present, check if merging is already running. */
334117ee 3617 if ((seg_is_thin_volume(seg) && _lv_has_thin_device_id(dm->mem, lv, NULL, seg->device_id)) ||
7e5881dd
ZK
3618 (!seg_is_thin_volume(seg) && lv_has_target_type(dm->mem, lv, NULL, TARGET_NAME_SNAPSHOT_MERGE))) {
3619 log_debug_activation("Merging of snapshot volume %s to origin %s is in progress.",
3620 display_lvname(seg->lv), display_lvname(seg->lv));
3621 merge_in_progress = 1; /* Merge is already running */
3622 } /* Merge is not yet running, so check if it can be started */
3623 else if (laopts->resuming) {
3624 log_debug_activation("Postponing pending snapshot merge for origin %s, "
3625 "merge was not started before suspend.",
3626 display_lvname(lv));
3627 laopts->no_merging = 1; /* Cannot be reloaded in suspend */
3628 } /* Non-resuming merge requires origin to be unused */
3629 else if (dinfo->open_count) {
3630 log_debug_activation("Postponing pending snapshot merge for origin %s, "
3631 "origin volume is opened.",
3632 display_lvname(lv));
3633 laopts->no_merging = 1;
3634 }
3635 }
3636
3637 /* If merge would be still undecided, look as snapshot */
3638 if (!merge_in_progress && !laopts->no_merging &&
3639 (dinfo = _cached_dm_info(dm->mem, dtree,
9cccf524
ZK
3640 seg_is_thin_volume(seg) ?
3641 seg->lv : seg->cow, NULL))) {
3642 if (seg_is_thin_volume(seg)) {
3643 /* Active thin snapshot prevents merge */
7e5881dd
ZK
3644 log_debug_activation("Postponing pending snapshot merge for origin volume %s, "
3645 "merging thin snapshot volume %s is active.",
3646 display_lvname(lv), display_lvname(seg->lv));
3647 laopts->no_merging = 1;
9cccf524 3648 } else if (dinfo->open_count) {
7e5881dd
ZK
3649 log_debug_activation("Postponing pending snapshot merge for origin volume %s, "
3650 "merging snapshot volume %s is opened.",
3651 display_lvname(lv), display_lvname(seg->lv));
3652 laopts->no_merging = 1;
9cccf524
ZK
3653 }
3654 }
c582e3c0
MS
3655 }
3656
e59e2f7c 3657 if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
5f4b2acf 3658 return_0;
3e8479bd 3659
a7691cde 3660 /* Even unused thin-pool still needs to get layered UUID -suffix */
00a45ca4
ZK
3661 if (!layer && lv_is_new_thin_pool(lv))
3662 layer = lv_layer(lv);
3663
8153c5f1
DT
3664 /* Adds -real to the dm uuid of wcorig LV. */
3665 if (!layer && lv_is_writecache_origin(lv))
3666 layer = lv_layer(lv); /* "real" */
3667
21b215ee
ZK
3668 /*
3669 * FIXME: we would like to have -private suffixes used for device not processed by udev
3670 * however ATM we also sometimes want to provide /dev/vg/lv symlinks to such devices
3671 * and also be able to correctly report its status with lvs.
3672 *
3673 * Until problems are resolved this code path needs to be disabled.
3674 */
3675 if (0 && lvlayer->visible_component) {
14f782c5
ZK
3676 /* Component LV will be public, do not add any layer suffixes */
3677 if (!(dlid = dm_build_dm_uuid(dm->mem, UUID_PREFIX, lv->lvid.s, NULL)))
3678 return_0;
3679 } else if (!(dlid = build_dm_uuid(dm->mem, lv,layer)))
5f4b2acf 3680 return_0;
3e8479bd 3681
5f4b2acf 3682 /* We've already processed this node if it already has a context ptr */
e88f56d9
AK
3683 if ((dnode = dm_tree_find_node_by_uuid(dtree, dlid)) &&
3684 dm_tree_node_get_context(dnode))
5f4b2acf 3685 return 1;
3e8479bd 3686
5f4b2acf 3687 /*
ad6b0ebb 3688 * Add LV to dtree.
5f4b2acf
AK
3689 * If we're working with precommitted metadata, clear any
3690 * existing inactive table left behind.
3691 * Major/minor settings only apply to the visible layer.
3692 */
df390f17
AK
3693 /* FIXME Move the clear from here until later, so we can leave
3694 * identical inactive tables untouched. (For pvmove.)
3695 */
f16aea9e 3696 if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid,
fe686a51
AK
3697 layer ? UINT32_C(0) : (uint32_t) lv->major,
3698 layer ? UINT32_C(0) : (uint32_t) lv->minor,
a6fdb9d9 3699 read_only_lv(lv, laopts, layer),
10d0d9c7 3700 ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
f16aea9e 3701 lvlayer,
eb3597ac
ZK
3702 _get_udev_flags(dm, lv, layer, laopts->noscan, laopts->temporary,
3703 lvlayer->visible_component))))
5f4b2acf
AK
3704 return_0;
3705
3706 /* Store existing name so we can do rename later */
e88f56d9 3707 lvlayer->old_name = dm_tree_node_get_name(dnode);
5f4b2acf
AK
3708
3709 /* Create table */
3710 dm->pvmove_mirror_count = 0u;
3679bb1c 3711
fba86dd4 3712 if (lv_is_pending_delete(lv))
f5e265a0 3713 /* Handle LVs with pending delete */
fba86dd4
ZK
3714 /* Fow now used only by cache segtype, TODO snapshots */
3715 dm->track_pending_delete = 1;
f5e265a0 3716
a985d5c6
ZK
3717 if (lv_is_cache_vol(lv))
3718 dm_list_iterate_items(sl, &lv->segs_using_this_lv)
3719 if (lv_is_cache(sl->seg->lv) &&
3720 /* Cachevol is used by cache LV segment -> add cvol-cdata/cmeta extra layer */
3721 (!_add_new_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, laopts, lvlayer, 0) ||
3722 !_add_new_cvol_subdev_to_dtree(dm, dtree, sl->seg->lv, laopts, lvlayer, 1)))
3723 return_0;
3724
29bd3ccc
ZK
3725 /* This is unused cache-pool - make metadata accessible */
3726 if (lv_is_cache_pool(lv))
3727 lv = first_seg(lv)->metadata_lv;
3728
3679bb1c
ZK
3729 /* If this is a snapshot origin, add real LV */
3730 /* If this is a snapshot origin + merging snapshot, add cow + real LV */
87331dc4 3731 /* Snapshot origin could be also external origin */
3679bb1c
ZK
3732 if (lv_is_origin(lv) && !layer) {
3733 if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, "real"))
5f4b2acf 3734 return_0;
3679bb1c
ZK
3735 if (!laopts->no_merging && lv_is_merging_origin(lv)) {
3736 if (!_add_new_lv_to_dtree(dm, dtree,
9974136b 3737 find_snapshot(lv)->cow, laopts, "cow"))
3679bb1c
ZK
3738 return_0;
3739 /*
3740 * Must also add "real" LV for use when
3741 * snapshot-merge target is added
3742 */
3743 if (!_add_snapshot_merge_target_to_dtree(dm, dnode, lv))
3744 return_0;
3745 } else if (!_add_origin_target_to_dtree(dm, dnode, lv))
3746 return_0;
3747
3748 /* Add any snapshots of this LV */
3749 dm_list_iterate(snh, &lv->snapshot_segs)
3750 if (!_add_new_lv_to_dtree(dm, dtree,
3751 dm_list_struct_base(snh, struct lv_segment,
3752 origin_list)->cow,
3753 laopts, NULL))
3754 return_0;
3755 } else if (lv_is_cow(lv) && !layer) {
3756 if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, "cow"))
3757 return_0;
3758 if (!_add_snapshot_target_to_dtree(dm, dnode, lv, laopts))
3759 return_0;
00a45ca4 3760 } else if (!layer && ((lv_is_thin_pool(lv) && !lv_is_new_thin_pool(lv)) ||
6612d8dd 3761 lv_is_vdo_pool(lv) ||
00a45ca4 3762 lv_is_external_origin(lv))) {
6612d8dd 3763 /* External origin or 'used' Thin pool or VDO pool is using layer */
3679bb1c
ZK
3764 if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, lv_layer(lv)))
3765 return_0;
3766 if (!_add_layer_target_to_dtree(dm, dnode, lv))
3767 return_0;
3768 } else {
3769 /* Add 'real' segments for LVs */
3770 dm_list_iterate_items(seg, &lv->segments) {
3771 if (!_add_segment_to_dtree(dm, dtree, dnode, seg, laopts, layer))
3772 return_0;
3773 if (max_stripe_size < seg->stripe_size * seg->area_count)
3774 max_stripe_size = seg->stripe_size * seg->area_count;
3775 }
6612d8dd
ZK
3776
3777 if (!layer && lv_is_vdo_pool(lv) &&
3778 !_add_layer_target_to_dtree(dm, dnode, lv))
3779 return_0;
5f4b2acf 3780 }
3e8479bd 3781
3679bb1c
ZK
3782 /* Setup thin pool callback */
3783 if (lv_is_thin_pool(lv) && layer &&
c0c1ada8
ZK
3784 !_pool_register_callback(dm, dnode, lv))
3785 return_0;
3786
a9eaab6b 3787 if (lv_is_cache(lv) && !lv_is_cache_vol(first_seg(lv)->pool_lv) &&
69434c2e
ZK
3788 /* Register callback only for layer activation or non-layered cache LV */
3789 (layer || !lv_layer(lv)) &&
3790 /* Register callback when metadata LV is NOT already active */
3791 !_cached_dm_info(dm->mem, dtree, first_seg(first_seg(lv)->pool_lv)->metadata_lv, NULL) &&
c0c1ada8 3792 !_pool_register_callback(dm, dnode, lv))
3679bb1c
ZK
3793 return_0;
3794
9fe7aba2
DT
3795 if (lv_is_cache(lv) && lv_is_cache_vol(first_seg(lv)->pool_lv) &&
3796 /* Register callback only for layer activation or non-layered cache LV */
3797 (layer || !lv_layer(lv)) &&
317071ec
DT
3798 /* Register callback when cachevol LV is NOT already active */
3799 !_cached_dm_info(dm->mem, dtree, first_seg(lv)->pool_lv, NULL) &&
9fe7aba2
DT
3800 !_pool_register_callback(dm, dnode, lv))
3801 return_0;
3802
0451225c
ZK
3803 /*
3804 * Update tables for ANY PVMOVE holders for active LV where the name starts with 'pvmove',
3805 * but it's not anymore PVMOVE LV and also it's not a PVMOVE _mimage LV.
3806 * When resume happens, tables MUST be already preloaded with correct entries!
3807 * (since we can't preload different table while devices are suspended)
3808 */
3809 if (!lv_is_pvmove(lv) && !strncmp(lv->name, "pvmove", 6) && !strchr(lv->name, '_') &&
3810 (dinfo = _cached_dm_info(dm->mem, dtree, lv, NULL)))
3811 if (!_add_holders_to_dtree(dm, dtree, lv, laopts, dinfo))
3812 return_0;
3813
9b6135dc 3814 if (read_ahead == DM_READ_AHEAD_AUTO) {
1c1b068f
ZK
3815 /* we need RA at least twice a whole stripe - see the comment in md/raid0.c */
3816 read_ahead = max_stripe_size * 2;
3679bb1c 3817 /* FIXME: layered device read-ahead */
d3961002 3818 if (!read_ahead)
c1fdeec9 3819 lv_calculate_readahead(lv, &read_ahead);
69506f1d 3820 read_ahead_flags = DM_READ_AHEAD_MINIMUM_FLAG;
9b6135dc 3821 }
a6b22cf3 3822
69506f1d 3823 dm_tree_node_set_read_ahead(dnode, read_ahead, read_ahead_flags);
a6b22cf3 3824
df390f17 3825 /* Add any LVs referencing a PVMOVE LV unless told not to */
2360ce35 3826 if (dm->track_pvmove_deps && lv_is_pvmove(lv))
df390f17 3827 dm_list_iterate_items(sl, &lv->segs_using_this_lv)
81beded3 3828 if (!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv, laopts, NULL))
df390f17
AK
3829 return_0;
3830
fba86dd4
ZK
3831 dm->track_pending_delete = save_pending_delete; /* restore */
3832
9fdc84c3 3833 return 1;
3e8479bd
AK
3834}
3835
/* FIXME: symlinks should be created/destroyed at the same time
 * as the kernel devices but we can't do that from within libdevmapper
 * at present so we must walk the tree twice instead. */

/*
 * Create LV symlinks for children of supplied root node.
 */
static int _create_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
{
	void *handle = NULL;
	struct dm_tree_node *child;
	struct lv_layer *lvlayer;
	char *old_vgname, *old_lvname, *old_layer;
	char *new_vgname, *new_lvname, *new_layer;
	const char *name;
	int r = 1;

	/* Nothing to do if udev fallback is disabled. */
	if (!_check_udev_fallback(dm->cmd)) {
		fs_set_create();
		return 1;
	}

	while ((child = dm_tree_next_child(&handle, root, 0))) {
		if (!(lvlayer = dm_tree_node_get_context(child)))
			continue;

		/* Detect rename */
		name = dm_tree_node_get_name(child);

		if (name && lvlayer->old_name && *lvlayer->old_name && strcmp(name, lvlayer->old_name)) {
			if (!dm_split_lvm_name(dm->mem, lvlayer->old_name, &old_vgname, &old_lvname, &old_layer)) {
				log_error("_create_lv_symlinks: Couldn't split up old device name %s.", lvlayer->old_name);
				return 0;
			}
			if (!dm_split_lvm_name(dm->mem, name, &new_vgname, &new_lvname, &new_layer)) {
				log_error("_create_lv_symlinks: Couldn't split up new device name %s.", name);
				return 0;
			}
			if (!fs_rename_lv(lvlayer->lv, name, old_vgname, old_lvname))
				r = 0;
			continue;
		}
		if (_lv_has_mknode(lvlayer->lv) || lvlayer->visible_component) {
			if (!_dev_manager_lv_mknodes(lvlayer->lv))
				r = 0;
			continue;
		}
		if (!_dev_manager_lv_rmnodes(lvlayer->lv))
			r = 0;
	}

	return r;
}

/*
 * Remove LV symlinks for children of supplied root node.
 */
static int _remove_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
{
	void *handle = NULL;
	struct dm_tree_node *child;
	char *vgname, *lvname, *layer;
	int r = 1;

	/* Nothing to do if udev fallback is disabled. */
	if (!_check_udev_fallback(dm->cmd))
		return 1;

	while ((child = dm_tree_next_child(&handle, root, 0))) {
		if (!dm_split_lvm_name(dm->mem, dm_tree_node_get_name(child), &vgname, &lvname, &layer)) {
			r = 0;
			continue;
		}

		if (!*vgname)
			continue;

		/* only top level layer has symlinks */
		if (*layer)
			continue;

		fs_del_lv_byname(dm->cmd->dev_dir, vgname, lvname,
				 dm->cmd->current_settings.udev_rules);
	}

	return r;
}

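/*
 * Collect the uuids of unused non-toplevel (layered) nodes into the
 * pending-delete list and deactivate them once nothing is suspended.
 */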
fba86dd4 3925static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, const char *non_toplevel_tree_dlid)
5f4b2acf
AK
3926{
3927 void *handle = NULL;
e88f56d9 3928 struct dm_tree_node *child;
5f4b2acf
AK
3929 char *vgname, *lvname, *layer;
3930 const char *name, *uuid;
fba86dd4
ZK
3931 struct dm_str_list *dl;
3932
e88f56d9
AK
3933 while ((child = dm_tree_next_child(&handle, root, 0))) {
3934 if (!(name = dm_tree_node_get_name(child)))
5f4b2acf
AK
3935 continue;
3936
e88f56d9 3937 if (!(uuid = dm_tree_node_get_uuid(child)))
5f4b2acf
AK
3938 continue;
3939
67cdbd7e
AK
3940 if (!dm_split_lvm_name(dm->mem, name, &vgname, &lvname, &layer)) {
3941 log_error("_clean_tree: Couldn't split up device name %s.", name);
3942 return 0;
3943 }
5f4b2acf
AK
3944
3945 /* Not meant to be top level? */
fba86dd4 3946 if (!*layer)
ca509c97
ZK
3947 continue;
3948
22149572
AK
3949 /* If operation was performed on a partial tree, don't remove it */
3950 if (non_toplevel_tree_dlid && !strcmp(non_toplevel_tree_dlid, uuid))
3951 continue;
3952
b2885b71
ZK
3953 if (!(uuid = dm_pool_strdup(dm->cmd->pending_delete_mem, uuid))) {
3954 log_error("_clean_tree: Failed to duplicate uuid.");
3955 return 0;
3956 }
3957
3958 if (!str_list_add(dm->cmd->pending_delete_mem, &dm->cmd->pending_delete, uuid))
a74be32b 3959 return_0;
5f4b2acf
AK
3960 }
3961
30a98e4d 3962 /* Deactivate any tracked pending delete nodes */
b2885b71 3963 if (!dm_list_empty(&dm->cmd->pending_delete) && !dm_get_suspended_counter()) {
30a98e4d
ZK
3964 fs_unlock();
3965 dm_tree_set_cookie(root, fs_get_cookie());
b2885b71 3966 dm_list_iterate_items(dl, &dm->cmd->pending_delete) {
30a98e4d
ZK
3967 log_debug_activation("Deleting tracked UUID %s.", dl->str);
3968 if (!dm_tree_deactivate_children(root, dl->str, strlen(dl->str)))
3969 return_0;
3970 }
b2885b71
ZK
3971 dm_list_init(&dm->cmd->pending_delete);
3972 dm_pool_empty(dm->cmd->pending_delete_mem);
30a98e4d
ZK
3973 }
3974
5f4b2acf 3975 return 1;
3e8479bd
AK
3976}
3977
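/*
 * Build a partial dtree for the LV and walk it according to the
 * requested action (preload, activate, suspend, deactivate or clean).
 * All dev_manager entry points below funnel through here.
 */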
static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
			struct lv_activate_opts *laopts, action_t action)
{
	static const char _action_names[][24] = {
		"PRELOAD", "ACTIVATE", "DEACTIVATE", "SUSPEND", "SUSPEND_WITH_LOCKFS", "CLEAN"
	};
	const size_t DLID_SIZE = ID_LEN + sizeof(UUID_PREFIX) - 1;
	struct dm_tree *dtree;
	struct dm_tree_node *root;
	char *dlid;
	int r = 0;

	if (action < DM_ARRAY_SIZE(_action_names))
		log_debug_activation("Creating %s%s tree for %s.",
				     _action_names[action],
				     (laopts->origin_only) ? " origin-only" : "",
				     display_lvname(lv));

	/* Some LVs cannot be used for a top level tree */
	/* TODO: add more.... */
	if (lv_is_cache_pool(lv) && !dm_list_empty(&lv->segs_using_this_lv)) {
		log_error(INTERNAL_ERROR "Cannot create tree for %s.",
			  display_lvname(lv));
		return 0;
	}

	/* Some targets may build a bigger tree for activation */
	dm->activation = ((action == PRELOAD) || (action == ACTIVATE));
	dm->suspend = (action == SUSPEND_WITH_LOCKFS) || (action == SUSPEND);

	/* Drop any cache before DM table manipulation within locked section
	 * TODO: check if it makes sense to manage cache within lock */
	dm_devs_cache_destroy();

	if (!(dtree = _create_partial_dtree(dm, lv, laopts->origin_only)))
		return_0;

	if (!(root = dm_tree_find_node(dtree, 0, 0))) {
		log_error("Lost dependency tree root node.");
		goto out_no_root;
	}

	/* Restore fs cookie */
	dm_tree_set_cookie(root, fs_get_cookie());

	if (!(dlid = build_dm_uuid(dm->mem, lv, laopts->origin_only ? lv_layer(lv) : NULL)))
		goto_out;

	/* Only process nodes with uuid of "LVM-" plus VG id. */
	switch (action) {
	case CLEAN:
		if (retry_deactivation())
			dm_tree_retry_remove(root);
		/* Deactivate any unused non-toplevel nodes */
		if (!_clean_tree(dm, root, laopts->origin_only ? dlid : NULL))
			goto_out;
		break;
	case DEACTIVATE:
		if (retry_deactivation())
			dm_tree_retry_remove(root);
		/* Deactivate LV and all devices it references that nothing else has open. */
		if (!dm_tree_deactivate_children(root, dlid, DLID_SIZE))
			goto_out;
		if (!_remove_lv_symlinks(dm, root))
			log_warn("Failed to remove all device symlinks associated with %s.",
				 display_lvname(lv));
		break;
	case SUSPEND:
		dm_tree_skip_lockfs(root);
		if (!dm->flush_required)
			dm_tree_use_no_flush_suspend(root);
		/* Fall through */
	case SUSPEND_WITH_LOCKFS:
		if (!dm_tree_suspend_children(root, dlid, DLID_SIZE))
			goto_out;
		break;
	case PRELOAD:
	case ACTIVATE:
		/* Add all required new devices to tree */
		if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts,
					  (lv_is_origin(lv) && laopts->origin_only) ? "real" :
					  (laopts->origin_only &&
					   (lv_is_thin_pool(lv) ||
					    lv_is_vdo_pool(lv))) ?
					  lv_layer(lv) : NULL))
			goto_out;

		/* Preload any devices required before any suspensions */
		if (!dm_tree_preload_children(root, dlid, DLID_SIZE))
			goto_out;

		if (dm_tree_node_size_changed(root) < 0)
			dm->flush_required = 1;
		/* Currently a flush is still required for any size increase
		 * unless the LV is a thin or VDO pool/volume */
		else if (!lv_is_thin_volume(lv) &&
			 !lv_is_thin_pool(lv) &&
			 !lv_is_vdo(lv) &&
			 !lv_is_vdo_pool(lv) &&
			 dm_tree_node_size_changed(root))
			dm->flush_required = 1;

		if (action == ACTIVATE) {
			if (!dm_tree_activate_children(root, dlid, DLID_SIZE))
				goto_out;
			if (!_create_lv_symlinks(dm, root))
				log_warn("Failed to create symlinks for %s.",
					 display_lvname(lv));
		}

		break;
	default:
		log_error(INTERNAL_ERROR "_tree_action: Action %u not supported.", action);
		goto out;
	}

	r = 1;

out:
	/* Save fs cookie for udev settle, do not wait here */
	fs_set_cookie(dm_tree_get_cookie(root));
out_no_root:
	dm_tree_free(dtree);

	return r;
}

2d6fcbf6 4105/* origin_only may only be set if we are resuming (not activating) an origin LV */
84cdf85b 4106int dev_manager_activate(struct dev_manager *dm, const struct logical_volume *lv,
81beded3 4107 struct lv_activate_opts *laopts)
5f4b2acf 4108{
81beded3 4109 if (!_tree_action(dm, lv, laopts, ACTIVATE))
5f4b2acf
AK
4110 return_0;
4111
de75bc66
ZK
4112 if (!_tree_action(dm, lv, laopts, CLEAN))
4113 return_0;
4114
4115 return 1;
5f4b2acf
AK
4116}
4117
2d6fcbf6 4118/* origin_only may only be set if we are resuming (not activating) an origin LV */
84cdf85b 4119int dev_manager_preload(struct dev_manager *dm, const struct logical_volume *lv,
81beded3 4120 struct lv_activate_opts *laopts, int *flush_required)
5f4b2acf 4121{
5b2227c2
ZK
4122 dm->flush_required = *flush_required;
4123
81beded3 4124 if (!_tree_action(dm, lv, laopts, PRELOAD))
de75bc66 4125 return_0;
eb91c4ee
MB
4126
4127 *flush_required = dm->flush_required;
4128
4129 return 1;
5f4b2acf 4130}
b427ecee 4131
84cdf85b 4132int dev_manager_deactivate(struct dev_manager *dm, const struct logical_volume *lv)
352a99b9 4133{
81beded3 4134 struct lv_activate_opts laopts = { 0 };
352a99b9 4135
de75bc66
ZK
4136 if (!_tree_action(dm, lv, &laopts, DEACTIVATE))
4137 return_0;
352a99b9 4138
de75bc66 4139 return 1;
352a99b9
AK
4140}
4141
84cdf85b 4142int dev_manager_suspend(struct dev_manager *dm, const struct logical_volume *lv,
81beded3 4143 struct lv_activate_opts *laopts, int lockfs, int flush_required)
b427ecee 4144{
eb91c4ee
MB
4145 dm->flush_required = flush_required;
4146
de75bc66
ZK
4147 if (!_tree_action(dm, lv, laopts, lockfs ? SUSPEND_WITH_LOCKFS : SUSPEND))
4148 return_0;
4149
4150 return 1;
b427ecee
AK
4151}
4152
/*
 * Does device use VG somewhere in its construction?
 * Returns 1 if uncertain.
 */
int dev_manager_device_uses_vg(struct device *dev,
			       struct volume_group *vg)
{
	struct dm_tree *dtree;
	struct dm_tree_node *root;
	char dlid[sizeof(UUID_PREFIX) + sizeof(struct id) - 1] __attribute__((aligned(8)));
	int r = 1;

	if (!(dtree = dm_tree_create())) {
		log_error("Failed to create partial dtree.");
		return r;
	}

	dm_tree_set_optional_uuid_suffixes(dtree, (const char **) _uuid_suffix_list);

	if (!dm_tree_add_dev(dtree, MAJOR(dev->dev), MINOR(dev->dev))) {
		log_error("Failed to add device %s (%u:%u) to dtree.",
			  dev_name(dev), MAJOR(dev->dev), MINOR(dev->dev));
		goto out;
	}

	memcpy(dlid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1);
	memcpy(dlid + sizeof(UUID_PREFIX) - 1, &vg->id.uuid[0], sizeof(vg->id));

	if (!(root = dm_tree_find_node(dtree, 0, 0))) {
		log_error("Lost dependency tree root node.");
		goto out;
	}

	if (dm_tree_children_use_uuid(root, dlid, sizeof(UUID_PREFIX) + sizeof(vg->id) - 1))
		goto_out;

	r = 0;

out:
	dm_tree_free(dtree);

	return r;
}

/*
 * The crypt offset is usually the LUKS header size but can be larger.
 * The LUKS header is usually 2MB for LUKS1 and 16MB for LUKS2.
 * The offset needs to be subtracted from the LV size to get the
 * size used to resize the crypt device.
 */
int get_crypt_table_offset(dev_t crypt_devt, uint32_t *offset_bytes)
{
	struct dm_task *dmt;
	uint64_t start, length;
	char *target_type = NULL;
	void *next = NULL;
	char *params = NULL;
	char offset_str[32] = { 0 };
	int copy_offset = 0;
	int spaces = 0;
	unsigned i, i_off = 0;

	if (!(dmt = _setup_task_run(DM_DEVICE_TABLE, NULL, NULL, NULL, NULL,
				    MAJOR(crypt_devt), MINOR(crypt_devt), 0, 0, 0)))
		return_0;

	next = dm_get_next_target(dmt, next, &start, &length, &target_type, &params);

	if (!target_type || !params || strcmp(target_type, "crypt")) {
		dm_task_destroy(dmt);
		return_0;
	}

	/*
	 * Get the offset from the params string:
	 * <cipher> <key> <iv_offset> <device> <offset> [<#opt_params> <opt_params>]
	 * <offset> is reported in 512 byte sectors.
	 */
	for (i = 0; params[i]; i++) {
		if (params[i] == ' ') {
			spaces++;
			if (spaces == 4)
				copy_offset = 1;
			if (spaces == 5)
				break;
			continue;
		}
		if (!copy_offset)
			continue;

		offset_str[i_off++] = params[i];

		if (i_off == sizeof(offset_str)) {
			offset_str[0] = '\0';
			break;
		}
	}
	dm_task_destroy(dmt);

	if (!offset_str[0])
		return_0;

	*offset_bytes = ((uint32_t) strtoul(offset_str, NULL, 0) * 512);
	return 1;
}
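
/*
 * Illustrative example (table values hypothetical): for a crypt table line
 *   "aes-xts-plain64 000...0 0 253:2 32768"
 * the fifth field "32768" is copied out above, giving
 * *offset_bytes = 32768 * 512 = 16 MiB, a typical LUKS2 header size.
 */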