]> sourceware.org Git - lvm2.git/blame - lib/activate/dev_manager.c
thin: fix recent commits
[lvm2.git] / lib / activate / dev_manager.c
CommitLineData
ca73e23f 1/*
67cdbd7e 2 * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
a18dcfb5 3 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
ca73e23f 4 *
6606c3ae
AK
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
be684599 9 * of the GNU Lesser General Public License v.2.1.
6606c3ae 10 *
be684599 11 * You should have received a copy of the GNU Lesser General Public License
6606c3ae
AK
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
ca73e23f
JT
14 */
15
d1d9800e 16#include "lib.h"
aa378998 17#include "str_list.h"
ca73e23f 18#include "dev_manager.h"
c15334eb 19#include "lvm-string.h"
de6c9183 20#include "fs.h"
a9953411 21#include "defaults.h"
c4ddb31a 22#include "segtype.h"
4922197a 23#include "display.h"
c01f8542 24#include "toolcontext.h"
4922197a
AK
25#include "targets.h"
26#include "config.h"
352a99b9 27#include "filter.h"
f247a4e7 28#include "activate.h"
6c7a6c07 29#include "lvm-exec.h"
ca73e23f 30
349f09e4 31#include <limits.h>
11d2da40 32#include <dirent.h>
ca73e23f 33
9a90f1ab
AK
34#define MAX_TARGET_PARAMSIZE 50000
35
a69de491 36typedef enum {
5f4b2acf 37 PRELOAD,
a69de491 38 ACTIVATE,
7bb6856a 39 DEACTIVATE,
a69de491 40 SUSPEND,
9cd3426d 41 SUSPEND_WITH_LOCKFS,
5f4b2acf 42 CLEAN
7bb6856a 43} action_t;
a69de491 44
ca73e23f 45struct dev_manager {
2262b320 46 struct dm_pool *mem;
ca73e23f 47
4922197a
AK
48 struct cmd_context *cmd;
49
4922197a 50 void *target_state;
b9e67d4f 51 uint32_t pvmove_mirror_count;
eb91c4ee 52 int flush_required;
df390f17 53 unsigned track_pvmove_deps;
a9953411 54
ca73e23f 55 char *vg_name;
ca73e23f
JT
56};
57
5f4b2acf
AK
58struct lv_layer {
59 struct logical_volume *lv;
60 const char *old_name;
61};
0fe3a2c5 62
64e353da 63static const char _thin_layer[] = "tpool";
0d59090e 64
a18dcfb5 65int read_only_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
5f4b2acf 66{
a18dcfb5 67 return (laopts->read_only || !(lv->vg->status & LVM_WRITE) || !(lv->status & LVM_WRITE));
5f4b2acf
AK
68}
69
fc28b60f
JT
/*
 * Low level device-layer operations.
 */

/*
 * Create and configure a dm_task of the given ioctl type.  Each
 * identifier (name, uuid, event_nr, major:minor) is optional and only
 * set when supplied.  Returns NULL on any setup failure.
 */
static struct dm_task *_setup_task(const char *name, const char *uuid,
				   uint32_t *event_nr, int task,
				   uint32_t major, uint32_t minor)
{
	struct dm_task *dmt;

	if (!(dmt = dm_task_create(task)))
		return_NULL;

	if (name && !dm_task_set_name(dmt, name))
		goto_out;

	/* Skip empty uuid strings as well as NULL. */
	if (uuid && *uuid && !dm_task_set_uuid(dmt, uuid))
		goto_out;

	if (event_nr && !dm_task_set_event_nr(dmt, *event_nr))
		goto_out;

	/* major == 0 means "not specified"; final 1 presumably allows
	 * the default major fallback — TODO confirm against libdevmapper. */
	if (major && !dm_task_set_major_minor(dmt, major, minor, 1))
		goto_out;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	return dmt;
      out:
	dm_task_destroy(dmt);
	return NULL;
}
102
/*
 * Run a DM_DEVICE_INFO (or, with mknodes set, DM_DEVICE_MKNODES) ioctl
 * for the device identified by name/dlid or major:minor and fill *info,
 * plus *read_ahead when requested.  Returns 1 on success, 0 on failure.
 */
static int _info_run(const char *name, const char *dlid, struct dm_info *info,
		     uint32_t *read_ahead, int mknodes, int with_open_count,
		     int with_read_ahead, uint32_t major, uint32_t minor)
{
	int r = 0;
	struct dm_task *dmt;
	int dmtask;

	dmtask = mknodes ? DM_DEVICE_MKNODES : DM_DEVICE_INFO;

	/* MKNODES is driven by name; plain INFO looks up by uuid (dlid). */
	if (!(dmt = _setup_task(mknodes ? name : NULL, dlid, 0, dmtask, major, minor)))
		return_0;

	/* Skip the (relatively expensive) open_count query when unwanted. */
	if (!with_open_count)
		if (!dm_task_no_open_count(dmt))
			log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, info))
		goto_out;

	if (with_read_ahead && info->exists) {
		if (!dm_task_get_read_ahead(dmt, read_ahead))
			goto_out;
	} else if (read_ahead)
		/* Device absent or read-ahead not requested. */
		*read_ahead = DM_READ_AHEAD_NONE;

	r = 1;

      out:
	dm_task_destroy(dmt);
	return r;
}
fc28b60f 138
/*
 * Decide whether a device-mapper device may be used (e.g. as a PV).
 * Rejects devices that are empty, suspended (when such devices are to
 * be ignored), built on mirror or snapshot-origin targets (again only
 * when ignoring suspended devices), made entirely of error targets, or
 * that are reserved internal LVM devices.  Returns 1 if usable.
 */
int device_is_usable(struct device *dev)
{
	struct dm_task *dmt;
	struct dm_info info;
	const char *name, *uuid;
	uint64_t start, length;
	char *target_type = NULL;
	char *params, *vgname = NULL, *lvname, *layer;
	void *next = NULL;
	int only_error_target = 1;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
		return_0;

	if (!dm_task_set_major_minor(dmt, MAJOR(dev->dev), MINOR(dev->dev), 1))
		goto_out;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_error("Failed to get state of mapped device");
		goto out;
	}

	if (!dm_task_get_info(dmt, &info))
		goto_out;

	if (!info.exists)
		goto out;

	name = dm_task_get_name(dmt);
	uuid = dm_task_get_uuid(dmt);

	if (!info.target_count) {
		log_debug("%s: Empty device %s not usable.", dev_name(dev), name);
		goto out;
	}

	if (info.suspended && ignore_suspended_devices()) {
		log_debug("%s: Suspended device %s not usable.", dev_name(dev), name);
		goto out;
	}

	/* FIXME Also check for mirror block_on_error and mpath no paths */
	/* For now, we exclude all mirrors */

	/* Walk every target line of the device's table. */
	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &target_type, &params);
		/* Skip if target type doesn't match */
		if (target_type && !strcmp(target_type, "mirror") && ignore_suspended_devices()) {
			log_debug("%s: Mirror device %s not usable.", dev_name(dev), name);
			goto out;
		}

		/*
		 * Snapshot origin could be sitting on top of a mirror which
		 * could be blocking I/O. Skip snapshot origins entirely for
		 * now.
		 *
		 * FIXME: rather than skipping origin, check if mirror is
		 * underneath and if the mirror is blocking I/O.
		 */
		if (target_type && !strcmp(target_type, "snapshot-origin") &&
		    ignore_suspended_devices()) {
			log_debug("%s: Snapshot-origin device %s not usable.",
				  dev_name(dev), name);
			goto out;
		}

		/* Any non-error target clears the all-error flag. */
		if (target_type && strcmp(target_type, "error"))
			only_error_target = 0;
	} while (next);

	/* Skip devices consisting entirely of error targets. */
	/* FIXME Deal with device stacked above error targets? */
	if (only_error_target) {
		log_debug("%s: Error device %s not usable.",
			  dev_name(dev), name);
		goto out;
	}

	/* FIXME Also check dependencies? */

	/* Check internal lvm devices */
	if (uuid && !strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1)) {
		/* dm_split_lvm_name() dissects the copy in place. */
		if (!(vgname = dm_strdup(name)) ||
		    !dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &layer))
			goto_out;

		/* Reserved LV names and sub-LV layer suffixes mark
		 * private LVM devices. */
		if (lvname && (is_reserved_lvname(lvname) || *layer)) {
			log_debug("%s: Reserved internal LV device %s/%s%s%s not usable.",
				  dev_name(dev), vgname, lvname, *layer ? "-" : "", layer);
			goto out;
		}
	}

	r = 1;

      out:
	dm_free(vgname);
	dm_task_destroy(dmt);
	return r;
}
245
/*
 * Look up device info by dlid.  If the lookup with the full uuid finds
 * nothing, retry with the "LVM-" prefix stripped — presumably to cope
 * with uuids created before the prefix was introduced (TODO confirm).
 */
static int _info(const char *dlid, int with_open_count, int with_read_ahead,
		 struct dm_info *info, uint32_t *read_ahead)
{
	int r = 0;

	if ((r = _info_run(NULL, dlid, info, read_ahead, 0, with_open_count,
			   with_read_ahead, 0, 0)) && info->exists)
		return 1;
	else if ((r = _info_run(NULL, dlid + sizeof(UUID_PREFIX) - 1, info,
				read_ahead, 0, with_open_count,
				with_read_ahead, 0, 0)) && info->exists)
		return 1;

	return r;
}
261
/* Look up device info purely by major:minor number. */
static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info)
{
	return _info_run(NULL, NULL, info, NULL, 0, 0, 0, major, minor);
}
266
ab9663f3 267int dev_manager_info(struct dm_pool *mem, const struct logical_volume *lv,
2d6fcbf6 268 const char *layer,
a6b22cf3
AK
269 int with_open_count, int with_read_ahead,
270 struct dm_info *info, uint32_t *read_ahead)
f894b4b1 271{
3eadbbeb 272 char *dlid, *name;
ab9663f3
MB
273 int r;
274
e59e2f7c 275 if (!(name = dm_build_dm_name(mem, lv->vg->name, lv->name, layer))) {
ab9663f3
MB
276 log_error("name build failed for %s", lv->name);
277 return 0;
278 }
03b49fe1 279
2d6fcbf6
AK
280 if (!(dlid = build_dm_uuid(mem, lv->lvid.s, layer))) {
281 log_error("dlid build failed for %s", name);
03b49fe1
AK
282 return 0;
283 }
284
ab9663f3
MB
285 log_debug("Getting device info for %s [%s]", name, dlid);
286 r = _info(dlid, with_open_count, with_read_ahead, info, read_ahead);
287
3eadbbeb 288 dm_pool_free(mem, name);
ab9663f3 289 return r;
f894b4b1
AK
290}
291
/*
 * Fetch the cached dm_info for an LV from an already-built dm_tree.
 * Returns NULL when the node is absent, the device does not exist, or
 * the lookup fails.
 */
static const struct dm_info *_cached_info(struct dm_pool *mem,
					  const struct logical_volume *lv,
					  struct dm_tree *dtree)
{
	const char *dlid;
	struct dm_tree_node *dnode;
	const struct dm_info *dinfo;

	if (!(dlid = build_dm_uuid(mem, lv->lvid.s, NULL))) {
		log_error("dlid build failed for %s", lv->name);
		return NULL;
	}

	/* An activating merging origin won't have a node in the tree yet */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, dlid)))
		return NULL;

	if (!(dinfo = dm_tree_node_get_info(dnode))) {
		log_error("failed to get info from tree node for %s", lv->name);
		return NULL;
	}

	if (!dinfo->exists)
		return NULL;

	return dinfo;
}
319
bd43da4f 320#if 0
c826c0d1 321/* FIXME Interface must cope with multiple targets */
39ed033a
AL
322static int _status_run(const char *name, const char *uuid,
323 unsigned long long *s, unsigned long long *l,
324 char **t, uint32_t t_size, char **p, uint32_t p_size)
1951dba9
AL
325{
326 int r = 0;
327 struct dm_task *dmt;
633f889c 328 struct dm_info info;
1951dba9 329 void *next = NULL;
aefa3c6d 330 uint64_t start, length;
1951dba9 331 char *type = NULL;
39ed033a 332 char *params = NULL;
1951dba9 333
0c8bdaf3 334 if (!(dmt = _setup_task(name, uuid, 0, DM_DEVICE_STATUS, 0, 0)))
c51b9fff 335 return_0;
1951dba9 336
e9c761b8
AK
337 if (!dm_task_no_open_count(dmt))
338 log_error("Failed to disable open_count");
339
5f4b2acf
AK
340 if (!dm_task_run(dmt))
341 goto_out;
1951dba9 342
5f4b2acf
AK
343 if (!dm_task_get_info(dmt, &info) || !info.exists)
344 goto_out;
633f889c 345
1951dba9
AL
346 do {
347 next = dm_get_next_target(dmt, next, &start, &length,
348 &type, &params);
c826c0d1 349 if (type) {
39ed033a
AL
350 *s = start;
351 *l = length;
352 /* Make sure things are null terminated */
353 strncpy(*t, type, t_size);
c826c0d1 354 (*t)[t_size - 1] = '\0';
39ed033a 355 strncpy(*p, params, p_size);
c826c0d1 356 (*p)[p_size - 1] = '\0';
39ed033a
AL
357
358 r = 1;
c826c0d1 359 /* FIXME Cope with multiple targets! */
39ed033a 360 break;
1951dba9 361 }
1951dba9 362
c826c0d1 363 } while (next);
1951dba9
AL
364
365 out:
366 dm_task_destroy(dmt);
367 return r;
368}
369
a9953411
AK
370static int _status(const char *name, const char *uuid,
371 unsigned long long *start, unsigned long long *length,
372 char **type, uint32_t type_size, char **params,
373 uint32_t param_size) __attribute__ ((unused));
374
39ed033a
AL
375static int _status(const char *name, const char *uuid,
376 unsigned long long *start, unsigned long long *length,
377 char **type, uint32_t type_size, char **params,
378 uint32_t param_size)
1951dba9 379{
878467cd
AK
380 if (uuid && *uuid) {
381 if (_status_run(NULL, uuid, start, length, type,
382 type_size, params, param_size) &&
383 *params)
384 return 1;
633f889c 385 else if (_status_run(NULL, uuid + sizeof(UUID_PREFIX) - 1, start,
878467cd
AK
386 length, type, type_size, params,
387 param_size) &&
388 *params)
389 return 1;
390 }
1951dba9 391
39ed033a 392 if (name && _status_run(name, NULL, start, length, type, type_size,
c826c0d1 393 params, param_size))
1951dba9 394 return 1;
c826c0d1 395
1951dba9
AL
396 return 0;
397}
bd43da4f 398#endif
1951dba9 399
/*
 * Return 1 if the LV's live table contains a target whose type begins
 * with target_type (prefix match via strncmp).  The 'out' and 'bad'
 * labels are ordered so that a _setup_task() failure skips the
 * dm_task_destroy() but still frees dlid from the pool.
 */
int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
		       const char *layer, const char *target_type)
{
	int r = 0;
	char *dlid;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;

	if (!(dlid = build_dm_uuid(mem, lv->lvid.s, layer)))
		return_0;

	if (!(dmt = _setup_task(NULL, dlid, 0,
				DM_DEVICE_STATUS, 0, 0)))
		goto_bad;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &type, &params);
		if (type && strncmp(type, target_type,
				    strlen(target_type)) == 0) {
			/* Only count it when present in the live table. */
			if (info.live_table)
				r = 1;
			break;
		}
	} while (next);

out:
	dm_task_destroy(dmt);
bad:
	dm_pool_free(mem, dlid);

	return r;
}
446
8dd6036d
AK
447int add_linear_area_to_dtree(struct dm_tree_node *node, uint64_t size, uint32_t extent_size, int use_linear_target, const char *vgname, const char *lvname)
448{
449 uint32_t page_size;
450
451 /*
452 * Use striped or linear target?
453 */
454 if (!use_linear_target) {
455 page_size = lvm_getpagesize() >> SECTOR_SHIFT;
456
457 /*
458 * We'll use the extent size as the stripe size.
459 * Extent size and page size are always powers of 2.
460 * The striped target requires that the stripe size is
461 * divisible by the page size.
462 */
463 if (extent_size >= page_size) {
464 /* Use striped target */
465 if (!dm_tree_node_add_striped_target(node, size, extent_size))
466 return_0;
467 return 1;
468 } else
469 /* Some exotic cases are unsupported by striped. */
470 log_warn("WARNING: Using linear target for %s/%s: Striped requires extent size (%" PRIu32 " sectors) >= page size (%" PRIu32 ").",
471 vgname, lvname, extent_size, page_size);
472 }
473
474 /*
475 * Use linear target.
476 */
477 if (!dm_tree_node_add_linear_target(node, size))
478 return_0;
479
480 return 1;
481}
482
/*
 * Fold two percentage values into one.  Failure and invalid states are
 * sticky (checked first, in that priority order); the extremes 0% and
 * 100% are only preserved when both inputs agree; anything else is
 * recomputed from the accumulated numerator/denominator totals.
 */
static percent_range_t _combine_percent(percent_t a, percent_t b,
					uint32_t numerator, uint32_t denominator)
{
	if (a == PERCENT_MERGE_FAILED || b == PERCENT_MERGE_FAILED)
		return PERCENT_MERGE_FAILED;

	if (a == PERCENT_INVALID || b == PERCENT_INVALID)
		return PERCENT_INVALID;

	if (a == PERCENT_100 && b == PERCENT_100)
		return PERCENT_100;

	if (a == PERCENT_0 && b == PERCENT_0)
		return PERCENT_0;

	return (percent_range_t) make_percent(numerator, denominator);
}
500
/*
 * Query (or, with wait set, block on the next event of) a device and
 * combine the per-segment percentages reported by the segtype handlers
 * into one overall percentage.  When lv is supplied, its metadata
 * segment list is walked in lockstep with the kernel target lines and
 * a mismatch in count is an error.  Returns 1 on success.
 */
static int _percent_run(struct dev_manager *dm, const char *name,
			const char *dlid,
			const char *target_type, int wait,
			const struct logical_volume *lv, percent_t *overall_percent,
			uint32_t *event_nr, int fail_if_percent_unsupported)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;
	const struct dm_list *segh = lv ? &lv->segments : NULL;
	struct lv_segment *seg = NULL;
	struct segment_type *segtype;
	int first_time = 1;
	percent_t percent;

	uint64_t total_numerator = 0, total_denominator = 0;

	*overall_percent = PERCENT_INVALID;

	if (!(dmt = _setup_task(name, dlid, event_nr,
				wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, 0, 0)))
		return_0;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	if (event_nr)
		*event_nr = info.event_nr;

	do {
		next = dm_get_next_target(dmt, next, &start, &length, &type,
					  &params);
		/* Advance the metadata segment in step with the kernel
		 * target line. */
		if (lv) {
			if (!(segh = dm_list_next(&lv->segments, segh))) {
				log_error("Number of segments in active LV %s "
					  "does not match metadata", lv->name);
				goto out;
			}
			seg = dm_list_item(segh, struct lv_segment);
		}

		if (!type || !params)
			continue;

		if (!(segtype = get_segtype_from_string(dm->cmd, target_type)))
			continue;

		if (strcmp(type, target_type)) {
			/* If kernel's type isn't an exact match is it compatible? */
			if (!segtype->ops->target_status_compatible ||
			    !segtype->ops->target_status_compatible(type))
				continue;
		}

		/* Segtypes without percent support are simply skipped. */
		if (!segtype->ops->target_percent)
			continue;

		if (!segtype->ops->target_percent(&dm->target_state,
						  &percent, dm->mem,
						  dm->cmd, seg, params,
						  &total_numerator,
						  &total_denominator))
			goto_out;

		if (first_time) {
			*overall_percent = percent;
			first_time = 0;
		} else
			*overall_percent =
				_combine_percent(*overall_percent, percent,
						 total_numerator, total_denominator);
	} while (next);

	/* The kernel must not have fewer target lines than metadata
	 * segments either. */
	if (lv && dm_list_next(&lv->segments, segh)) {
		log_error("Number of segments in active LV %s does not "
			  "match metadata", lv->name);
		goto out;
	}

	if (first_time) {
		/* above ->target_percent() was not executed! */
		/* FIXME why return PERCENT_100 et. al. in this case? */
		*overall_percent = PERCENT_100;
		if (fail_if_percent_unsupported)
			goto_out;
	}

	log_debug("LV percent: %f", percent_to_float(*overall_percent));
	r = 1;

      out:
	dm_task_destroy(dmt);
	return r;
}
605
/*
 * Wrapper around _percent_run(): try lookup by dlid first (then with
 * the "LVM-" uuid prefix stripped, mirroring _info()'s fallback), and
 * finally by device name.  Returns 1 on the first success.
 */
static int _percent(struct dev_manager *dm, const char *name, const char *dlid,
		    const char *target_type, int wait,
		    const struct logical_volume *lv, percent_t *percent,
		    uint32_t *event_nr, int fail_if_percent_unsupported)
{
	if (dlid && *dlid) {
		if (_percent_run(dm, NULL, dlid, target_type, wait, lv, percent,
				 event_nr, fail_if_percent_unsupported))
			return 1;
		else if (_percent_run(dm, NULL, dlid + sizeof(UUID_PREFIX) - 1,
				      target_type, wait, lv, percent,
				      event_nr, fail_if_percent_unsupported))
			return 1;
	}

	if (name && _percent_run(dm, name, NULL, target_type, wait, lv, percent,
				 event_nr, fail_if_percent_unsupported))
		return 1;

	return 0;
}
627
85ed4030 628/* FIXME Merge with the percent function */
d345bf2c
PR
629int dev_manager_transient(struct dev_manager *dm, struct logical_volume *lv)
630{
631 int r = 0;
632 struct dm_task *dmt;
633 struct dm_info info;
634 void *next = NULL;
635 uint64_t start, length;
636 char *type = NULL;
637 char *params = NULL;
638 char *dlid = NULL;
2d6fcbf6 639 const char *layer = lv_is_origin(lv) ? "real" : NULL;
d345bf2c
PR
640 const struct dm_list *segh = &lv->segments;
641 struct lv_segment *seg = NULL;
642
2d6fcbf6 643 if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
d345bf2c
PR
644 return_0;
645
646 if (!(dmt = _setup_task(0, dlid, NULL, DM_DEVICE_STATUS, 0, 0)))
647 return_0;
648
649 if (!dm_task_no_open_count(dmt))
650 log_error("Failed to disable open_count");
651
652 if (!dm_task_run(dmt))
653 goto_out;
654
655 if (!dm_task_get_info(dmt, &info) || !info.exists)
656 goto_out;
657
658 do {
659 next = dm_get_next_target(dmt, next, &start, &length, &type,
660 &params);
81e606ab
ZK
661
662 if (!(segh = dm_list_next(&lv->segments, segh))) {
663 log_error("Number of segments in active LV %s "
664 "does not match metadata", lv->name);
665 goto out;
d345bf2c 666 }
81e606ab 667 seg = dm_list_item(segh, struct lv_segment);
d345bf2c
PR
668
669 if (!type || !params)
670 continue;
671
672 if (seg->segtype->ops->check_transient_status &&
673 !seg->segtype->ops->check_transient_status(seg, params))
674 goto_out;
675
676 } while (next);
677
f2554b9d 678 if (dm_list_next(&lv->segments, segh)) {
d345bf2c
PR
679 log_error("Number of segments in active LV %s does not "
680 "match metadata", lv->name);
681 goto out;
682 }
683
684 r = 1;
685
686 out:
687 dm_task_destroy(dmt);
688 return r;
689}
690
/*
 * dev_manager implementation.
 */

/*
 * Allocate a dev_manager for one VG.  Everything, including the
 * dev_manager itself, lives in a private pool so dev_manager_destroy()
 * can release it all at once.
 */
struct dev_manager *dev_manager_create(struct cmd_context *cmd,
				       const char *vg_name,
				       unsigned track_pvmove_deps)
{
	struct dm_pool *mem;
	struct dev_manager *dm;

	if (!(mem = dm_pool_create("dev_manager", 16 * 1024)))
		return_NULL;

	if (!(dm = dm_pool_zalloc(mem, sizeof(*dm))))
		goto_bad;

	dm->cmd = cmd;
	dm->mem = mem;

	if (!(dm->vg_name = dm_pool_strdup(dm->mem, vg_name)))
		goto_bad;

	/*
	 * When we manipulate (normally suspend/resume) the PVMOVE
	 * device directly, there's no need to touch the LVs above.
	 */
	dm->track_pvmove_deps = track_pvmove_deps;

	dm->target_state = NULL;

	dm_udev_set_sync_support(cmd->current_settings.udev_sync);

	return dm;

      bad:
	dm_pool_destroy(mem);
	return NULL;
}
729
/* Free all dev_manager state; dm itself was allocated from dm->mem. */
void dev_manager_destroy(struct dev_manager *dm)
{
	dm_pool_destroy(dm->mem);
}
36902810 734
/* Release libdevmapper-held resources between operations. */
void dev_manager_release(void)
{
	dm_lib_release();
}
739
/* Final libdevmapper cleanup, for process exit. */
void dev_manager_exit(void)
{
	dm_lib_exit();
}
744
/*
 * Fetch the fill percentage of a snapshot.  For a merging snapshot the
 * origin LV is queried instead of the cow.  Returns 1 on success with
 * *percent set (PERCENT_INVALID when the snapshot isn't available).
 */
int dev_manager_snapshot_percent(struct dev_manager *dm,
				 const struct logical_volume *lv,
				 percent_t *percent)
{
	const struct logical_volume *snap_lv;
	char *name;
	const char *dlid;
	int fail_if_percent_unsupported = 0;

	if (lv_is_merging_origin(lv)) {
		/*
		 * Set 'fail_if_percent_unsupported', otherwise passing
		 * unsupported LV types to _percent will lead to a default
		 * successful return with percent_range as PERCENT_100.
		 * - For a merging origin, this will result in a polldaemon
		 *   that runs infinitely (because completion is PERCENT_0)
		 * - We unfortunately don't yet _know_ if a snapshot-merge
		 *   target is active (activation is deferred if dev is open);
		 *   so we can't short-circuit origin devices based purely on
		 *   existing LVM LV attributes.
		 */
		fail_if_percent_unsupported = 1;
	}

	if (lv_is_merging_cow(lv)) {
		/* must check percent of origin for a merging snapshot */
		snap_lv = origin_from_cow(lv);
	} else
		snap_lv = lv;

	/*
	 * Build a name for the top layer.
	 */
	if (!(name = dm_build_dm_name(dm->mem, snap_lv->vg->name, snap_lv->name, NULL)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, snap_lv->lvid.s, NULL)))
		return_0;

	/*
	 * Try and get some info on this device.
	 */
	log_debug("Getting device status percentage for %s", name);
	if (!(_percent(dm, name, dlid, "snapshot", 0, NULL, percent,
		       NULL, fail_if_percent_unsupported)))
		return_0;

	/* If the snapshot isn't available, percent will be -1 */
	return 1;
}
795
/* FIXME Merge with snapshot_percent, auto-detecting target type */
/* FIXME Cope with more than one target */
/*
 * Fetch (or wait for) the sync percentage of a mirror-like LV, using
 * the LV's first segment's segtype name as the target type to query.
 */
int dev_manager_mirror_percent(struct dev_manager *dm,
			       const struct logical_volume *lv, int wait,
			       percent_t *percent, uint32_t *event_nr)
{
	char *name;
	const char *dlid;
	const char *target_type = first_seg(lv)->segtype->name;
	/* A snapshot origin's table lives on the hidden "real" layer. */
	const char *layer = (lv_is_origin(lv)) ? "real" : NULL;

	/*
	 * Build a name for the top layer.
	 */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer))) {
		log_error("dlid build failed for %s", lv->name);
		return 0;
	}

	log_debug("Getting device %s status percentage for %s",
		  target_type, name);
	if (!(_percent(dm, name, dlid, target_type, wait, lv, percent,
		       event_nr, 0)))
		return_0;

	return 1;
}
914c9723 826
5f4b2acf
AK
827#if 0
828 log_very_verbose("%s %s", sus ? "Suspending" : "Resuming", name);
36902810 829
5f4b2acf
AK
830 log_verbose("Loading %s", dl->name);
831 log_very_verbose("Activating %s read-only", dl->name);
914c9723
AK
832 log_very_verbose("Activated %s %s %03u:%03u", dl->name,
833 dl->dlid, dl->info.major, dl->info.minor);
834
a69de491 835 if (_get_flag(dl, VISIBLE))
a76ba817 836 log_verbose("Removing %s", dl->name);
a69de491 837 else
a76ba817 838 log_very_verbose("Removing %s", dl->name);
a69de491 839
5f4b2acf
AK
840 log_debug("Adding target: %" PRIu64 " %" PRIu64 " %s %s",
841 extent_size * seg->le, extent_size * seg->len, target, params);
fc28b60f 842
5f4b2acf
AK
843 log_debug("Adding target: 0 %" PRIu64 " snapshot-origin %s",
844 dl->lv->size, params);
845 log_debug("Adding target: 0 %" PRIu64 " snapshot %s", size, params);
846 log_debug("Getting device info for %s", dl->name);
e9c761b8 847
5f4b2acf 848 /* Rename? */
13835b5f 849 if ((suffix = strrchr(dl->dlid + sizeof(UUID_PREFIX) - 1, '-')))
5f4b2acf 850 suffix++;
e59e2f7c 851 new_name = dm_build_dm_name(dm->mem, dm->vg_name, dl->lv->name,
5f4b2acf 852 suffix);
a69de491 853
5f4b2acf
AK
854static int _belong_to_vg(const char *vgname, const char *name)
855{
856 const char *v = vgname, *n = name;
fc28b60f 857
5f4b2acf
AK
858 while (*v) {
859 if ((*v != *n) || (*v == '-' && *(++n) != '-'))
860 return 0;
861 v++, n++;
69e8a53e 862 }
a76ba817 863
5f4b2acf
AK
864 if (*n == '-' && *(n + 1) != '-')
865 return 1;
866 else
867 return 0;
868}
69e8a53e 869
072893aa 870 if (!(snap_seg = find_cow(lv)))
69e8a53e
AK
871 return 1;
872
072893aa 873 old_origin = snap_seg->origin;
69e8a53e
AK
874
875 /* Was this the last active snapshot with this origin? */
2c44337b 876 dm_list_iterate_items(lvl, active_head) {
60f13f01 877 active = lvl->lv;
072893aa
AK
878 if ((snap_seg = find_cow(active)) &&
879 snap_seg->origin == old_origin) {
69e8a53e 880 return 1;
25b73380 881 }
a76ba817
AK
882 }
883
5f4b2acf
AK
884#endif
885
bdba904d
ZK
886int dev_manager_thin_pool_status(struct dev_manager *dm,
887 const struct logical_volume *lv,
888 struct dm_status_thin_pool **status)
889{
890 const char *dlid;
891 struct dm_task *dmt;
892 struct dm_info info;
893 uint64_t start, length;
894 char *type = NULL;
895 char *params = NULL;
896 int r = 0;
897
898 /* Build dlid for the thin pool layer */
899 if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
900 return_0;
901
902 log_debug("Getting thin pool device status for %s.", lv->name);
903
904 if (!(dmt = _setup_task(NULL, dlid, 0, DM_DEVICE_STATUS, 0, 0)))
905 return_0;
906
907 if (!dm_task_no_open_count(dmt))
908 log_error("Failed to disable open_count.");
909
910 if (!dm_task_run(dmt))
911 goto_out;
912
913 if (!dm_task_get_info(dmt, &info) || !info.exists)
914 goto_out;
915
916 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
917
918 if (!dm_get_status_thin_pool(dm->mem, params, status))
919 goto_out;
920
921 r = 1;
922out:
923 dm_task_destroy(dmt);
924
925 return r;
926}
927
/*
 * Fetch the fill percentage of a thin pool's data area — or, with
 * metadata set, pass the LV through to the segtype handler so the
 * metadata usage is reported instead.
 */
int dev_manager_thin_pool_percent(struct dev_manager *dm,
				  const struct logical_volume *lv,
				  int metadata, percent_t *percent)
{
	char *name;
	const char *dlid;

	/* Build a name for the top layer */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name,
				      _thin_layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
		return_0;

	log_debug("Getting device status percentage for %s", name);
	if (!(_percent(dm, name, dlid, "thin-pool", 0,
		       (metadata) ? lv : NULL, percent, NULL, 1)))
		return_0;

	return 1;
}
950
/*
 * Fetch the percentage for a thin volume.  With 'mapped' set, no LV is
 * passed to _percent() so the kernel figures are used without
 * cross-checking against the LV's metadata segments.
 */
int dev_manager_thin_percent(struct dev_manager *dm,
			     const struct logical_volume *lv,
			     int mapped, percent_t *percent)
{
	char *name;
	const char *dlid;
	/* A snapshot origin's table lives on the hidden "real" layer. */
	const char *layer = lv_is_origin(lv) ? "real" : NULL;

	/* Build a name for the top layer */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
		return_0;

	log_debug("Getting device status percentage for %s", name);
	if (!(_percent(dm, name, dlid, "thin", 0,
		       (mapped) ? NULL : lv, percent, NULL, 1)))
		return_0;

	return 1;
}
973
5f4b2acf
AK
974/*************************/
975/* NEW CODE STARTS HERE */
976/*************************/
977
/* Create the /dev filesystem entries for an active LV via fs_add_lv(). */
static int _dev_manager_lv_mknodes(const struct logical_volume *lv)
{
	char *name;

	if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name,
				      lv->name, NULL)))
		return_0;

	return fs_add_lv(lv, name);
}
988
/* Remove the /dev filesystem entries for an LV via fs_del_lv(). */
static int _dev_manager_lv_rmnodes(const struct logical_volume *lv)
{
	return fs_del_lv(lv);
}
993
/*
 * Synchronise the /dev entries for an LV with its device state: run a
 * MKNODES ioctl, then add nodes if the device exists and is visible,
 * or remove them if it doesn't exist.
 */
int dev_manager_mknodes(const struct logical_volume *lv)
{
	struct dm_info dminfo;
	char *name;
	int r = 0;

	if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name, lv->name, NULL)))
		return_0;

	if ((r = _info_run(name, NULL, &dminfo, NULL, 1, 0, 0, 0, 0))) {
		if (dminfo.exists) {
			/* Hidden (non-visible) LVs get no public nodes. */
			if (lv_is_visible(lv))
				r = _dev_manager_lv_mknodes(lv);
		} else
			r = _dev_manager_lv_rmnodes(lv);
	}

	dm_pool_free(lv->vg->cmd->mem, name);
	return r;
}
1014
/*
 * Compute the udev control flags for a dm node belonging to this LV
 * and layer: which udev rule classes to suppress, symlink priority,
 * and whether libdevmapper's fallback node creation applies.
 */
static uint16_t _get_udev_flags(struct dev_manager *dm, struct logical_volume *lv,
				const char *layer)
{
	uint16_t udev_flags = 0;

	/*
	 * Instruct also libdevmapper to disable udev
	 * fallback in accordance to LVM2 settings.
	 */
	if (!dm->cmd->current_settings.udev_fallback)
		udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;

	/*
	 * Is this top-level and visible device?
	 * If not, create just the /dev/mapper content.
	 */
	/* FIXME: add target's method for this */
	if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv))
		udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
			      DM_UDEV_DISABLE_DISK_RULES_FLAG |
			      DM_UDEV_DISABLE_OTHER_RULES_FLAG;
	/*
	 * There's no need for other udev rules to touch special LVs with
	 * reserved names. We don't need to populate /dev/disk here either.
	 * Even if they happen to be visible and top-level.
	 */
	else if (is_reserved_lvname(lv->name))
		udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
			      DM_UDEV_DISABLE_OTHER_RULES_FLAG;

	/*
	 * Snapshots and origins could have the same rule applied that will
	 * give symlinks exactly the same name (e.g. a name based on
	 * filesystem UUID). We give preference to origins to make such
	 * naming deterministic (e.g. symlinks in /dev/disk/by-uuid).
	 */
	if (lv_is_cow(lv))
		udev_flags |= DM_UDEV_LOW_PRIORITY_FLAG;

	/*
	 * Finally, add flags to disable /dev/mapper and /dev/<vgname> content
	 * to be created by udev if it is requested by user's configuration.
	 * This is basically an explicit fallback to old node/symlink creation
	 * without udev.
	 */
	if (!dm->cmd->current_settings.udev_rules)
		udev_flags |= DM_UDEV_DISABLE_DM_RULES_FLAG |
			      DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG;

	return udev_flags;
}
1066
/*
 * Add the existing dm device backing @lv (or its @layer sub-device) to
 * @dtree, if it is currently active.  Also validates that any explicitly
 * requested major/minor for a top-level LV matches the live device and is
 * not already taken.  Returns 1 on success (including "device not active"),
 * 0 on error.
 */
static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
			     struct logical_volume *lv, const char *layer)
{
	char *dlid, *name;
	struct dm_info info, info2;

	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
		return_0;

	log_debug("Getting device info for %s [%s]", name, dlid);
	if (!_info(dlid, 1, 0, &info, NULL)) {
		log_error("Failed to get info for %s [%s].", name, dlid);
		return 0;
	}

	/*
	 * For top level volumes verify that existing device match
	 * requested major/minor and that major/minor pair is available for use
	 */
	if (!layer && lv->major != -1 && lv->minor != -1) {
		/*
		 * FIXME compare info.major with lv->major if multiple major support
		 */
		/* Active device must already sit on the requested minor... */
		if (info.exists && (info.minor != lv->minor)) {
			log_error("Volume %s (%" PRIu32 ":%" PRIu32")"
				  " differs from already active device "
				  "(%" PRIu32 ":%" PRIu32")",
				  lv->name, lv->major, lv->minor, info.major, info.minor);
			return 0;
		}
		/* ...and if inactive, the requested pair must be free. */
		if (!info.exists && _info_by_dev(lv->major, lv->minor, &info2) &&
		    info2.exists) {
			log_error("The requested major:minor pair "
				  "(%" PRIu32 ":%" PRIu32") is already used",
				  lv->major, lv->minor);
			return 0;
		}
	}

	/* Only devices that actually exist are inserted into the tree. */
	if (info.exists && !dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
							    _get_udev_flags(dm, lv, layer))) {
		log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree",
			  info.major, info.minor);
		return 0;
	}

	return 1;
}
1118
/*
 * Add replicator devices
 *
 * Using _add_dev_to_dtree() directly instead of _add_lv_to_dtree()
 * to avoid extra checks with extensions.
 *
 * For an inactive replicator-dev only the remote device's LV is added;
 * for an active one, the replicator head, its log, and every related
 * rimage/replicator-dev/slog device across all sites are added, and
 * presuspend dependencies are wired to the replicator node when present.
 */
static int _add_partial_replicator_to_dtree(struct dev_manager *dm,
					    struct dm_tree *dtree,
					    struct logical_volume *lv)
{
	struct logical_volume *rlv = first_seg(lv)->replicator;
	struct replicator_device *rdev;
	struct replicator_site *rsite;
	struct dm_tree_node *rep_node, *rdev_node;
	const char *uuid;

	if (!lv_is_active_replicator_dev(lv)) {
		if (!_add_dev_to_dtree(dm, dtree, lv->rdevice->lv,
				       NULL))
			return_0;
		return 1;
	}

	/* Add _rlog and replicator device */
	if (!_add_dev_to_dtree(dm, dtree, first_seg(rlv)->rlog_lv, NULL))
		return_0;

	if (!_add_dev_to_dtree(dm, dtree, rlv, NULL))
		return_0;

	if (!(uuid = build_dm_uuid(dm->mem, rlv->lvid.s, NULL)))
		return_0;

	/* May be NULL if the replicator head is not in the tree yet. */
	rep_node = dm_tree_find_node_by_uuid(dtree, uuid);

	/* Add all related devices for replicator */
	dm_list_iterate_items(rsite, &rlv->rsites)
		dm_list_iterate_items(rdev, &rsite->rdevices) {
			if (rsite->state == REPLICATOR_STATE_ACTIVE) {
				/* Add _rimage LV */
				if (!_add_dev_to_dtree(dm, dtree, rdev->lv, NULL))
					return_0;

				/* Add replicator-dev LV, except of the already added one */
				if ((lv != rdev->replicator_dev->lv) &&
				    !_add_dev_to_dtree(dm, dtree,
						       rdev->replicator_dev->lv, NULL))
					return_0;

				/* If replicator exists - try connect existing heads */
				if (rep_node) {
					uuid = build_dm_uuid(dm->mem,
							     rdev->replicator_dev->lv->lvid.s,
							     NULL);
					if (!uuid)
						return_0;

					rdev_node = dm_tree_find_node_by_uuid(dtree, uuid);
					if (rdev_node)
						dm_tree_node_set_presuspend_node(rdev_node,
										 rep_node);
				}
			}

			/* NOTE(review): rdevs without a vg_name appear to be
			 * remote/unbound — they are skipped below; confirm. */
			if (!rdev->rsite->vg_name)
				continue;

			if (!_add_dev_to_dtree(dm, dtree, rdev->lv, NULL))
				return_0;

			if (rdev->slog &&
			    !_add_dev_to_dtree(dm, dtree, rdev->slog, NULL))
				return_0;
		}

	return 1;
}
1196
/* Context passed to _thin_pool_callback() via dm_tree_node_set_callback(). */
struct thin_cb_data {
	const struct logical_volume *pool_lv;	/* thin pool whose metadata is checked */
	struct dev_manager *dm;			/* owning dev_manager (memory pool) */
};
1201
/*
 * dm_tree node callback: run the configured thin_check executable against
 * the thin pool's metadata device before/after (de)activation.
 *
 * Returns 1 when checking is disabled or the check passes, 0 on failure
 * to build the command or when exec_cmd() reports failure.
 */
static int _thin_pool_callback(struct dm_tree_node *node,
			       dm_node_callback_t type, void *cb_data)
{
	int ret, status;
	const struct thin_cb_data *data = cb_data;
	const char *dmdir = dm_dir();
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;
	const char *thin_check =
		find_config_tree_str_allow_empty(data->pool_lv->vg->cmd,
						 "global/thin_check_executable",
						 THIN_CHECK_CMD);
	const struct logical_volume *mlv = first_seg(data->pool_lv)->metadata_lv;
	/* Room for "<dmdir>/<vg>-<lv>" with every '-' in names doubled, + NUL. */
	size_t len = strlen(dmdir) + 2 * (strlen(mlv->vg->name) + strlen(mlv->name)) + 3;
	char meta_path[len];
	int args = 0;
	/* argv[0]=command, argv[1..16]=options, then meta_path and NULL. */
	const char *argv[19]; /* Max supported 15 args */
	char *split, *dm_name;

	if (!thin_check[0])
		return 1; /* Checking disabled */

	if (!(dm_name = dm_build_dm_name(data->dm->mem, mlv->vg->name,
					 mlv->name, NULL)) ||
	    (dm_snprintf(meta_path, len, "%s/%s", dmdir, dm_name) < 0)) {
		log_error("Failed to build thin metadata path.");
		return 0;
	}

	/* Options come either from the config tree or from built-in defaults. */
	if ((cn = find_config_tree_node(mlv->vg->cmd, "global/thin_check_options"))) {
		for (cv = cn->v; cv && args < 16; cv = cv->next) {
			if (cv->type != DM_CFG_STRING) {
				log_error("Invalid string in config file: "
					  "global/thin_check_options");
				return 0;
			}
			argv[++args] = cv->v.str;
		}
	} else {
		/* Use default options (no support for options with spaces) */
		if (!(split = dm_pool_strdup(data->dm->mem, DEFAULT_THIN_CHECK_OPTIONS))) {
			log_error("Failed to duplicate thin check string.");
			return 0;
		}
		args = dm_split_words(split, 16, 0, (char**) argv + 1);
	}

	/* args == 16 means the option list was truncated — refuse to run. */
	if (args == 16) {
		log_error("Too many options for thin check command.");
		return 0;
	}

	argv[0] = thin_check;
	argv[++args] = meta_path;
	argv[++args] = NULL;

	if (!(ret = exec_cmd(data->pool_lv->vg->cmd, (const char * const *)argv,
			     &status, 0))) {
		/* Severity depends on when the check ran in the tree walk. */
		switch (type) {
		case DM_NODE_CALLBACK_PRELOADED:
			log_err_once("Check of thin pool %s/%s failed (status:%d). "
				     "Manual repair required (thin_dump --repair %s)!",
				     data->pool_lv->vg->name, data->pool_lv->name,
				     status, meta_path);
			break;
		default:
			log_warn("WARNING: Integrity check of metadata for thin pool "
				 "%s/%s failed.",
				 data->pool_lv->vg->name, data->pool_lv->name);
		}
		/*
		 * FIXME: What should we do here??
		 *
		 * Maybe mark the node, so it's not activating
		 * as thin_pool but as error/linear and let the
		 * dm tree resolve the issue.
		 */
	}

	dm_pool_free(data->dm->mem, dm_name);

	return ret;
}
1285
1286static int _thin_pool_register_callback(struct dev_manager *dm,
1287 struct dm_tree_node *node,
1288 const struct logical_volume *lv)
1289{
1290 struct thin_cb_data *data;
1291
1292 /* Skip metadata testing for unused pool. */
1293 if (!first_seg(lv)->transaction_id)
1294 return 1;
1295
1296 if (!(data = dm_pool_alloc(dm->mem, sizeof(*data)))) {
1297 log_error("Failed to allocated path for callback.");
1298 return 0;
1299 }
1300
1301 data->dm = dm;
1302 data->pool_lv = lv;
1303
1304 dm_tree_node_set_callback(node, _thin_pool_callback, data);
1305
1306 return 1;
1307}
1308
/*
 * Add LV and any known dependencies
 *
 * Recursively inserts the active dm devices for @lv plus everything it
 * depends on (snapshot layers, mirror log, RAID metadata, pvmove users,
 * replicator devices and thin pool components) into @dtree.
 */
static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
			    struct logical_volume *lv, int origin_only)
{
	uint32_t s;
	struct seg_list *sl;
	struct lv_segment *seg = first_seg(lv);
	struct dm_tree_node *thin_node;
	const char *uuid;

	/* Thin volumes are always added, even in origin_only mode. */
	if ((!origin_only || lv_is_thin_volume(lv)) &&
	    !_add_dev_to_dtree(dm, dtree, lv, NULL))
		return_0;

	/* FIXME Can we avoid doing this every time? */
	if (!_add_dev_to_dtree(dm, dtree, lv, "real"))
		return_0;

	if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, "cow"))
		return_0;

	/* Mirror log device, if present. */
	if ((lv->status & MIRRORED) && seg->log_lv &&
	    !_add_dev_to_dtree(dm, dtree, seg->log_lv, NULL))
		return_0;

	/* RAID metadata sub-LVs, one per area. */
	if (lv->status & RAID)
		for (s = 0; s < seg->area_count; s++)
			if (!_add_dev_to_dtree(dm, dtree,
					       seg_metalv(seg, s), NULL))
				return_0;

	/* Add any LVs referencing a PVMOVE LV unless told not to. */
	if (dm->track_pvmove_deps && lv->status & PVMOVE)
		dm_list_iterate_items(sl, &lv->segs_using_this_lv)
			if (!_add_lv_to_dtree(dm, dtree, sl->seg->lv, origin_only))
				return_0;

	/* Adding LV head of replicator adds all other related devs */
	if (lv_is_replicator_dev(lv) &&
	    !_add_partial_replicator_to_dtree(dm, dtree, lv))
		return_0;

	if (lv_is_thin_volume(lv)) {
#if 0
		/* FIXME Implement dm_tree_node_skip_children optimisation */
		if (origin_only) {
			if (!(uuid = build_dm_uuid(dm->mem, lv->lvid.s, NULL)))
				return_0;
			if ((thin_node = dm_tree_find_node_by_uuid(dtree, uuid)))
				dm_tree_node_skip_children(thin_node, 1);
		}
#endif
		/* Add thin pool LV layer */
		/* Note: from here on, lv/seg refer to the POOL, not the thin LV. */
		lv = seg->pool_lv;
		seg = first_seg(lv);
	}

	if (lv_is_thin_pool(lv)) {
		if (!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
			return_0;
		/* FIXME code from _create_partial_dtree() should be moved here */
		if (!_add_lv_to_dtree(dm, dtree, seg_lv(seg, 0), 0))
			return_0;
		if (!_add_dev_to_dtree(dm, dtree, lv, _thin_layer))
			return_0;
		/* If the partial tree is used for deactivation, setup callback */
		if (!(uuid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
			return_0;
		if ((thin_node = dm_tree_find_node_by_uuid(dtree, uuid)) &&
		    !_thin_pool_register_callback(dm, thin_node, lv))
			return_0;
	}

	return 1;
}
1386
/*
 * Build a partial dm_tree seeded from @lv: the LV itself, its snapshots
 * (unless origin_only), and every LV referenced by its segments.
 * Returns the new tree, or NULL on failure (tree freed on error path).
 */
static struct dm_tree *_create_partial_dtree(struct dev_manager *dm, struct logical_volume *lv, int origin_only)
{
	struct dm_tree *dtree;
	struct dm_list *snh;
	struct lv_segment *seg;
	uint32_t s;

	if (!(dtree = dm_tree_create())) {
		log_debug("Partial dtree creation failed for %s.", lv->name);
		return NULL;
	}

	/* origin_only is honoured only for origins and thin volumes. */
	if (!_add_lv_to_dtree(dm, dtree, lv, (lv_is_origin(lv) || lv_is_thin_volume(lv)) ? origin_only : 0))
		goto_bad;

	/* Add any snapshots of this LV */
	if (!origin_only && lv_is_origin(lv))
		dm_list_iterate(snh, &lv->snapshot_segs)
			if (!_add_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow, 0))
				goto_bad;

	/* Add any LVs used by segments in this LV */
	dm_list_iterate_items(seg, &lv->segments)
		for (s = 0; s < seg->area_count; s++)
			if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s)) {
				if (!_add_lv_to_dtree(dm, dtree, seg_lv(seg, s), 0))
					goto_bad;
			}

	return dtree;

bad:
	dm_tree_free(dtree);
	return NULL;
}
1422
8c5bcdab
AK
1423static char *_add_error_device(struct dev_manager *dm, struct dm_tree *dtree,
1424 struct lv_segment *seg, int s)
1425{
067184f3 1426 char *dlid, *name;
8c5bcdab
AK
1427 char errid[32];
1428 struct dm_tree_node *node;
1429 struct lv_segment *seg_i;
067184f3 1430 struct dm_info info;
aaf92617 1431 int segno = -1, i = 0;
aeaec150 1432 uint64_t size = (uint64_t) seg->len * seg->lv->vg->extent_size;
8c5bcdab 1433
2c44337b 1434 dm_list_iterate_items(seg_i, &seg->lv->segments) {
8c5bcdab
AK
1435 if (seg == seg_i)
1436 segno = i;
1437 ++i;
1438 }
1439
1440 if (segno < 0) {
1441 log_error("_add_error_device called with bad segment");
3df790d9 1442 return NULL;
8c5bcdab
AK
1443 }
1444
1445 sprintf(errid, "missing_%d_%d", segno, s);
1446
067184f3 1447 if (!(dlid = build_dm_uuid(dm->mem, seg->lv->lvid.s, errid)))
8c5bcdab
AK
1448 return_NULL;
1449
e59e2f7c 1450 if (!(name = dm_build_dm_name(dm->mem, seg->lv->vg->name,
8c5bcdab
AK
1451 seg->lv->name, errid)))
1452 return_NULL;
8c5bcdab 1453
067184f3
AK
1454 log_debug("Getting device info for %s [%s]", name, dlid);
1455 if (!_info(dlid, 1, 0, &info, NULL)) {
1456 log_error("Failed to get info for %s [%s].", name, dlid);
1457 return 0;
1458 }
1459
1460 if (!info.exists) {
1461 /* Create new node */
1462 if (!(node = dm_tree_add_new_dev(dtree, name, dlid, 0, 0, 0, 0, 0)))
1463 return_NULL;
1464 if (!dm_tree_node_add_error_target(node, size))
1465 return_NULL;
1466 } else {
1467 /* Already exists */
1468 if (!dm_tree_add_dev(dtree, info.major, info.minor)) {
1469 log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree",
1470 info.major, info.minor);
1471 return_NULL;
1472 }
1473 }
1474
1475 return dlid;
8c5bcdab
AK
1476}
1477
/*
 * Map a missing area @s of @seg either to a dedicated error device
 * (stripe_filler == "error") or to the configured filler device.
 * Returns 1 on success, 0 on failure.
 */
static int _add_error_area(struct dev_manager *dm, struct dm_tree_node *node,
			   struct lv_segment *seg, int s)
{
	char *dlid;
	uint64_t extent_size = seg->lv->vg->extent_size;

	if (!strcmp(dm->cmd->stripe_filler, "error")) {
		/*
		 * FIXME, the tree pointer is first field of dm_tree_node, but
		 * we don't have the struct definition available.
		 */
		struct dm_tree **tree = (struct dm_tree **) node;
		if (!(dlid = _add_error_device(dm, *tree, seg, s)))
			return_0;
		if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
			return_0;
	} else
		/* Use the filler device path directly, with offset 0. */
		if (!dm_tree_node_add_target_area(node, dm->cmd->stripe_filler, NULL, UINT64_C(0)))
			return_0;

	return 1;
}
1500
5f4b2acf 1501int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
72b2cb61
AK
1502 struct dm_tree_node *node, uint32_t start_area,
1503 uint32_t areas)
323a167b 1504{
5f4b2acf
AK
1505 uint64_t extent_size = seg->lv->vg->extent_size;
1506 uint32_t s;
2ed2a724 1507 char *dlid;
f5f3defc
AK
1508 struct stat info;
1509 const char *name;
ab852ffe
ZK
1510 unsigned num_error_areas = 0;
1511 unsigned num_existing_areas = 0;
323a167b 1512
f5f3defc 1513 /* FIXME Avoid repeating identical stat in dm_tree_node_add_target_area */
5f4b2acf
AK
1514 for (s = start_area; s < areas; s++) {
1515 if ((seg_type(seg, s) == AREA_PV &&
f5f3defc
AK
1516 (!seg_pvseg(seg, s) || !seg_pv(seg, s) || !seg_dev(seg, s) ||
1517 !(name = dev_name(seg_dev(seg, s))) || !*name ||
1518 stat(name, &info) < 0 || !S_ISBLK(info.st_mode))) ||
8c5bcdab 1519 (seg_type(seg, s) == AREA_LV && !seg_lv(seg, s))) {
f5f3defc
AK
1520 if (!seg->lv->vg->cmd->partial_activation) {
1521 log_error("Aborting. LV %s is now incomplete "
1522 "and --partial was not specified.", seg->lv->name);
1523 return 0;
1524 }
8c5bcdab
AK
1525 if (!_add_error_area(dm, node, seg, s))
1526 return_0;
ab852ffe 1527 num_error_areas++;
86b15c7c
AK
1528 } else if (seg_type(seg, s) == AREA_PV) {
1529 if (!dm_tree_node_add_target_area(node, dev_name(seg_dev(seg, s)), NULL,
1530 (seg_pv(seg, s)->pe_start + (extent_size * seg_pe(seg, s)))))
1531 return_0;
ab852ffe 1532 num_existing_areas++;
6d04311e
JEB
1533 } else if (seg_is_raid(seg)) {
1534 /*
1535 * RAID can handle unassigned areas. It simple puts
1536 * '- -' in for the metadata/data device pair. This
1537 * is a valid way to indicate to the RAID target that
1538 * the device is missing.
1539 *
1540 * If an image is marked as VISIBLE_LV and !LVM_WRITE,
1541 * it means the device has temporarily been extracted
1542 * from the array. It may come back at a future date,
1543 * so the bitmap must track differences. Again, '- -'
1544 * is used in the CTR table.
1545 */
1546 if ((seg_type(seg, s) == AREA_UNASSIGNED) ||
1547 ((seg_lv(seg, s)->status & VISIBLE_LV) &&
1548 !(seg_lv(seg, s)->status & LVM_WRITE))) {
1549 /* One each for metadata area and data area */
1550 if (!dm_tree_node_add_null_area(node, 0) ||
1551 !dm_tree_node_add_null_area(node, 0))
2100c90d 1552 return_0;
6d04311e 1553 continue;
cac52ca4 1554 }
6d04311e
JEB
1555 if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s)->lvid.s, NULL)))
1556 return_0;
1557 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
1558 return_0;
1559
1560 if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s)->lvid.s, NULL)))
1561 return_0;
1562 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
1563 return_0;
1564 } else if (seg_type(seg, s) == AREA_LV) {
1565
86b15c7c
AK
1566 if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s)->lvid.s, NULL)))
1567 return_0;
1568 if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
5f4b2acf 1569 return_0;
5f4b2acf 1570 } else {
550cae23 1571 log_error(INTERNAL_ERROR "Unassigned area found in LV %s.",
5f4b2acf 1572 seg->lv->name);
323a167b
JT
1573 return 0;
1574 }
5f4b2acf 1575 }
323a167b 1576
ab852ffe
ZK
1577 if (num_error_areas) {
1578 /* Thins currently do not support partial activation */
1579 if (lv_is_thin_type(seg->lv)) {
1580 log_error("Cannot activate %s%s: pool incomplete.",
1581 seg->lv->vg->name, seg->lv->name);
1582 return 0;
1583 }
ab852ffe
ZK
1584 }
1585
5f4b2acf
AK
1586 return 1;
1587}
323a167b 1588
ad6b0ebb 1589static int _add_origin_target_to_dtree(struct dev_manager *dm,
e88f56d9 1590 struct dm_tree_node *dnode,
5f4b2acf
AK
1591 struct logical_volume *lv)
1592{
1593 const char *real_dlid;
2ec94d4d 1594
bda39820 1595 if (!(real_dlid = build_dm_uuid(dm->mem, lv->lvid.s, "real")))
5f4b2acf 1596 return_0;
a9953411 1597
e88f56d9 1598 if (!dm_tree_node_add_snapshot_origin_target(dnode, lv->size, real_dlid))
5f4b2acf 1599 return_0;
323a167b
JT
1600
1601 return 1;
1602}
1603
c21b944a
MS
1604static int _add_snapshot_merge_target_to_dtree(struct dev_manager *dm,
1605 struct dm_tree_node *dnode,
1606 struct logical_volume *lv)
1607{
1608 const char *origin_dlid, *cow_dlid, *merge_dlid;
c79b4251 1609 struct lv_segment *merging_cow_seg = find_merging_cow(lv);
c21b944a 1610
bda39820 1611 if (!(origin_dlid = build_dm_uuid(dm->mem, lv->lvid.s, "real")))
c21b944a
MS
1612 return_0;
1613
bda39820 1614 if (!(cow_dlid = build_dm_uuid(dm->mem, merging_cow_seg->cow->lvid.s, "cow")))
c21b944a
MS
1615 return_0;
1616
bda39820 1617 if (!(merge_dlid = build_dm_uuid(dm->mem, merging_cow_seg->cow->lvid.s, NULL)))
c21b944a
MS
1618 return_0;
1619
1620 if (!dm_tree_node_add_snapshot_merge_target(dnode, lv->size, origin_dlid,
1621 cow_dlid, merge_dlid,
c79b4251 1622 merging_cow_seg->chunk_size))
c21b944a
MS
1623 return_0;
1624
1625 return 1;
1626}
1627
/*
 * Load a snapshot target for cow LV @lv onto @dnode.  When the snapshot is
 * being merged into its origin, an error target is loaded instead so the
 * cow device cannot be used while the merge proceeds.
 */
static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
					 struct dm_tree_node *dnode,
					 struct logical_volume *lv,
					 struct lv_activate_opts *laopts)
{
	const char *origin_dlid;
	const char *cow_dlid;
	struct lv_segment *snap_seg;
	uint64_t size;

	if (!(snap_seg = find_cow(lv))) {
		log_error("Couldn't find snapshot for '%s'.", lv->name);
		return 0;
	}

	if (!(origin_dlid = build_dm_uuid(dm->mem, snap_seg->origin->lvid.s, "real")))
		return_0;

	if (!(cow_dlid = build_dm_uuid(dm->mem, snap_seg->cow->lvid.s, "cow")))
		return_0;

	/* Snapshot size in sectors, from extents. */
	size = (uint64_t) snap_seg->len * snap_seg->origin->vg->extent_size;

	if (!laopts->no_merging && lv_is_merging_cow(lv)) {
		/* cow is to be merged so load the error target */
		if (!dm_tree_node_add_error_target(dnode, size))
			return_0;
	}
	else if (!dm_tree_node_add_snapshot_target(dnode, size, origin_dlid,
						   cow_dlid, 1, snap_seg->chunk_size))
		return_0;

	return 1;
}
1662
ad6b0ebb 1663static int _add_target_to_dtree(struct dev_manager *dm,
81beded3
ZK
1664 struct dm_tree_node *dnode,
1665 struct lv_segment *seg,
1666 struct lv_activate_opts *laopts)
323a167b 1667{
5f4b2acf
AK
1668 uint64_t extent_size = seg->lv->vg->extent_size;
1669
1670 if (!seg->segtype->ops->add_target_line) {
550cae23 1671 log_error(INTERNAL_ERROR "_emit_target cannot handle "
5f4b2acf
AK
1672 "segment type %s", seg->segtype->name);
1673 return 0;
1674 }
1675
2293567c 1676 return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
5f4b2acf 1677 &dm->target_state, seg,
81beded3 1678 laopts, dnode,
5f4b2acf
AK
1679 extent_size * seg->len,
1680 &dm-> pvmove_mirror_count);
a69de491 1681}
323a167b 1682
ad6b0ebb 1683static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
81beded3
ZK
1684 struct logical_volume *lv,
1685 struct lv_activate_opts *laopts,
1686 const char *layer);
5f4b2acf 1687
591fc4d2
ZK
/* Add all replicators' LVs */
/*
 * For an inactive replicator-dev, just load a linear mapping to the remote
 * device.  Otherwise add the replicator log and head, then every related
 * rimage/slog LV, and finally all other replicator-dev heads (second pass
 * avoids repeated insertion retries).
 */
static int _add_replicator_dev_target_to_dtree(struct dev_manager *dm,
					       struct dm_tree *dtree,
					       struct lv_segment *seg,
					       struct lv_activate_opts *laopts)
{
	struct replicator_device *rdev;
	struct replicator_site *rsite;

	/* For inactive replicator add linear mapping */
	if (!lv_is_active_replicator_dev(seg->lv)) {
		if (!_add_new_lv_to_dtree(dm, dtree, seg->lv->rdevice->lv, laopts, NULL))
			return_0;
		return 1;
	}

	/* Add rlog and replicator nodes */
	if (!seg->replicator ||
	    !first_seg(seg->replicator)->rlog_lv ||
	    !_add_new_lv_to_dtree(dm, dtree,
				  first_seg(seg->replicator)->rlog_lv,
				  laopts, NULL) ||
	    !_add_new_lv_to_dtree(dm, dtree, seg->replicator, laopts, NULL))
		return_0;

	/* Activation of one replicator_dev node activates all other nodes */
	dm_list_iterate_items(rsite, &seg->replicator->rsites) {
		dm_list_iterate_items(rdev, &rsite->rdevices) {
			if (rdev->lv &&
			    !_add_new_lv_to_dtree(dm, dtree, rdev->lv,
						  laopts, NULL))
				return_0;

			if (rdev->slog &&
			    !_add_new_lv_to_dtree(dm, dtree, rdev->slog,
						  laopts, NULL))
				return_0;
		}
	}
	/* Add remaining replicator-dev nodes in the second loop
	 * to avoid multiple retries for inserting all elements */
	dm_list_iterate_items(rsite, &seg->replicator->rsites) {
		if (rsite->state != REPLICATOR_STATE_ACTIVE)
			continue;
		dm_list_iterate_items(rdev, &rsite->rdevices) {
			if (rdev->replicator_dev->lv == seg->lv)
				continue;
			if (!rdev->replicator_dev->lv ||
			    !_add_new_lv_to_dtree(dm, dtree,
						  rdev->replicator_dev->lv,
						  laopts, NULL))
				return_0;
		}
	}

	return 1;
}
1745
/*
 * Add one segment of an LV to the dm tree: verify the kernel target is
 * available, recursively add every device the segment depends on
 * (mirror log, snapshot layers, thin pool, sub-LVs), then emit the
 * segment's own target line onto @dnode.  Branch order determines device
 * load order and must not be rearranged casually.
 */
static int _add_segment_to_dtree(struct dev_manager *dm,
				 struct dm_tree *dtree,
				 struct dm_tree_node *dnode,
				 struct lv_segment *seg,
				 struct lv_activate_opts *laopts,
				 const char *layer)
{
	uint32_t s;
	struct dm_list *snh;
	struct lv_segment *seg_present;
	const char *target_name;
	struct lv_activate_opts lva;

	/* Ensure required device-mapper targets are loaded */
	seg_present = find_cow(seg->lv) ? : seg;
	target_name = (seg_present->segtype->ops->target_name ?
		       seg_present->segtype->ops->target_name(seg_present, laopts) :
		       seg_present->segtype->name);

	log_debug("Checking kernel supports %s segment type for %s%s%s",
		  target_name, seg->lv->name,
		  layer ? "-" : "", layer ? : "");

	if (seg_present->segtype->ops->target_present &&
	    !seg_present->segtype->ops->target_present(seg_present->lv->vg->cmd,
						       seg_present, NULL)) {
		log_error("Can't process LV %s: %s target support missing "
			  "from kernel?", seg->lv->name, target_name);
		return 0;
	}

	/* Add mirror log */
	if (seg->log_lv &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->log_lv, laopts, NULL))
		return_0;

	if (seg_is_replicator_dev(seg)) {
		if (!_add_replicator_dev_target_to_dtree(dm, dtree, seg, laopts))
			return_0;
	/* If this is a snapshot origin, add real LV */
	/* If this is a snapshot origin + merging snapshot, add cow + real LV */
	} else if (lv_is_origin(seg->lv) && !layer) {
		if (!laopts->no_merging && lv_is_merging_origin(seg->lv)) {
			if (!_add_new_lv_to_dtree(dm, dtree,
						  find_merging_cow(seg->lv)->cow, laopts, "cow"))
				return_0;
			/*
			 * Must also add "real" LV for use when
			 * snapshot-merge target is added
			 */
		}
		if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, laopts, "real"))
			return_0;
	} else if (lv_is_cow(seg->lv) && !layer) {
		if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, laopts, "cow"))
			return_0;
	} else if ((layer != _thin_layer) && seg_is_thin(seg)) {
		/* Thin volume/pool: add the pool under its private layer. */
		lva = *laopts;
		lva.real_pool = 1;
		if (!_add_new_lv_to_dtree(dm, dtree, seg_is_thin_pool(seg) ?
					  seg->lv : seg->pool_lv, &lva, _thin_layer))
			return_0;
	} else {
		if (seg_is_thin_pool(seg) &&
		    !_add_new_lv_to_dtree(dm, dtree, seg->metadata_lv, laopts, NULL))
			return_0;

		/* Add any LVs used by this segment */
		for (s = 0; s < seg->area_count; s++) {
			if ((seg_type(seg, s) == AREA_LV) &&
			    (!_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
						   laopts, NULL)))
				return_0;
			if (seg_is_raid(seg) &&
			    !_add_new_lv_to_dtree(dm, dtree, seg_metalv(seg, s),
						  laopts, NULL))
				return_0;
		}
	}

	/* Now we've added its dependencies, we can add the target itself */
	if (lv_is_origin(seg->lv) && !layer) {
		if (laopts->no_merging || !lv_is_merging_origin(seg->lv)) {
			if (!_add_origin_target_to_dtree(dm, dnode, seg->lv))
				return_0;
		} else {
			if (!_add_snapshot_merge_target_to_dtree(dm, dnode, seg->lv))
				return_0;
		}
	} else if (lv_is_cow(seg->lv) && !layer) {
		if (!_add_snapshot_target_to_dtree(dm, dnode, seg->lv, laopts))
			return_0;
	} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
		return_0;

	if (lv_is_origin(seg->lv) && !layer)
		/* Add any snapshots of this LV */
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!_add_new_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow,
						  laopts, NULL))
				return_0;

	return 1;
}
f894b4b1 1850
/*
 * Walk the direct children of @dnode and set udev flags on every node that
 * is a top-level LV of @vg.  Non-LVM devices, LVs from other VGs (stacking)
 * and layered sub-devices (uuid with a '-' suffix) are skipped.
 */
static int _set_udev_flags_for_children(struct dev_manager *dm,
					struct volume_group *vg,
					struct dm_tree_node *dnode)
{
	char *p;
	const char *uuid;
	void *handle = NULL;
	struct dm_tree_node *child;
	const struct dm_info *info;
	struct lv_list *lvl;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		/* Ignore root node */
		if (!(info = dm_tree_node_get_info(child)) || !info->exists)
			continue;

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			log_error(INTERNAL_ERROR
				  "Failed to get uuid for %" PRIu32 ":%" PRIu32,
				  info->major, info->minor);
			continue;
		}

		/* Ignore non-LVM devices */
		if (!(p = strstr(uuid, UUID_PREFIX)))
			continue;
		p += strlen(UUID_PREFIX);

		/* Ignore LVs that belong to different VGs (due to stacking) */
		if (strncmp(p, (char *)vg->id.uuid, ID_LEN))
			continue;

		/* Ignore LVM devices with 'layer' suffixes */
		if (strrchr(p, '-'))
			continue;

		if (!(lvl = find_lv_in_vg_by_lvid(vg, (const union lvid *)p))) {
			log_error(INTERNAL_ERROR
				  "%s (%" PRIu32 ":%" PRIu32 ") not found in VG",
				  dm_tree_node_get_name(child),
				  info->major, info->minor);
			return 0;
		}

		dm_tree_node_set_udev_flags(child,
					    _get_udev_flags(dm, lvl->lv, NULL));
	}

	return 1;
}
1901
ad6b0ebb 1902static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
81beded3
ZK
1903 struct logical_volume *lv, struct lv_activate_opts *laopts,
1904 const char *layer)
3e8479bd 1905{
5f4b2acf
AK
1906 struct lv_segment *seg;
1907 struct lv_layer *lvlayer;
df390f17 1908 struct seg_list *sl;
e88f56d9 1909 struct dm_tree_node *dnode;
537f7456 1910 const struct dm_info *dinfo;
a5ec3e38 1911 char *name, *dlid;
a6b22cf3
AK
1912 uint32_t max_stripe_size = UINT32_C(0);
1913 uint32_t read_ahead = lv->read_ahead;
69506f1d 1914 uint32_t read_ahead_flags = UINT32_C(0);
3e8479bd 1915
8dc351e8
AK
1916 /* FIXME Seek a simpler way to lay out the snapshot-merge tree. */
1917
c79b4251 1918 if (lv_is_origin(lv) && lv_is_merging_origin(lv) && !layer) {
c582e3c0
MS
1919 /*
1920 * Clear merge attributes if merge isn't currently possible:
1921 * either origin or merging snapshot are open
537f7456
MS
1922 * - but use "snapshot-merge" if it is already in use
1923 * - open_count is always retrieved (as of dm-ioctl 4.7.0)
1924 * so just use the tree's existing nodes' info
c582e3c0 1925 */
537f7456
MS
1926 if (((dinfo = _cached_info(dm->mem, lv,
1927 dtree)) && dinfo->open_count) ||
1928 ((dinfo = _cached_info(dm->mem, find_merging_cow(lv)->cow,
1929 dtree)) && dinfo->open_count)) {
5cb0d45d 1930 /* FIXME Is there anything simpler to check for instead? */
1f661c5d 1931 if (!lv_has_target_type(dm->mem, lv, NULL, "snapshot-merge"))
c6168a14 1932 laopts->no_merging = 1;
c582e3c0
MS
1933 }
1934 }
1935
e59e2f7c 1936 if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
5f4b2acf 1937 return_0;
3e8479bd 1938
bda39820 1939 if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
5f4b2acf 1940 return_0;
3e8479bd 1941
5f4b2acf 1942 /* We've already processed this node if it already has a context ptr */
e88f56d9
AK
1943 if ((dnode = dm_tree_find_node_by_uuid(dtree, dlid)) &&
1944 dm_tree_node_get_context(dnode))
5f4b2acf 1945 return 1;
3e8479bd 1946
5f4b2acf 1947 if (!(lvlayer = dm_pool_alloc(dm->mem, sizeof(*lvlayer)))) {
60184834 1948 log_error("_add_new_lv_to_dtree: pool alloc failed for %s %s.",
a5ec3e38 1949 lv->name, layer);
3e8479bd
AK
1950 return 0;
1951 }
1952
5f4b2acf
AK
1953 lvlayer->lv = lv;
1954
1955 /*
ad6b0ebb 1956 * Add LV to dtree.
5f4b2acf
AK
1957 * If we're working with precommitted metadata, clear any
1958 * existing inactive table left behind.
1959 * Major/minor settings only apply to the visible layer.
1960 */
df390f17
AK
1961 /* FIXME Move the clear from here until later, so we can leave
1962 * identical inactive tables untouched. (For pvmove.)
1963 */
f16aea9e 1964 if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid,
fe686a51
AK
1965 layer ? UINT32_C(0) : (uint32_t) lv->major,
1966 layer ? UINT32_C(0) : (uint32_t) lv->minor,
a18dcfb5 1967 read_only_lv(lv, laopts),
10d0d9c7 1968 ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
f16aea9e 1969 lvlayer,
6ddb5ecd 1970 _get_udev_flags(dm, lv, layer))))
5f4b2acf
AK
1971 return_0;
1972
1973 /* Store existing name so we can do rename later */
e88f56d9 1974 lvlayer->old_name = dm_tree_node_get_name(dnode);
5f4b2acf
AK
1975
1976 /* Create table */
1977 dm->pvmove_mirror_count = 0u;
2c44337b 1978 dm_list_iterate_items(seg, &lv->segments) {
81beded3 1979 if (!_add_segment_to_dtree(dm, dtree, dnode, seg, laopts, layer))
5f4b2acf
AK
1980 return_0;
1981 /* These aren't real segments in the LVM2 metadata */
1982 if (lv_is_origin(lv) && !layer)
1983 break;
c6168a14 1984 if (!laopts->no_merging && lv_is_cow(lv) && !layer)
5f4b2acf 1985 break;
5f2c1959
ZK
1986 if (max_stripe_size < seg->stripe_size * seg->area_count)
1987 max_stripe_size = seg->stripe_size * seg->area_count;
5f4b2acf 1988 }
3e8479bd 1989
9b6135dc 1990 if (read_ahead == DM_READ_AHEAD_AUTO) {
1c1b068f
ZK
1991 /* we need RA at least twice a whole stripe - see the comment in md/raid0.c */
1992 read_ahead = max_stripe_size * 2;
d3961002 1993 if (!read_ahead)
c1fdeec9 1994 lv_calculate_readahead(lv, &read_ahead);
69506f1d 1995 read_ahead_flags = DM_READ_AHEAD_MINIMUM_FLAG;
9b6135dc 1996 }
a6b22cf3 1997
69506f1d 1998 dm_tree_node_set_read_ahead(dnode, read_ahead, read_ahead_flags);
a6b22cf3 1999
6c7a6c07
ZK
2000 /* Setup thin pool callback */
2001 if (layer && lv_is_thin_pool(lv) &&
2002 !_thin_pool_register_callback(dm, dnode, lv))
2003 return_0;
2004
df390f17
AK
2005 /* Add any LVs referencing a PVMOVE LV unless told not to */
2006 if (dm->track_pvmove_deps && (lv->status & PVMOVE))
2007 dm_list_iterate_items(sl, &lv->segs_using_this_lv)
81beded3 2008 if (!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv, laopts, NULL))
df390f17
AK
2009 return_0;
2010
83c606ae
JEB
2011 if (!_set_udev_flags_for_children(dm, lv->vg, dnode))
2012 return_0;
2013
9fdc84c3 2014 return 1;
3e8479bd
AK
2015}
2016
5efa3f1e
MB
2017/* FIXME: symlinks should be created/destroyed at the same time
2018 * as the kernel devices but we can't do that from within libdevmapper
2019 * at present so we must walk the tree twice instead. */
2020
5f4b2acf
AK
2021/*
2022 * Create LV symlinks for children of supplied root node.
2023 */
e88f56d9 2024static int _create_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
3e8479bd 2025{
5f4b2acf 2026 void *handle = NULL;
e88f56d9 2027 struct dm_tree_node *child;
5f4b2acf 2028 struct lv_layer *lvlayer;
3ad47d16
PR
2029 char *old_vgname, *old_lvname, *old_layer;
2030 char *new_vgname, *new_lvname, *new_layer;
5f4b2acf
AK
2031 const char *name;
2032 int r = 1;
3e8479bd 2033
418663b6 2034 /* Nothing to do if udev fallback is disabled. */
7f815706
ZK
2035 if (!dm->cmd->current_settings.udev_fallback) {
2036 fs_set_create();
418663b6 2037 return 1;
7f815706 2038 }
418663b6 2039
e88f56d9 2040 while ((child = dm_tree_next_child(&handle, root, 0))) {
6c8ffd03 2041 if (!(lvlayer = dm_tree_node_get_context(child)))
5f4b2acf 2042 continue;
3e8479bd 2043
5f4b2acf 2044 /* Detect rename */
e88f56d9 2045 name = dm_tree_node_get_name(child);
3e8479bd 2046
5f4b2acf 2047 if (name && lvlayer->old_name && *lvlayer->old_name && strcmp(name, lvlayer->old_name)) {
3ad47d16 2048 if (!dm_split_lvm_name(dm->mem, lvlayer->old_name, &old_vgname, &old_lvname, &old_layer)) {
67cdbd7e
AK
2049 log_error("_create_lv_symlinks: Couldn't split up old device name %s", lvlayer->old_name);
2050 return 0;
2051 }
3ad47d16
PR
2052 if (!dm_split_lvm_name(dm->mem, name, &new_vgname, &new_lvname, &new_layer)) {
2053 log_error("_create_lv_symlinks: Couldn't split up new device name %s", name);
2054 return 0;
2055 }
2056 if (!fs_rename_lv(lvlayer->lv, name, old_vgname, old_lvname))
2057 r = 0;
ca51e5d9
AK
2058 continue;
2059 }
2060 if (lv_is_visible(lvlayer->lv)) {
ab9663f3 2061 if (!_dev_manager_lv_mknodes(lvlayer->lv))
ca51e5d9
AK
2062 r = 0;
2063 continue;
2064 }
ab9663f3 2065 if (!_dev_manager_lv_rmnodes(lvlayer->lv))
5f4b2acf 2066 r = 0;
3e8479bd
AK
2067 }
2068
5f4b2acf
AK
2069 return r;
2070}
9fdc84c3 2071
5efa3f1e
MB
2072/*
2073 * Remove LV symlinks for children of supplied root node.
2074 */
2075static int _remove_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
2076{
2077 void *handle = NULL;
2078 struct dm_tree_node *child;
2079 char *vgname, *lvname, *layer;
2080 int r = 1;
2081
418663b6
PR
2082 /* Nothing to do if udev fallback is disabled. */
2083 if (!dm->cmd->current_settings.udev_fallback)
2084 return 1;
2085
5efa3f1e 2086 while ((child = dm_tree_next_child(&handle, root, 0))) {
67cdbd7e 2087 if (!dm_split_lvm_name(dm->mem, dm_tree_node_get_name(child), &vgname, &lvname, &layer)) {
5efa3f1e
MB
2088 r = 0;
2089 continue;
2090 }
2091
4bfa1324
AK
2092 if (!*vgname)
2093 continue;
2094
5efa3f1e
MB
2095 /* only top level layer has symlinks */
2096 if (*layer)
2097 continue;
2098
cda69e17
PR
2099 fs_del_lv_byname(dm->cmd->dev_dir, vgname, lvname,
2100 dm->cmd->current_settings.udev_rules);
5efa3f1e
MB
2101 }
2102
2103 return r;
2104}
2105
22149572 2106static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, char *non_toplevel_tree_dlid)
5f4b2acf
AK
2107{
2108 void *handle = NULL;
e88f56d9 2109 struct dm_tree_node *child;
5f4b2acf
AK
2110 char *vgname, *lvname, *layer;
2111 const char *name, *uuid;
2112
e88f56d9
AK
2113 while ((child = dm_tree_next_child(&handle, root, 0))) {
2114 if (!(name = dm_tree_node_get_name(child)))
5f4b2acf
AK
2115 continue;
2116
e88f56d9 2117 if (!(uuid = dm_tree_node_get_uuid(child)))
5f4b2acf
AK
2118 continue;
2119
67cdbd7e
AK
2120 if (!dm_split_lvm_name(dm->mem, name, &vgname, &lvname, &layer)) {
2121 log_error("_clean_tree: Couldn't split up device name %s.", name);
2122 return 0;
2123 }
5f4b2acf
AK
2124
2125 /* Not meant to be top level? */
2126 if (!*layer)
2127 continue;
2128
22149572
AK
2129 /* If operation was performed on a partial tree, don't remove it */
2130 if (non_toplevel_tree_dlid && !strcmp(non_toplevel_tree_dlid, uuid))
2131 continue;
2132
937a21f0 2133 if (!dm_tree_deactivate_children(root, uuid, strlen(uuid)))
a74be32b 2134 return_0;
5f4b2acf
AK
2135 }
2136
2137 return 1;
3e8479bd
AK
2138}
2139
2d6fcbf6 2140static int _tree_action(struct dev_manager *dm, struct logical_volume *lv,
81beded3 2141 struct lv_activate_opts *laopts, action_t action)
3e8479bd 2142{
7a6600b1 2143 const size_t DLID_SIZE = ID_LEN + sizeof(UUID_PREFIX) - 1;
e88f56d9
AK
2144 struct dm_tree *dtree;
2145 struct dm_tree_node *root;
9fdc84c3 2146 char *dlid;
3e8479bd
AK
2147 int r = 0;
2148
78c3b21b
ZK
2149 laopts->is_activate = (action == ACTIVATE);
2150
81beded3 2151 if (!(dtree = _create_partial_dtree(dm, lv, laopts->origin_only)))
5f4b2acf 2152 return_0;
3e8479bd 2153
e88f56d9 2154 if (!(root = dm_tree_find_node(dtree, 0, 0))) {
9fdc84c3 2155 log_error("Lost dependency tree root node");
937a21f0 2156 goto out_no_root;
3e8479bd
AK
2157 }
2158
937a21f0
ZK
2159 /* Restore fs cookie */
2160 dm_tree_set_cookie(root, fs_get_cookie());
2161
0f2a4ca2 2162 if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, (lv_is_origin(lv) && laopts->origin_only) ? "real" : NULL)))
5f4b2acf 2163 goto_out;
3e8479bd 2164
03b49fe1 2165 /* Only process nodes with uuid of "LVM-" plus VG id. */
352a99b9 2166 switch(action) {
5f4b2acf
AK
2167 case CLEAN:
2168 /* Deactivate any unused non-toplevel nodes */
81beded3 2169 if (!_clean_tree(dm, root, laopts->origin_only ? dlid : NULL))
5f4b2acf
AK
2170 goto_out;
2171 break;
352a99b9 2172 case DEACTIVATE:
9fa1d30a
PR
2173 if (retry_deactivation())
2174 dm_tree_retry_remove(root);
409bf6e6 2175 /* Deactivate LV and all devices it references that nothing else has open. */
7a6600b1 2176 if (!dm_tree_deactivate_children(root, dlid, DLID_SIZE))
a74be32b 2177 goto_out;
5efa3f1e 2178 if (!_remove_lv_symlinks(dm, root))
4007ac81 2179 log_warn("Failed to remove all device symlinks associated with %s.", lv->name);
352a99b9
AK
2180 break;
2181 case SUSPEND:
9cd3426d 2182 dm_tree_skip_lockfs(root);
c62f9f0b
JEB
2183 if (!dm->flush_required && !seg_is_raid(first_seg(lv)) &&
2184 (lv->status & MIRRORED) && !(lv->status & PVMOVE))
33f732c5 2185 dm_tree_use_no_flush_suspend(root);
36653e89 2186 /* Fall through */
9cd3426d 2187 case SUSPEND_WITH_LOCKFS:
7a6600b1 2188 if (!dm_tree_suspend_children(root, dlid, DLID_SIZE))
5f4b2acf
AK
2189 goto_out;
2190 break;
2191 case PRELOAD:
2192 case ACTIVATE:
2193 /* Add all required new devices to tree */
0f2a4ca2 2194 if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, (lv_is_origin(lv) && laopts->origin_only) ? "real" : NULL))
5f4b2acf
AK
2195 goto_out;
2196
2197 /* Preload any devices required before any suspensions */
7a6600b1 2198 if (!dm_tree_preload_children(root, dlid, DLID_SIZE))
a74be32b 2199 goto_out;
5f4b2acf 2200
eb91c4ee
MB
2201 if (dm_tree_node_size_changed(root))
2202 dm->flush_required = 1;
2203
bd90c6b2 2204 if (action == ACTIVATE) {
7a6600b1 2205 if (!dm_tree_activate_children(root, dlid, DLID_SIZE))
a74be32b 2206 goto_out;
4007ac81
ZK
2207 if (!_create_lv_symlinks(dm, root))
2208 log_warn("Failed to create symlinks for %s.", lv->name);
bd90c6b2 2209 }
5f4b2acf 2210
352a99b9
AK
2211 break;
2212 default:
2213 log_error("_tree_action: Action %u not supported.", action);
3e8479bd 2214 goto out;
08e64ce5 2215 }
3e8479bd 2216
3e8479bd
AK
2217 r = 1;
2218
2219out:
937a21f0
ZK
2220 /* Save fs cookie for udev settle, do not wait here */
2221 fs_set_cookie(dm_tree_get_cookie(root));
2222out_no_root:
e88f56d9 2223 dm_tree_free(dtree);
3e8479bd
AK
2224
2225 return r;
2226}
2227
2d6fcbf6 2228/* origin_only may only be set if we are resuming (not activating) an origin LV */
81beded3
ZK
2229int dev_manager_activate(struct dev_manager *dm, struct logical_volume *lv,
2230 struct lv_activate_opts *laopts)
5f4b2acf 2231{
81beded3 2232 if (!_tree_action(dm, lv, laopts, ACTIVATE))
5f4b2acf
AK
2233 return_0;
2234
de75bc66
ZK
2235 if (!_tree_action(dm, lv, laopts, CLEAN))
2236 return_0;
2237
2238 return 1;
5f4b2acf
AK
2239}
2240
2d6fcbf6 2241/* origin_only may only be set if we are resuming (not activating) an origin LV */
eb91c4ee 2242int dev_manager_preload(struct dev_manager *dm, struct logical_volume *lv,
81beded3 2243 struct lv_activate_opts *laopts, int *flush_required)
5f4b2acf 2244{
81beded3 2245 if (!_tree_action(dm, lv, laopts, PRELOAD))
de75bc66 2246 return_0;
eb91c4ee
MB
2247
2248 *flush_required = dm->flush_required;
2249
2250 return 1;
5f4b2acf 2251}
b427ecee 2252
352a99b9
AK
2253int dev_manager_deactivate(struct dev_manager *dm, struct logical_volume *lv)
2254{
81beded3 2255 struct lv_activate_opts laopts = { 0 };
352a99b9 2256
de75bc66
ZK
2257 if (!_tree_action(dm, lv, &laopts, DEACTIVATE))
2258 return_0;
352a99b9 2259
de75bc66 2260 return 1;
352a99b9
AK
2261}
2262
9cd3426d 2263int dev_manager_suspend(struct dev_manager *dm, struct logical_volume *lv,
81beded3 2264 struct lv_activate_opts *laopts, int lockfs, int flush_required)
b427ecee 2265{
eb91c4ee
MB
2266 dm->flush_required = flush_required;
2267
de75bc66
ZK
2268 if (!_tree_action(dm, lv, laopts, lockfs ? SUSPEND_WITH_LOCKFS : SUSPEND))
2269 return_0;
2270
2271 return 1;
b427ecee
AK
2272}
2273
352a99b9
AK
2274/*
2275 * Does device use VG somewhere in its construction?
2276 * Returns 1 if uncertain.
2277 */
898e6f8e 2278int dev_manager_device_uses_vg(struct device *dev,
352a99b9
AK
2279 struct volume_group *vg)
2280{
e88f56d9
AK
2281 struct dm_tree *dtree;
2282 struct dm_tree_node *root;
08f1ddea 2283 char dlid[sizeof(UUID_PREFIX) + sizeof(struct id) - 1] __attribute__((aligned(8)));
352a99b9
AK
2284 int r = 1;
2285
e88f56d9 2286 if (!(dtree = dm_tree_create())) {
ad6b0ebb 2287 log_error("partial dtree creation failed");
352a99b9
AK
2288 return r;
2289 }
2290
898e6f8e 2291 if (!dm_tree_add_dev(dtree, (uint32_t) MAJOR(dev->dev), (uint32_t) MINOR(dev->dev))) {
ad6b0ebb 2292 log_error("Failed to add device %s (%" PRIu32 ":%" PRIu32") to dtree",
352a99b9
AK
2293 dev_name(dev), (uint32_t) MAJOR(dev->dev), (uint32_t) MINOR(dev->dev));
2294 goto out;
2295 }
2296
2297 memcpy(dlid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1);
2298 memcpy(dlid + sizeof(UUID_PREFIX) - 1, &vg->id.uuid[0], sizeof(vg->id));
2299
e88f56d9 2300 if (!(root = dm_tree_find_node(dtree, 0, 0))) {
352a99b9
AK
2301 log_error("Lost dependency tree root node");
2302 goto out;
2303 }
2304
e88f56d9 2305 if (dm_tree_children_use_uuid(root, dlid, sizeof(UUID_PREFIX) + sizeof(vg->id) - 1))
5f4b2acf 2306 goto_out;
352a99b9
AK
2307
2308 r = 0;
2309
2310out:
e88f56d9 2311 dm_tree_free(dtree);
352a99b9
AK
2312 return r;
2313}
This page took 0.434273 seconds and 5 git commands to generate.