/*
 * Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "lib.h"
#include "str_list.h"
#include "dev_manager.h"
#include "lvm-string.h"
#include "fs.h"
#include "defaults.h"
#include "segtype.h"
#include "display.h"
#include "toolcontext.h"
#include "targets.h"
#include "config.h"
#include "filter.h"
#include "activate.h"
#include "lvm-exec.h"

#include <limits.h>
#include <dirent.h>

#define MAX_TARGET_PARAMSIZE 50000

typedef enum {
	PRELOAD,
	ACTIVATE,
	DEACTIVATE,
	SUSPEND,
	SUSPEND_WITH_LOCKFS,
	CLEAN
} action_t;

struct dev_manager {
	struct dm_pool *mem;

	struct cmd_context *cmd;

	void *target_state;
	uint32_t pvmove_mirror_count;
	int flush_required;
	unsigned track_pvmove_deps;

	char *vg_name;
};

struct lv_layer {
	struct logical_volume *lv;
	const char *old_name;
};

static const char _thin_layer[] = "tpool";

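/*
 * An LV is activated read-only if the caller asks for it explicitly or
 * if either the VG or the LV itself lacks write permission.
 */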
int read_only_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	return (laopts->read_only || !(lv->vg->status & LVM_WRITE) || !(lv->status & LVM_WRITE));
}

/*
 * Low level device-layer operations.
 */
static struct dm_task *_setup_task(const char *name, const char *uuid,
				   uint32_t *event_nr, int task,
				   uint32_t major, uint32_t minor)
{
	struct dm_task *dmt;

	if (!(dmt = dm_task_create(task)))
		return_NULL;

	if (name && !dm_task_set_name(dmt, name))
		goto_out;

	if (uuid && *uuid && !dm_task_set_uuid(dmt, uuid))
		goto_out;

	if (event_nr && !dm_task_set_event_nr(dmt, *event_nr))
		goto_out;

	if (major && !dm_task_set_major_minor(dmt, major, minor, 1))
		goto_out;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	return dmt;
out:
	dm_task_destroy(dmt);
	return NULL;
}

static int _info_run(const char *name, const char *dlid, struct dm_info *info,
		     uint32_t *read_ahead, int mknodes, int with_open_count,
		     int with_read_ahead, uint32_t major, uint32_t minor)
{
	int r = 0;
	struct dm_task *dmt;
	int dmtask;

	dmtask = mknodes ? DM_DEVICE_MKNODES : DM_DEVICE_INFO;

	if (!(dmt = _setup_task(mknodes ? name : NULL, dlid, NULL, dmtask, major, minor)))
		return_0;

	if (!with_open_count)
		if (!dm_task_no_open_count(dmt))
			log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, info))
		goto_out;

	if (with_read_ahead && info->exists) {
		if (!dm_task_get_read_ahead(dmt, read_ahead))
			goto_out;
	} else if (read_ahead)
		*read_ahead = DM_READ_AHEAD_NONE;

	r = 1;

out:
	dm_task_destroy(dmt);
	return r;
}

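/*
 * Decide whether a device-mapper device is safe for LVM to use: reject
 * empty devices, suspended devices (when those are being ignored),
 * mirrors and snapshot origins (while suspended devices are ignored),
 * devices made entirely of error targets and reserved internal LVM
 * devices.
 */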
int device_is_usable(struct device *dev)
{
	struct dm_task *dmt;
	struct dm_info info;
	const char *name, *uuid;
	uint64_t start, length;
	char *target_type = NULL;
	char *params, *vgname = NULL, *lvname, *layer;
	void *next = NULL;
	int only_error_target = 1;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
		return_0;

	if (!dm_task_set_major_minor(dmt, MAJOR(dev->dev), MINOR(dev->dev), 1))
		goto_out;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_error("Failed to get state of mapped device");
		goto out;
	}

	if (!dm_task_get_info(dmt, &info))
		goto_out;

	if (!info.exists)
		goto out;

	name = dm_task_get_name(dmt);
	uuid = dm_task_get_uuid(dmt);

	if (!info.target_count) {
		log_debug("%s: Empty device %s not usable.", dev_name(dev), name);
		goto out;
	}

	if (info.suspended && ignore_suspended_devices()) {
		log_debug("%s: Suspended device %s not usable.", dev_name(dev), name);
		goto out;
	}

	/* FIXME Also check for mirror block_on_error and mpath no paths */
	/* For now, we exclude all mirrors */

	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &target_type, &params);
		/* Exclude all mirrors while suspended devices are ignored */
		if (target_type && !strcmp(target_type, "mirror") && ignore_suspended_devices()) {
			log_debug("%s: Mirror device %s not usable.", dev_name(dev), name);
			goto out;
		}

		/*
		 * Snapshot origin could be sitting on top of a mirror which
		 * could be blocking I/O. Skip snapshot origins entirely for
		 * now.
		 *
		 * FIXME: rather than skipping origin, check if mirror is
		 * underneath and if the mirror is blocking I/O.
		 */
		if (target_type && !strcmp(target_type, "snapshot-origin") &&
		    ignore_suspended_devices()) {
			log_debug("%s: Snapshot-origin device %s not usable.",
				  dev_name(dev), name);
			goto out;
		}

		if (target_type && strcmp(target_type, "error"))
			only_error_target = 0;
	} while (next);

	/* Skip devices consisting entirely of error targets. */
	/* FIXME Deal with device stacked above error targets? */
	if (only_error_target) {
		log_debug("%s: Error device %s not usable.",
			  dev_name(dev), name);
		goto out;
	}

	/* FIXME Also check dependencies? */

	/* Check internal lvm devices */
	if (uuid && !strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1)) {
		if (!(vgname = dm_strdup(name)) ||
		    !dm_split_lvm_name(NULL, NULL, &vgname, &lvname, &layer))
			goto_out;

		if (lvname && (is_reserved_lvname(lvname) || *layer)) {
			log_debug("%s: Reserved internal LV device %s/%s%s%s not usable.",
				  dev_name(dev), vgname, lvname, *layer ? "-" : "", layer);
			goto out;
		}
	}

	r = 1;

out:
	dm_free(vgname);
	dm_task_destroy(dmt);
	return r;
}

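/*
 * Look the device up by its dlid: first with the full "LVM-"-prefixed
 * uuid, then, as a fallback for devices activated by older tools,
 * without the prefix.
 */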
static int _info(const char *dlid, int with_open_count, int with_read_ahead,
		 struct dm_info *info, uint32_t *read_ahead)
{
	int r = 0;

	if ((r = _info_run(NULL, dlid, info, read_ahead, 0, with_open_count,
			   with_read_ahead, 0, 0)) && info->exists)
		return 1;
	else if ((r = _info_run(NULL, dlid + sizeof(UUID_PREFIX) - 1, info,
				read_ahead, 0, with_open_count,
				with_read_ahead, 0, 0)) && info->exists)
		return 1;

	return r;
}

static int _info_by_dev(uint32_t major, uint32_t minor, struct dm_info *info)
{
	return _info_run(NULL, NULL, info, NULL, 0, 0, 0, major, minor);
}

int dev_manager_info(struct dm_pool *mem, const struct logical_volume *lv,
		     const char *layer,
		     int with_open_count, int with_read_ahead,
		     struct dm_info *info, uint32_t *read_ahead)
{
	char *dlid, *name;
	int r;

	if (!(name = dm_build_dm_name(mem, lv->vg->name, lv->name, layer))) {
		log_error("name build failed for %s", lv->name);
		return 0;
	}

	if (!(dlid = build_dm_uuid(mem, lv->lvid.s, layer))) {
		log_error("dlid build failed for %s", name);
		return 0;
	}

	log_debug("Getting device info for %s [%s]", name, dlid);
	r = _info(dlid, with_open_count, with_read_ahead, info, read_ahead);

	dm_pool_free(mem, name);
	return r;
}

static const struct dm_info *_cached_info(struct dm_pool *mem,
					  const struct logical_volume *lv,
					  struct dm_tree *dtree)
{
	const char *dlid;
	struct dm_tree_node *dnode;
	const struct dm_info *dinfo;

	if (!(dlid = build_dm_uuid(mem, lv->lvid.s, NULL))) {
		log_error("dlid build failed for %s", lv->name);
		return NULL;
	}

	/* An activating merging origin won't have a node in the tree yet */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, dlid)))
		return NULL;

	if (!(dinfo = dm_tree_node_get_info(dnode))) {
		log_error("failed to get info from tree node for %s", lv->name);
		return NULL;
	}

	if (!dinfo->exists)
		return NULL;

	return dinfo;
}

#if 0
/* FIXME Interface must cope with multiple targets */
static int _status_run(const char *name, const char *uuid,
		       unsigned long long *s, unsigned long long *l,
		       char **t, uint32_t t_size, char **p, uint32_t p_size)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;

	if (!(dmt = _setup_task(name, uuid, NULL, DM_DEVICE_STATUS, 0, 0)))
		return_0;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &type, &params);
		if (type) {
			*s = start;
			*l = length;
			/* Make sure things are null terminated */
			strncpy(*t, type, t_size);
			(*t)[t_size - 1] = '\0';
			strncpy(*p, params, p_size);
			(*p)[p_size - 1] = '\0';

			r = 1;
			/* FIXME Cope with multiple targets! */
			break;
		}

	} while (next);

out:
	dm_task_destroy(dmt);
	return r;
}

static int _status(const char *name, const char *uuid,
		   unsigned long long *start, unsigned long long *length,
		   char **type, uint32_t type_size, char **params,
		   uint32_t param_size) __attribute__ ((unused));

static int _status(const char *name, const char *uuid,
		   unsigned long long *start, unsigned long long *length,
		   char **type, uint32_t type_size, char **params,
		   uint32_t param_size)
{
	if (uuid && *uuid) {
		if (_status_run(NULL, uuid, start, length, type,
				type_size, params, param_size) &&
		    *params)
			return 1;
		else if (_status_run(NULL, uuid + sizeof(UUID_PREFIX) - 1, start,
				     length, type, type_size, params,
				     param_size) &&
			 *params)
			return 1;
	}

	if (name && _status_run(name, NULL, start, length, type, type_size,
				params, param_size))
		return 1;

	return 0;
}
#endif

int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
		       const char *layer, const char *target_type)
{
	int r = 0;
	char *dlid;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;

	if (!(dlid = build_dm_uuid(mem, lv->lvid.s, layer)))
		return_0;

	if (!(dmt = _setup_task(NULL, dlid, NULL,
				DM_DEVICE_STATUS, 0, 0)))
		goto_bad;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	do {
		next = dm_get_next_target(dmt, next, &start, &length,
					  &type, &params);
		if (type && strncmp(type, target_type,
				    strlen(target_type)) == 0) {
			if (info.live_table)
				r = 1;
			break;
		}
	} while (next);

out:
	dm_task_destroy(dmt);
bad:
	dm_pool_free(mem, dlid);

	return r;
}

int add_linear_area_to_dtree(struct dm_tree_node *node, uint64_t size, uint32_t extent_size, int use_linear_target, const char *vgname, const char *lvname)
{
	uint32_t page_size;

	/*
	 * Use striped or linear target?
	 */
	if (!use_linear_target) {
		page_size = lvm_getpagesize() >> SECTOR_SHIFT;

		/*
		 * We'll use the extent size as the stripe size.
		 * Extent size and page size are always powers of 2.
		 * The striped target requires that the stripe size is
		 * divisible by the page size.
		 */
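		/*
		 * Example: with 4 KiB pages, page_size above is
		 * 4096 >> SECTOR_SHIFT = 8 sectors, so any (power-of-2)
		 * extent size of at least 8 sectors can serve as the
		 * stripe size.
		 */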
		if (extent_size >= page_size) {
			/* Use striped target */
			if (!dm_tree_node_add_striped_target(node, size, extent_size))
				return_0;
			return 1;
		} else
			/* Some exotic cases are unsupported by striped. */
			log_warn("WARNING: Using linear target for %s/%s: Striped requires extent size (%" PRIu32 " sectors) >= page size (%" PRIu32 ").",
				 vgname, lvname, extent_size, page_size);
	}

	/*
	 * Use linear target.
	 */
	if (!dm_tree_node_add_linear_target(node, size))
		return_0;

	return 1;
}

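/*
 * Combine per-segment percentages into an overall value: merge failures
 * and invalid values propagate, exact 0% or 100% is preserved only when
 * both inputs agree, and anything else is recomputed from the cumulative
 * numerator/denominator totals.
 */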
static percent_range_t _combine_percent(percent_t a, percent_t b,
					uint32_t numerator, uint32_t denominator)
{
	if (a == PERCENT_MERGE_FAILED || b == PERCENT_MERGE_FAILED)
		return PERCENT_MERGE_FAILED;

	if (a == PERCENT_INVALID || b == PERCENT_INVALID)
		return PERCENT_INVALID;

	if (a == PERCENT_100 && b == PERCENT_100)
		return PERCENT_100;

	if (a == PERCENT_0 && b == PERCENT_0)
		return PERCENT_0;

	return (percent_range_t) make_percent(numerator, denominator);
}

static int _percent_run(struct dev_manager *dm, const char *name,
			const char *dlid,
			const char *target_type, int wait,
			const struct logical_volume *lv, percent_t *overall_percent,
			uint32_t *event_nr, int fail_if_percent_unsupported)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;
	const struct dm_list *segh = lv ? &lv->segments : NULL;
	struct lv_segment *seg = NULL;
	struct segment_type *segtype;
	int first_time = 1;
	percent_t percent;

	uint64_t total_numerator = 0, total_denominator = 0;

	*overall_percent = PERCENT_INVALID;

	if (!(dmt = _setup_task(name, dlid, event_nr,
				wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, 0, 0)))
		return_0;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	if (event_nr)
		*event_nr = info.event_nr;

	do {
		next = dm_get_next_target(dmt, next, &start, &length, &type,
					  &params);
		if (lv) {
			if (!(segh = dm_list_next(&lv->segments, segh))) {
				log_error("Number of segments in active LV %s "
					  "does not match metadata", lv->name);
				goto out;
			}
			seg = dm_list_item(segh, struct lv_segment);
		}

		if (!type || !params)
			continue;

		if (!(segtype = get_segtype_from_string(dm->cmd, target_type)))
			continue;

		if (strcmp(type, target_type)) {
			/* If kernel's type isn't an exact match is it compatible? */
			if (!segtype->ops->target_status_compatible ||
			    !segtype->ops->target_status_compatible(type))
				continue;
		}

		if (!segtype->ops->target_percent)
			continue;

		if (!segtype->ops->target_percent(&dm->target_state,
						  &percent, dm->mem,
						  dm->cmd, seg, params,
						  &total_numerator,
						  &total_denominator))
			goto_out;

		if (first_time) {
			*overall_percent = percent;
			first_time = 0;
		} else
			*overall_percent =
				_combine_percent(*overall_percent, percent,
						 total_numerator, total_denominator);
	} while (next);

	if (lv && dm_list_next(&lv->segments, segh)) {
		log_error("Number of segments in active LV %s does not "
			  "match metadata", lv->name);
		goto out;
	}

	if (first_time) {
		/* above ->target_percent() was not executed! */
		/* FIXME why return PERCENT_100 et al. in this case? */
		*overall_percent = PERCENT_100;
		if (fail_if_percent_unsupported)
			goto_out;
	}

	log_debug("LV percent: %f", percent_to_float(*overall_percent));
	r = 1;

out:
	dm_task_destroy(dmt);
	return r;
}

static int _percent(struct dev_manager *dm, const char *name, const char *dlid,
		    const char *target_type, int wait,
		    const struct logical_volume *lv, percent_t *percent,
		    uint32_t *event_nr, int fail_if_percent_unsupported)
{
	if (dlid && *dlid) {
		if (_percent_run(dm, NULL, dlid, target_type, wait, lv, percent,
				 event_nr, fail_if_percent_unsupported))
			return 1;
		else if (_percent_run(dm, NULL, dlid + sizeof(UUID_PREFIX) - 1,
				      target_type, wait, lv, percent,
				      event_nr, fail_if_percent_unsupported))
			return 1;
	}

	if (name && _percent_run(dm, name, NULL, target_type, wait, lv, percent,
				 event_nr, fail_if_percent_unsupported))
		return 1;

	return 0;
}

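/*
 * Walk the live table and let each segment type validate its
 * kernel-reported (transient) status via the check_transient_status
 * hook, cross-checking the target count against the LV's metadata
 * segments.
 */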
/* FIXME Merge with the percent function */
int dev_manager_transient(struct dev_manager *dm, struct logical_volume *lv)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_info info;
	void *next = NULL;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;
	char *dlid = NULL;
	const char *layer = lv_is_origin(lv) ? "real" : NULL;
	const struct dm_list *segh = &lv->segments;
	struct lv_segment *seg = NULL;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
		return_0;

	if (!(dmt = _setup_task(NULL, dlid, NULL, DM_DEVICE_STATUS, 0, 0)))
		return_0;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	do {
		next = dm_get_next_target(dmt, next, &start, &length, &type,
					  &params);

		if (!(segh = dm_list_next(&lv->segments, segh))) {
			log_error("Number of segments in active LV %s "
				  "does not match metadata", lv->name);
			goto out;
		}
		seg = dm_list_item(segh, struct lv_segment);

		if (!type || !params)
			continue;

		if (seg->segtype->ops->check_transient_status &&
		    !seg->segtype->ops->check_transient_status(seg, params))
			goto_out;

	} while (next);

	if (dm_list_next(&lv->segments, segh)) {
		log_error("Number of segments in active LV %s does not "
			  "match metadata", lv->name);
		goto out;
	}

	r = 1;

out:
	dm_task_destroy(dmt);
	return r;
}

/*
 * dev_manager implementation.
 */
struct dev_manager *dev_manager_create(struct cmd_context *cmd,
				       const char *vg_name,
				       unsigned track_pvmove_deps)
{
	struct dm_pool *mem;
	struct dev_manager *dm;

	if (!(mem = dm_pool_create("dev_manager", 16 * 1024)))
		return_NULL;

	if (!(dm = dm_pool_zalloc(mem, sizeof(*dm))))
		goto_bad;

	dm->cmd = cmd;
	dm->mem = mem;

	if (!(dm->vg_name = dm_pool_strdup(dm->mem, vg_name)))
		goto_bad;

	/*
	 * When we manipulate (normally suspend/resume) the PVMOVE
	 * device directly, there's no need to touch the LVs above.
	 */
	dm->track_pvmove_deps = track_pvmove_deps;

	dm->target_state = NULL;

	dm_udev_set_sync_support(cmd->current_settings.udev_sync);

	return dm;

bad:
	dm_pool_destroy(mem);
	return NULL;
}

void dev_manager_destroy(struct dev_manager *dm)
{
	dm_pool_destroy(dm->mem);
}

void dev_manager_release(void)
{
	dm_lib_release();
}

void dev_manager_exit(void)
{
	dm_lib_exit();
}

int dev_manager_snapshot_percent(struct dev_manager *dm,
				 const struct logical_volume *lv,
				 percent_t *percent)
{
	const struct logical_volume *snap_lv;
	char *name;
	const char *dlid;
	int fail_if_percent_unsupported = 0;

	if (lv_is_merging_origin(lv)) {
		/*
		 * Set 'fail_if_percent_unsupported', otherwise passing
		 * unsupported LV types to _percent will lead to a default
		 * successful return with percent_range as PERCENT_100.
		 * - For a merging origin, this will result in a polldaemon
		 *   that runs infinitely (because completion is PERCENT_0)
		 * - We unfortunately don't yet _know_ if a snapshot-merge
		 *   target is active (activation is deferred if dev is open);
		 *   so we can't short-circuit origin devices based purely on
		 *   existing LVM LV attributes.
		 */
		fail_if_percent_unsupported = 1;
	}

	if (lv_is_merging_cow(lv)) {
		/* must check percent of origin for a merging snapshot */
		snap_lv = origin_from_cow(lv);
	} else
		snap_lv = lv;

	/*
	 * Build a name for the top layer.
	 */
	if (!(name = dm_build_dm_name(dm->mem, snap_lv->vg->name, snap_lv->name, NULL)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, snap_lv->lvid.s, NULL)))
		return_0;

	/*
	 * Try to get some info on this device.
	 */
	log_debug("Getting device status percentage for %s", name);
	if (!(_percent(dm, name, dlid, "snapshot", 0, NULL, percent,
		       NULL, fail_if_percent_unsupported)))
		return_0;

	/* If the snapshot isn't available, percent will be -1 */
	return 1;
}

/* FIXME Merge with snapshot_percent, auto-detecting target type */
/* FIXME Cope with more than one target */
int dev_manager_mirror_percent(struct dev_manager *dm,
			       const struct logical_volume *lv, int wait,
			       percent_t *percent, uint32_t *event_nr)
{
	char *name;
	const char *dlid;
	const char *target_type = first_seg(lv)->segtype->name;
	const char *layer = (lv_is_origin(lv)) ? "real" : NULL;

	/*
	 * Build a name for the top layer.
	 */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer))) {
		log_error("dlid build failed for %s", lv->name);
		return 0;
	}

	log_debug("Getting device %s status percentage for %s",
		  target_type, name);
	if (!(_percent(dm, name, dlid, target_type, wait, lv, percent,
		       event_nr, 0)))
		return_0;

	return 1;
}

#if 0
	log_very_verbose("%s %s", sus ? "Suspending" : "Resuming", name);

	log_verbose("Loading %s", dl->name);
	log_very_verbose("Activating %s read-only", dl->name);
	log_very_verbose("Activated %s %s %03u:%03u", dl->name,
			 dl->dlid, dl->info.major, dl->info.minor);

	if (_get_flag(dl, VISIBLE))
		log_verbose("Removing %s", dl->name);
	else
		log_very_verbose("Removing %s", dl->name);

	log_debug("Adding target: %" PRIu64 " %" PRIu64 " %s %s",
		  extent_size * seg->le, extent_size * seg->len, target, params);

	log_debug("Adding target: 0 %" PRIu64 " snapshot-origin %s",
		  dl->lv->size, params);
	log_debug("Adding target: 0 %" PRIu64 " snapshot %s", size, params);
	log_debug("Getting device info for %s", dl->name);

	/* Rename? */
	if ((suffix = strrchr(dl->dlid + sizeof(UUID_PREFIX) - 1, '-')))
		suffix++;
	new_name = dm_build_dm_name(dm->mem, dm->vg_name, dl->lv->name,
				    suffix);

static int _belong_to_vg(const char *vgname, const char *name)
{
	const char *v = vgname, *n = name;

	while (*v) {
		if ((*v != *n) || (*v == '-' && *(++n) != '-'))
			return 0;
		v++, n++;
	}

	if (*n == '-' && *(n + 1) != '-')
		return 1;
	else
		return 0;
}

	if (!(snap_seg = find_cow(lv)))
		return 1;

	old_origin = snap_seg->origin;

	/* Was this the last active snapshot with this origin? */
	dm_list_iterate_items(lvl, active_head) {
		active = lvl->lv;
		if ((snap_seg = find_cow(active)) &&
		    snap_seg->origin == old_origin) {
			return 1;
		}
	}

#endif

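/*
 * A thin pool LV maps to a single thin-pool target, so only the first
 * (and only) target line of the status output needs to be parsed.
 */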
int dev_manager_thin_pool_status(struct dev_manager *dm,
				 const struct logical_volume *lv,
				 struct dm_status_thin_pool **status)
{
	const char *dlid;
	struct dm_task *dmt;
	struct dm_info info;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;
	int r = 0;

	/* Build dlid for the thin pool layer */
	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
		return_0;

	log_debug("Getting thin pool device status for %s.", lv->name);

	if (!(dmt = _setup_task(NULL, dlid, NULL, DM_DEVICE_STATUS, 0, 0)))
		return_0;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count.");

	if (!dm_task_run(dmt))
		goto_out;

	if (!dm_task_get_info(dmt, &info) || !info.exists)
		goto_out;

	dm_get_next_target(dmt, NULL, &start, &length, &type, &params);

	if (!dm_get_status_thin_pool(dm->mem, params, status))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}

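/*
 * Note: passing the LV through to _percent (when 'metadata' is set)
 * appears to make the thin-pool segment's target_percent method report
 * metadata usage, while NULL makes it report data usage.
 */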
int dev_manager_thin_pool_percent(struct dev_manager *dm,
				  const struct logical_volume *lv,
				  int metadata, percent_t *percent)
{
	char *name;
	const char *dlid;

	/* Build a name for the top layer */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name,
				      _thin_layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
		return_0;

	log_debug("Getting device status percentage for %s", name);
	if (!(_percent(dm, name, dlid, "thin-pool", 0,
		       (metadata) ? lv : NULL, percent, NULL, 1)))
		return_0;

	return 1;
}

int dev_manager_thin_percent(struct dev_manager *dm,
			     const struct logical_volume *lv,
			     int mapped, percent_t *percent)
{
	char *name;
	const char *dlid;
	const char *layer = lv_is_origin(lv) ? "real" : NULL;

	/* Build a name for the top layer */
	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
		return_0;

	log_debug("Getting device status percentage for %s", name);
	if (!(_percent(dm, name, dlid, "thin", 0,
		       (mapped) ? NULL : lv, percent, NULL, 1)))
		return_0;

	return 1;
}

/*************************/
/* NEW CODE STARTS HERE */
/*************************/

static int _dev_manager_lv_mknodes(const struct logical_volume *lv)
{
	char *name;

	if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name,
				      lv->name, NULL)))
		return_0;

	return fs_add_lv(lv, name);
}

static int _dev_manager_lv_rmnodes(const struct logical_volume *lv)
{
	return fs_del_lv(lv);
}

int dev_manager_mknodes(const struct logical_volume *lv)
{
	struct dm_info dminfo;
	char *name;
	int r = 0;

	if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name, lv->name, NULL)))
		return_0;

	if ((r = _info_run(name, NULL, &dminfo, NULL, 1, 0, 0, 0, 0))) {
		if (dminfo.exists) {
			if (lv_is_visible(lv))
				r = _dev_manager_lv_mknodes(lv);
		} else
			r = _dev_manager_lv_rmnodes(lv);
	}

	dm_pool_free(lv->vg->cmd->mem, name);
	return r;
}

static uint16_t _get_udev_flags(struct dev_manager *dm, struct logical_volume *lv,
				const char *layer)
{
	uint16_t udev_flags = 0;

	/*
	 * Also instruct libdevmapper to disable the udev
	 * fallback in accordance with LVM2 settings.
	 */
	if (!dm->cmd->current_settings.udev_fallback)
		udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;

	/*
	 * Is this a top-level and visible device?
	 * If not, create just the /dev/mapper content.
	 */
	/* FIXME: add target's method for this */
	if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv))
		udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
			      DM_UDEV_DISABLE_DISK_RULES_FLAG |
			      DM_UDEV_DISABLE_OTHER_RULES_FLAG;
	/*
	 * There's no need for other udev rules to touch special LVs with
	 * reserved names. We don't need to populate /dev/disk here either.
	 * Even if they happen to be visible and top-level.
	 */
	else if (is_reserved_lvname(lv->name))
		udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
			      DM_UDEV_DISABLE_OTHER_RULES_FLAG;

	/*
	 * Snapshots and origins could have the same rule applied that will
	 * give symlinks exactly the same name (e.g. a name based on
	 * filesystem UUID). We give preference to origins to make such
	 * naming deterministic (e.g. symlinks in /dev/disk/by-uuid).
	 */
	if (lv_is_cow(lv))
		udev_flags |= DM_UDEV_LOW_PRIORITY_FLAG;

	/*
	 * Finally, add flags to disable the creation of /dev/mapper and
	 * /dev/<vgname> content by udev if requested by the user's
	 * configuration. This is basically an explicit fallback to old
	 * node/symlink creation without udev.
	 */
	if (!dm->cmd->current_settings.udev_rules)
		udev_flags |= DM_UDEV_DISABLE_DM_RULES_FLAG |
			      DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG;

	return udev_flags;
}

static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
			     struct logical_volume *lv, const char *layer)
{
	char *dlid, *name;
	struct dm_info info, info2;

	if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
		return_0;

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
		return_0;

	log_debug("Getting device info for %s [%s]", name, dlid);
	if (!_info(dlid, 1, 0, &info, NULL)) {
		log_error("Failed to get info for %s [%s].", name, dlid);
		return 0;
	}

	/*
	 * For top-level volumes verify that the existing device matches the
	 * requested major/minor and that the major/minor pair is available for use
	 */
	if (!layer && lv->major != -1 && lv->minor != -1) {
		/*
		 * FIXME compare info.major with lv->major if multiple major support
		 */
		if (info.exists && (info.minor != lv->minor)) {
			log_error("Volume %s (%" PRIu32 ":%" PRIu32")"
				  " differs from already active device "
				  "(%" PRIu32 ":%" PRIu32")",
				  lv->name, lv->major, lv->minor, info.major, info.minor);
			return 0;
		}
		if (!info.exists && _info_by_dev(lv->major, lv->minor, &info2) &&
		    info2.exists) {
			log_error("The requested major:minor pair "
				  "(%" PRIu32 ":%" PRIu32") is already used",
				  lv->major, lv->minor);
			return 0;
		}
	}

	if (info.exists && !dm_tree_add_dev_with_udev_flags(dtree, info.major, info.minor,
							    _get_udev_flags(dm, lv, layer))) {
		log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree",
			  info.major, info.minor);
		return 0;
	}

	return 1;
}

/*
 * Add replicator devices
 *
 * Using _add_dev_to_dtree() directly instead of _add_lv_to_dtree()
 * to avoid extra checks with extensions.
 */
static int _add_partial_replicator_to_dtree(struct dev_manager *dm,
					    struct dm_tree *dtree,
					    struct logical_volume *lv)
{
	struct logical_volume *rlv = first_seg(lv)->replicator;
	struct replicator_device *rdev;
	struct replicator_site *rsite;
	struct dm_tree_node *rep_node, *rdev_node;
	const char *uuid;

	if (!lv_is_active_replicator_dev(lv)) {
		if (!_add_dev_to_dtree(dm, dtree, lv->rdevice->lv,
				       NULL))
			return_0;
		return 1;
	}

	/* Add _rlog and replicator device */
	if (!_add_dev_to_dtree(dm, dtree, first_seg(rlv)->rlog_lv, NULL))
		return_0;

	if (!_add_dev_to_dtree(dm, dtree, rlv, NULL))
		return_0;

	if (!(uuid = build_dm_uuid(dm->mem, rlv->lvid.s, NULL)))
		return_0;

	rep_node = dm_tree_find_node_by_uuid(dtree, uuid);

	/* Add all related devices for replicator */
	dm_list_iterate_items(rsite, &rlv->rsites)
		dm_list_iterate_items(rdev, &rsite->rdevices) {
			if (rsite->state == REPLICATOR_STATE_ACTIVE) {
				/* Add _rimage LV */
				if (!_add_dev_to_dtree(dm, dtree, rdev->lv, NULL))
					return_0;

				/* Add replicator-dev LV, except for the one already added */
				if ((lv != rdev->replicator_dev->lv) &&
				    !_add_dev_to_dtree(dm, dtree,
						       rdev->replicator_dev->lv, NULL))
					return_0;

				/* If replicator exists - try to connect existing heads */
				if (rep_node) {
					uuid = build_dm_uuid(dm->mem,
							     rdev->replicator_dev->lv->lvid.s,
							     NULL);
					if (!uuid)
						return_0;

					rdev_node = dm_tree_find_node_by_uuid(dtree, uuid);
					if (rdev_node)
						dm_tree_node_set_presuspend_node(rdev_node,
										 rep_node);
				}
			}

			if (!rdev->rsite->vg_name)
				continue;

			if (!_add_dev_to_dtree(dm, dtree, rdev->lv, NULL))
				return_0;

			if (rdev->slog &&
			    !_add_dev_to_dtree(dm, dtree, rdev->slog, NULL))
				return_0;
		}

	return 1;
}

struct thin_cb_data {
	const struct logical_volume *pool_lv;
	struct dev_manager *dm;
};

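/*
 * Node callback invoked by the dm tree code: runs the configured
 * thin_check executable against the pool's metadata device (after a
 * preload, or once the pool is deactivated) and reports any failure.
 */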
static int _thin_pool_callback(struct dm_tree_node *node,
			       dm_node_callback_t type, void *cb_data)
{
	int ret, status;
	const struct thin_cb_data *data = cb_data;
	const char *dmdir = dm_dir();
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;
	const char *thin_check =
		find_config_tree_str_allow_empty(data->pool_lv->vg->cmd,
						 "global/thin_check_executable",
						 THIN_CHECK_CMD);
	const struct logical_volume *mlv = first_seg(data->pool_lv)->metadata_lv;
	size_t len = strlen(dmdir) + 2 * (strlen(mlv->vg->name) + strlen(mlv->name)) + 3;
	char meta_path[len];
	int args = 0;
	const char *argv[19]; /* command + up to 15 options + metadata path + NULL; one spare slot for the rejected 16th option */
	char *split, *dm_name;

	if (!thin_check[0])
		return 1; /* Checking disabled */

	if (!(dm_name = dm_build_dm_name(data->dm->mem, mlv->vg->name,
					 mlv->name, NULL)) ||
	    (dm_snprintf(meta_path, len, "%s/%s", dmdir, dm_name) < 0)) {
		log_error("Failed to build thin metadata path.");
		return 0;
	}

	if ((cn = find_config_tree_node(mlv->vg->cmd, "global/thin_check_options"))) {
		for (cv = cn->v; cv && args < 16; cv = cv->next) {
			if (cv->type != DM_CFG_STRING) {
				log_error("Invalid string in config file: "
					  "global/thin_check_options");
				return 0;
			}
			argv[++args] = cv->v.str;
		}
	} else {
		/* Use default options (no support for options with spaces) */
		if (!(split = dm_pool_strdup(data->dm->mem, DEFAULT_THIN_CHECK_OPTIONS))) {
			log_error("Failed to duplicate thin check string.");
			return 0;
		}
		args = dm_split_words(split, 16, 0, (char **) argv + 1);
	}

	if (args == 16) {
		log_error("Too many options for thin check command.");
		return 0;
	}

	argv[0] = thin_check;
	argv[++args] = meta_path;
	argv[++args] = NULL;

	if (!(ret = exec_cmd(data->pool_lv->vg->cmd, (const char * const *) argv,
			     &status, 0))) {
		switch (type) {
		case DM_NODE_CALLBACK_PRELOADED:
			log_err_once("Check of thin pool %s/%s failed (status:%d). "
				     "Manual repair required (thin_dump --repair %s)!",
				     data->pool_lv->vg->name, data->pool_lv->name,
				     status, meta_path);
			break;
		default:
			log_warn("WARNING: Integrity check of metadata for thin pool "
				 "%s/%s failed.",
				 data->pool_lv->vg->name, data->pool_lv->name);
		}
		/*
		 * FIXME: What should we do here??
		 *
		 * Maybe mark the node, so it's not activating
		 * as thin_pool but as error/linear and let the
		 * dm tree resolve the issue.
		 */
	}

	dm_pool_free(data->dm->mem, dm_name);

	return ret;
}

static int _thin_pool_register_callback(struct dev_manager *dm,
					struct dm_tree_node *node,
					const struct logical_volume *lv)
{
	struct thin_cb_data *data;

	/* Skip metadata testing for unused pool. */
	if (!first_seg(lv)->transaction_id)
		return 1;

	if (!(data = dm_pool_alloc(dm->mem, sizeof(*data)))) {
		log_error("Failed to allocate thin pool callback data.");
		return 0;
	}

	data->dm = dm;
	data->pool_lv = lv;

	dm_tree_node_set_callback(node, _thin_pool_callback, data);

	return 1;
}

/*
 * Add LV and any known dependencies
 */
static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
			    struct logical_volume *lv, int origin_only)
{
	uint32_t s;
	struct seg_list *sl;
	struct lv_segment *seg = first_seg(lv);
	struct dm_tree_node *thin_node;
	const char *uuid;

	if ((!origin_only || lv_is_thin_volume(lv)) &&
	    !_add_dev_to_dtree(dm, dtree, lv, NULL))
		return_0;

	/* FIXME Can we avoid doing this every time? */
	if (!_add_dev_to_dtree(dm, dtree, lv, "real"))
		return_0;

	if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, "cow"))
		return_0;

	if ((lv->status & MIRRORED) && seg->log_lv &&
	    !_add_dev_to_dtree(dm, dtree, seg->log_lv, NULL))
		return_0;

	if (lv->status & RAID)
		for (s = 0; s < seg->area_count; s++)
			if (!_add_dev_to_dtree(dm, dtree,
					       seg_metalv(seg, s), NULL))
				return_0;

	/* Add any LVs referencing a PVMOVE LV unless told not to. */
	if (dm->track_pvmove_deps && lv->status & PVMOVE)
		dm_list_iterate_items(sl, &lv->segs_using_this_lv)
			if (!_add_lv_to_dtree(dm, dtree, sl->seg->lv, origin_only))
				return_0;

	/* Adding LV head of replicator adds all other related devs */
	if (lv_is_replicator_dev(lv) &&
	    !_add_partial_replicator_to_dtree(dm, dtree, lv))
		return_0;

	if (lv_is_thin_volume(lv)) {
#if 0
		/* FIXME Implement dm_tree_node_skip_children optimisation */
		if (origin_only) {
			if (!(uuid = build_dm_uuid(dm->mem, lv->lvid.s, NULL)))
				return_0;
			if ((thin_node = dm_tree_find_node_by_uuid(dtree, uuid)))
				dm_tree_node_skip_children(thin_node, 1);
		}
#endif
		/* Add thin pool LV layer */
		lv = seg->pool_lv;
		seg = first_seg(lv);
	}

	if (lv_is_thin_pool(lv)) {
		if (!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
			return_0;
		/* FIXME code from _create_partial_dtree() should be moved here */
		if (!_add_lv_to_dtree(dm, dtree, seg_lv(seg, 0), 0))
			return_0;
		if (!_add_dev_to_dtree(dm, dtree, lv, _thin_layer))
			return_0;
		/* If the partial tree is used for deactivation, setup callback */
		if (!(uuid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
			return_0;
		if ((thin_node = dm_tree_find_node_by_uuid(dtree, uuid)) &&
		    !_thin_pool_register_callback(dm, thin_node, lv))
			return_0;
	}

	return 1;
}

static struct dm_tree *_create_partial_dtree(struct dev_manager *dm, struct logical_volume *lv, int origin_only)
{
	struct dm_tree *dtree;
	struct dm_list *snh;
	struct lv_segment *seg;
	uint32_t s;

	if (!(dtree = dm_tree_create())) {
		log_debug("Partial dtree creation failed for %s.", lv->name);
		return NULL;
	}

	if (!_add_lv_to_dtree(dm, dtree, lv, (lv_is_origin(lv) || lv_is_thin_volume(lv)) ? origin_only : 0))
		goto_bad;

	/* Add any snapshots of this LV */
	if (!origin_only && lv_is_origin(lv))
		dm_list_iterate(snh, &lv->snapshot_segs)
			if (!_add_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow, 0))
				goto_bad;

	/* Add any LVs used by segments in this LV */
	dm_list_iterate_items(seg, &lv->segments)
		for (s = 0; s < seg->area_count; s++)
			if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s)) {
				if (!_add_lv_to_dtree(dm, dtree, seg_lv(seg, s), 0))
					goto_bad;
			}

	return dtree;

bad:
	dm_tree_free(dtree);
	return NULL;
}

static char *_add_error_device(struct dev_manager *dm, struct dm_tree *dtree,
			       struct lv_segment *seg, int s)
{
	char *dlid, *name;
	char errid[32];
	struct dm_tree_node *node;
	struct lv_segment *seg_i;
	struct dm_info info;
	int segno = -1, i = 0;
	uint64_t size = (uint64_t) seg->len * seg->lv->vg->extent_size;

	dm_list_iterate_items(seg_i, &seg->lv->segments) {
		if (seg == seg_i)
			segno = i;
		++i;
	}

	if (segno < 0) {
		log_error("_add_error_device called with bad segment");
		return NULL;
	}

	sprintf(errid, "missing_%d_%d", segno, s);

	if (!(dlid = build_dm_uuid(dm->mem, seg->lv->lvid.s, errid)))
		return_NULL;

	if (!(name = dm_build_dm_name(dm->mem, seg->lv->vg->name,
				      seg->lv->name, errid)))
		return_NULL;

	log_debug("Getting device info for %s [%s]", name, dlid);
	if (!_info(dlid, 1, 0, &info, NULL)) {
		log_error("Failed to get info for %s [%s].", name, dlid);
		return NULL;
	}

	if (!info.exists) {
		/* Create new node */
		if (!(node = dm_tree_add_new_dev(dtree, name, dlid, 0, 0, 0, 0, 0)))
			return_NULL;
		if (!dm_tree_node_add_error_target(node, size))
			return_NULL;
	} else {
		/* Already exists */
		if (!dm_tree_add_dev(dtree, info.major, info.minor)) {
			log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree",
				  info.major, info.minor);
			return_NULL;
		}
	}

	return dlid;
}

static int _add_error_area(struct dev_manager *dm, struct dm_tree_node *node,
			   struct lv_segment *seg, int s)
{
	char *dlid;
	uint64_t extent_size = seg->lv->vg->extent_size;

	if (!strcmp(dm->cmd->stripe_filler, "error")) {
		/*
		 * FIXME, the tree pointer is the first field of dm_tree_node, but
		 * we don't have the struct definition available.
		 */
		struct dm_tree **tree = (struct dm_tree **) node;
		if (!(dlid = _add_error_device(dm, *tree, seg, s)))
			return_0;
		if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
			return_0;
	} else
		if (!dm_tree_node_add_target_area(node, dm->cmd->stripe_filler, NULL, UINT64_C(0)))
			return_0;

	return 1;
}

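/*
 * Emit one target-table area per segment area: real PV areas map to
 * their devices, missing PV/LV areas become error (or stripe_filler)
 * areas when --partial activation permits, and RAID areas emit
 * metadata/data pairs ('- -' for missing or extracted images).
 */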
int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
		   struct dm_tree_node *node, uint32_t start_area,
		   uint32_t areas)
{
	uint64_t extent_size = seg->lv->vg->extent_size;
	uint32_t s;
	char *dlid;
	struct stat info;
	const char *name;
	unsigned num_error_areas = 0;
	unsigned num_existing_areas = 0;

	/* FIXME Avoid repeating identical stat in dm_tree_node_add_target_area */
	for (s = start_area; s < areas; s++) {
		if ((seg_type(seg, s) == AREA_PV &&
		     (!seg_pvseg(seg, s) || !seg_pv(seg, s) || !seg_dev(seg, s) ||
		      !(name = dev_name(seg_dev(seg, s))) || !*name ||
		      stat(name, &info) < 0 || !S_ISBLK(info.st_mode))) ||
		    (seg_type(seg, s) == AREA_LV && !seg_lv(seg, s))) {
			if (!seg->lv->vg->cmd->partial_activation) {
				log_error("Aborting. LV %s is now incomplete "
					  "and --partial was not specified.", seg->lv->name);
				return 0;
			}
			if (!_add_error_area(dm, node, seg, s))
				return_0;
			num_error_areas++;
		} else if (seg_type(seg, s) == AREA_PV) {
			if (!dm_tree_node_add_target_area(node, dev_name(seg_dev(seg, s)), NULL,
							  (seg_pv(seg, s)->pe_start + (extent_size * seg_pe(seg, s)))))
				return_0;
			num_existing_areas++;
		} else if (seg_is_raid(seg)) {
			/*
			 * RAID can handle unassigned areas. It simply puts
			 * '- -' in for the metadata/data device pair. This
			 * is a valid way to indicate to the RAID target that
			 * the device is missing.
			 *
			 * If an image is marked as VISIBLE_LV and !LVM_WRITE,
			 * it means the device has temporarily been extracted
			 * from the array. It may come back at a future date,
			 * so the bitmap must track differences. Again, '- -'
			 * is used in the CTR table.
			 */
			if ((seg_type(seg, s) == AREA_UNASSIGNED) ||
			    ((seg_lv(seg, s)->status & VISIBLE_LV) &&
			     !(seg_lv(seg, s)->status & LVM_WRITE))) {
				/* One each for metadata area and data area */
				if (!dm_tree_node_add_null_area(node, 0) ||
				    !dm_tree_node_add_null_area(node, 0))
					return_0;
				continue;
			}
			if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s)->lvid.s, NULL)))
				return_0;
			if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
				return_0;

			if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s)->lvid.s, NULL)))
				return_0;
			if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
				return_0;
		} else if (seg_type(seg, s) == AREA_LV) {

			if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s)->lvid.s, NULL)))
				return_0;
			if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
				return_0;
		} else {
			log_error(INTERNAL_ERROR "Unassigned area found in LV %s.",
				  seg->lv->name);
			return 0;
		}
	}

	if (num_error_areas) {
		/* Thins currently do not support partial activation */
		if (lv_is_thin_type(seg->lv)) {
			log_error("Cannot activate %s/%s: pool incomplete.",
				  seg->lv->vg->name, seg->lv->name);
			return 0;
		}
	}

	return 1;
}

static int _add_origin_target_to_dtree(struct dev_manager *dm,
				       struct dm_tree_node *dnode,
				       struct logical_volume *lv)
{
	const char *real_dlid;

	if (!(real_dlid = build_dm_uuid(dm->mem, lv->lvid.s, "real")))
		return_0;

	if (!dm_tree_node_add_snapshot_origin_target(dnode, lv->size, real_dlid))
		return_0;

	return 1;
}

static int _add_snapshot_merge_target_to_dtree(struct dev_manager *dm,
					       struct dm_tree_node *dnode,
					       struct logical_volume *lv)
{
	const char *origin_dlid, *cow_dlid, *merge_dlid;
	struct lv_segment *merging_cow_seg = find_merging_cow(lv);

	if (!(origin_dlid = build_dm_uuid(dm->mem, lv->lvid.s, "real")))
		return_0;

	if (!(cow_dlid = build_dm_uuid(dm->mem, merging_cow_seg->cow->lvid.s, "cow")))
		return_0;

	if (!(merge_dlid = build_dm_uuid(dm->mem, merging_cow_seg->cow->lvid.s, NULL)))
		return_0;

	if (!dm_tree_node_add_snapshot_merge_target(dnode, lv->size, origin_dlid,
						    cow_dlid, merge_dlid,
						    merging_cow_seg->chunk_size))
		return_0;

	return 1;
}

static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
					 struct dm_tree_node *dnode,
					 struct logical_volume *lv,
					 struct lv_activate_opts *laopts)
{
	const char *origin_dlid;
	const char *cow_dlid;
	struct lv_segment *snap_seg;
	uint64_t size;

	if (!(snap_seg = find_cow(lv))) {
		log_error("Couldn't find snapshot for '%s'.", lv->name);
		return 0;
	}

	if (!(origin_dlid = build_dm_uuid(dm->mem, snap_seg->origin->lvid.s, "real")))
		return_0;

	if (!(cow_dlid = build_dm_uuid(dm->mem, snap_seg->cow->lvid.s, "cow")))
		return_0;

	size = (uint64_t) snap_seg->len * snap_seg->origin->vg->extent_size;

	if (!laopts->no_merging && lv_is_merging_cow(lv)) {
		/* cow is to be merged so load the error target */
		if (!dm_tree_node_add_error_target(dnode, size))
			return_0;
	} else if (!dm_tree_node_add_snapshot_target(dnode, size, origin_dlid,
						     cow_dlid, 1, snap_seg->chunk_size))
		return_0;

	return 1;
}

static int _add_target_to_dtree(struct dev_manager *dm,
				struct dm_tree_node *dnode,
				struct lv_segment *seg,
				struct lv_activate_opts *laopts)
{
	uint64_t extent_size = seg->lv->vg->extent_size;

	if (!seg->segtype->ops->add_target_line) {
		log_error(INTERNAL_ERROR "_emit_target cannot handle "
			  "segment type %s", seg->segtype->name);
		return 0;
	}

	return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
						  &dm->target_state, seg,
						  laopts, dnode,
						  extent_size * seg->len,
						  &dm->pvmove_mirror_count);
}

static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
				struct logical_volume *lv,
				struct lv_activate_opts *laopts,
				const char *layer);

/* Add all replicators' LVs */
static int _add_replicator_dev_target_to_dtree(struct dev_manager *dm,
					       struct dm_tree *dtree,
					       struct lv_segment *seg,
					       struct lv_activate_opts *laopts)
{
	struct replicator_device *rdev;
	struct replicator_site *rsite;

	/* For an inactive replicator add a linear mapping */
	if (!lv_is_active_replicator_dev(seg->lv)) {
		if (!_add_new_lv_to_dtree(dm, dtree, seg->lv->rdevice->lv, laopts, NULL))
			return_0;
		return 1;
	}

	/* Add rlog and replicator nodes */
	if (!seg->replicator ||
	    !first_seg(seg->replicator)->rlog_lv ||
	    !_add_new_lv_to_dtree(dm, dtree,
				  first_seg(seg->replicator)->rlog_lv,
				  laopts, NULL) ||
	    !_add_new_lv_to_dtree(dm, dtree, seg->replicator, laopts, NULL))
		return_0;

	/* Activation of one replicator_dev node activates all other nodes */
	dm_list_iterate_items(rsite, &seg->replicator->rsites) {
		dm_list_iterate_items(rdev, &rsite->rdevices) {
			if (rdev->lv &&
			    !_add_new_lv_to_dtree(dm, dtree, rdev->lv,
						  laopts, NULL))
				return_0;

			if (rdev->slog &&
			    !_add_new_lv_to_dtree(dm, dtree, rdev->slog,
						  laopts, NULL))
				return_0;
		}
	}
	/* Add remaining replicator-dev nodes in a second loop
	 * to avoid multiple retries when inserting all elements */
	dm_list_iterate_items(rsite, &seg->replicator->rsites) {
		if (rsite->state != REPLICATOR_STATE_ACTIVE)
			continue;
		dm_list_iterate_items(rdev, &rsite->rdevices) {
			if (rdev->replicator_dev->lv == seg->lv)
				continue;
			if (!rdev->replicator_dev->lv ||
			    !_add_new_lv_to_dtree(dm, dtree,
						  rdev->replicator_dev->lv,
						  laopts, NULL))
				return_0;
		}
	}

	return 1;
}

static int _add_segment_to_dtree(struct dev_manager *dm,
				 struct dm_tree *dtree,
				 struct dm_tree_node *dnode,
				 struct lv_segment *seg,
				 struct lv_activate_opts *laopts,
				 const char *layer)
{
	uint32_t s;
	struct dm_list *snh;
	struct lv_segment *seg_present;
	const char *target_name;
	struct lv_activate_opts lva;

	/* Ensure required device-mapper targets are loaded */
	seg_present = find_cow(seg->lv) ? : seg;
	target_name = (seg_present->segtype->ops->target_name ?
		       seg_present->segtype->ops->target_name(seg_present, laopts) :
		       seg_present->segtype->name);

	log_debug("Checking kernel supports %s segment type for %s%s%s",
		  target_name, seg->lv->name,
		  layer ? "-" : "", layer ? : "");

	if (seg_present->segtype->ops->target_present &&
	    !seg_present->segtype->ops->target_present(seg_present->lv->vg->cmd,
						       seg_present, NULL)) {
		log_error("Can't process LV %s: %s target support missing "
			  "from kernel?", seg->lv->name, target_name);
		return 0;
	}

	/* Add mirror log */
	if (seg->log_lv &&
	    !_add_new_lv_to_dtree(dm, dtree, seg->log_lv, laopts, NULL))
		return_0;

	if (seg_is_replicator_dev(seg)) {
		if (!_add_replicator_dev_target_to_dtree(dm, dtree, seg, laopts))
			return_0;
	/* If this is a snapshot origin, add real LV */
	/* If this is a snapshot origin + merging snapshot, add cow + real LV */
	} else if (lv_is_origin(seg->lv) && !layer) {
		if (!laopts->no_merging && lv_is_merging_origin(seg->lv)) {
			if (!_add_new_lv_to_dtree(dm, dtree,
						  find_merging_cow(seg->lv)->cow, laopts, "cow"))
				return_0;
			/*
			 * Must also add "real" LV for use when
			 * snapshot-merge target is added
			 */
		}
		if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, laopts, "real"))
			return_0;
	} else if (lv_is_cow(seg->lv) && !layer) {
		if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, laopts, "cow"))
			return_0;
	} else if ((layer != _thin_layer) && seg_is_thin(seg)) {
		lva = *laopts;
		lva.real_pool = 1;
		if (!_add_new_lv_to_dtree(dm, dtree, seg_is_thin_pool(seg) ?
					  seg->lv : seg->pool_lv, &lva, _thin_layer))
			return_0;
	} else {
		if (seg_is_thin_pool(seg) &&
		    !_add_new_lv_to_dtree(dm, dtree, seg->metadata_lv, laopts, NULL))
			return_0;

		/* Add any LVs used by this segment */
		for (s = 0; s < seg->area_count; s++) {
			if ((seg_type(seg, s) == AREA_LV) &&
			    (!_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
						   laopts, NULL)))
				return_0;
			if (seg_is_raid(seg) &&
			    !_add_new_lv_to_dtree(dm, dtree, seg_metalv(seg, s),
						  laopts, NULL))
				return_0;
		}
	}

	/* Now we've added its dependencies, we can add the target itself */
	if (lv_is_origin(seg->lv) && !layer) {
		if (laopts->no_merging || !lv_is_merging_origin(seg->lv)) {
			if (!_add_origin_target_to_dtree(dm, dnode, seg->lv))
				return_0;
		} else {
			if (!_add_snapshot_merge_target_to_dtree(dm, dnode, seg->lv))
				return_0;
		}
	} else if (lv_is_cow(seg->lv) && !layer) {
		if (!_add_snapshot_target_to_dtree(dm, dnode, seg->lv, laopts))
			return_0;
	} else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
		return_0;

	if (lv_is_origin(seg->lv) && !layer)
		/* Add any snapshots of this LV */
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!_add_new_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow,
						  laopts, NULL))
				return_0;

	return 1;
}

static int _set_udev_flags_for_children(struct dev_manager *dm,
					struct volume_group *vg,
					struct dm_tree_node *dnode)
{
	char *p;
	const char *uuid;
	void *handle = NULL;
	struct dm_tree_node *child;
	const struct dm_info *info;
	struct lv_list *lvl;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		/* Ignore root node */
		if (!(info = dm_tree_node_get_info(child)) || !info->exists)
			continue;

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			log_error(INTERNAL_ERROR
				  "Failed to get uuid for %" PRIu32 ":%" PRIu32,
				  info->major, info->minor);
			continue;
		}

		/* Ignore non-LVM devices */
		if (!(p = strstr(uuid, UUID_PREFIX)))
			continue;
		p += strlen(UUID_PREFIX);

		/* Ignore LVs that belong to different VGs (due to stacking) */
		if (strncmp(p, (char *) vg->id.uuid, ID_LEN))
			continue;

		/* Ignore LVM devices with 'layer' suffixes */
		if (strrchr(p, '-'))
			continue;

		if (!(lvl = find_lv_in_vg_by_lvid(vg, (const union lvid *) p))) {
			log_error(INTERNAL_ERROR
				  "%s (%" PRIu32 ":%" PRIu32 ") not found in VG",
				  dm_tree_node_get_name(child),
				  info->major, info->minor);
			return 0;
		}

		dm_tree_node_set_udev_flags(child,
					    _get_udev_flags(dm, lvl->lv, NULL));
	}

	return 1;
}

1902 static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
1903 struct logical_volume *lv, struct lv_activate_opts *laopts,
1904 const char *layer)
1905 {
1906 struct lv_segment *seg;
1907 struct lv_layer *lvlayer;
1908 struct seg_list *sl;
1909 struct dm_tree_node *dnode;
1910 const struct dm_info *dinfo;
1911 char *name, *dlid;
1912 uint32_t max_stripe_size = UINT32_C(0);
1913 uint32_t read_ahead = lv->read_ahead;
1914 uint32_t read_ahead_flags = UINT32_C(0);
1915
1916 /* FIXME Seek a simpler way to lay out the snapshot-merge tree. */
1917
1918 if (lv_is_origin(lv) && lv_is_merging_origin(lv) && !layer) {
1919 /*
1920 * Clear merge attributes if merge isn't currently possible:
1921 * either origin or merging snapshot are open
1922 * - but use "snapshot-merge" if it is already in use
1923 * - open_count is always retrieved (as of dm-ioctl 4.7.0)
1924 * so just use the tree's existing nodes' info
1925 */
1926 if (((dinfo = _cached_info(dm->mem, lv,
1927 dtree)) && dinfo->open_count) ||
1928 ((dinfo = _cached_info(dm->mem, find_merging_cow(lv)->cow,
1929 dtree)) && dinfo->open_count)) {
1930 /* FIXME Is there anything simpler to check for instead? */
1931 if (!lv_has_target_type(dm->mem, lv, NULL, "snapshot-merge"))
1932 laopts->no_merging = 1;
1933 }
1934 }
1935
1936 if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
1937 return_0;
1938
1939 if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
1940 return_0;
1941
	/* We've already processed this node if it has a context ptr. */
	if ((dnode = dm_tree_find_node_by_uuid(dtree, dlid)) &&
	    dm_tree_node_get_context(dnode))
		return 1;

	if (!(lvlayer = dm_pool_alloc(dm->mem, sizeof(*lvlayer)))) {
		log_error("_add_new_lv_to_dtree: pool alloc failed for %s %s.",
			  lv->name, layer);
		return 0;
	}

	lvlayer->lv = lv;

	/*
	 * Add the LV to the dtree.
	 * If we're working with precommitted metadata, clear any
	 * existing inactive table left behind.
	 * Major/minor settings only apply to the visible layer.
	 */
	/* FIXME Move the clear from here until later, so we can leave
	 * identical inactive tables untouched. (For pvmove.)
	 */
	if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid,
					layer ? UINT32_C(0) : (uint32_t) lv->major,
					layer ? UINT32_C(0) : (uint32_t) lv->minor,
					read_only_lv(lv, laopts),
					((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
					lvlayer,
					_get_udev_flags(dm, lv, layer))))
		return_0;

	/* Store the existing name so we can rename it later. */
	lvlayer->old_name = dm_tree_node_get_name(dnode);

	/* Create the table. */
	dm->pvmove_mirror_count = 0u;
	dm_list_iterate_items(seg, &lv->segments) {
		if (!_add_segment_to_dtree(dm, dtree, dnode, seg, laopts, layer))
			return_0;
		/* These aren't real segments in the LVM2 metadata */
		if (lv_is_origin(lv) && !layer)
			break;
		if (!laopts->no_merging && lv_is_cow(lv) && !layer)
			break;
		if (max_stripe_size < seg->stripe_size * seg->area_count)
			max_stripe_size = seg->stripe_size * seg->area_count;
	}

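	/*
	 * Illustrative arithmetic (values hypothetical): a 3-way stripe with
	 * a 64KiB stripe unit has seg->stripe_size = 128 sectors and
	 * seg->area_count = 3, so max_stripe_size = 384 sectors and the
	 * automatic read-ahead below becomes 768 sectors (384KiB), i.e.
	 * twice one whole stripe.
	 */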
	if (read_ahead == DM_READ_AHEAD_AUTO) {
		/* We need read-ahead of at least twice a whole stripe - see the comment in md/raid0.c */
		read_ahead = max_stripe_size * 2;
		if (!read_ahead)
			lv_calculate_readahead(lv, &read_ahead);
		read_ahead_flags = DM_READ_AHEAD_MINIMUM_FLAG;
	}

	dm_tree_node_set_read_ahead(dnode, read_ahead, read_ahead_flags);

	/* Set up the thin pool callback. */
	if (layer && lv_is_thin_pool(lv) &&
	    !_thin_pool_register_callback(dm, dnode, lv))
		return_0;

	/* Add any LVs referencing a PVMOVE LV unless told not to. */
	if (dm->track_pvmove_deps && (lv->status & PVMOVE))
		dm_list_iterate_items(sl, &lv->segs_using_this_lv)
			if (!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv, laopts, NULL))
				return_0;

	if (!_set_udev_flags_for_children(dm, lv->vg, dnode))
		return_0;

	return 1;
}

/* FIXME: symlinks should be created/destroyed at the same time as the
 * kernel devices, but we can't do that from within libdevmapper at
 * present, so we must walk the tree twice instead. */

/*
 * Create LV symlinks for children of the supplied root node.
 */
static int _create_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
{
	void *handle = NULL;
	struct dm_tree_node *child;
	struct lv_layer *lvlayer;
	char *old_vgname, *old_lvname, *old_layer;
	char *new_vgname, *new_lvname, *new_layer;
	const char *name;
	int r = 1;

	/* Nothing to do if udev fallback is disabled. */
	if (!dm->cmd->current_settings.udev_fallback) {
		fs_set_create();
		return 1;
	}

	while ((child = dm_tree_next_child(&handle, root, 0))) {
		if (!(lvlayer = dm_tree_node_get_context(child)))
			continue;

		/* Detect rename */
		name = dm_tree_node_get_name(child);

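		/*
		 * old_name was captured when the node was added to the tree,
		 * so a mismatch here means the device has since been renamed
		 * and its /dev symlinks must be moved to match.
		 */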
		if (name && lvlayer->old_name && *lvlayer->old_name && strcmp(name, lvlayer->old_name)) {
			if (!dm_split_lvm_name(dm->mem, lvlayer->old_name, &old_vgname, &old_lvname, &old_layer)) {
				log_error("_create_lv_symlinks: Couldn't split up old device name %s", lvlayer->old_name);
				return 0;
			}
			if (!dm_split_lvm_name(dm->mem, name, &new_vgname, &new_lvname, &new_layer)) {
				log_error("_create_lv_symlinks: Couldn't split up new device name %s", name);
				return 0;
			}
			if (!fs_rename_lv(lvlayer->lv, name, old_vgname, old_lvname))
				r = 0;
			continue;
		}
		if (lv_is_visible(lvlayer->lv)) {
			if (!_dev_manager_lv_mknodes(lvlayer->lv))
				r = 0;
			continue;
		}
		if (!_dev_manager_lv_rmnodes(lvlayer->lv))
			r = 0;
	}

	return r;
}

/*
 * Remove LV symlinks for children of the supplied root node.
 */
static int _remove_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root)
{
	void *handle = NULL;
	struct dm_tree_node *child;
	char *vgname, *lvname, *layer;
	int r = 1;

	/* Nothing to do if udev fallback is disabled. */
	if (!dm->cmd->current_settings.udev_fallback)
		return 1;

	while ((child = dm_tree_next_child(&handle, root, 0))) {
		if (!dm_split_lvm_name(dm->mem, dm_tree_node_get_name(child), &vgname, &lvname, &layer)) {
			r = 0;
			continue;
		}

		if (!*vgname)
			continue;

		/* Only the top-level layer has symlinks. */
		if (*layer)
			continue;

		fs_del_lv_byname(dm->cmd->dev_dir, vgname, lvname,
				 dm->cmd->current_settings.udev_rules);
	}

	return r;
}

static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, char *non_toplevel_tree_dlid)
{
	void *handle = NULL;
	struct dm_tree_node *child;
	char *vgname, *lvname, *layer;
	const char *name, *uuid;

	while ((child = dm_tree_next_child(&handle, root, 0))) {
		if (!(name = dm_tree_node_get_name(child)))
			continue;

		if (!(uuid = dm_tree_node_get_uuid(child)))
			continue;

		if (!dm_split_lvm_name(dm->mem, name, &vgname, &lvname, &layer)) {
			log_error("_clean_tree: Couldn't split up device name %s.", name);
			return 0;
		}

		/* Not meant to be top level? */
		if (!*layer)
			continue;

		/* If the operation was performed on a partial tree, don't remove it. */
		if (non_toplevel_tree_dlid && !strcmp(non_toplevel_tree_dlid, uuid))
			continue;
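		/*
		 * With origin_only, the dlid passed in here is that of the
		 * "-real" layer itself, so that layer survives the cleanup
		 * pass while other unused layers are deactivated.
		 */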

		if (!dm_tree_deactivate_children(root, uuid, strlen(uuid)))
			return_0;
	}

	return 1;
}

static int _tree_action(struct dev_manager *dm, struct logical_volume *lv,
			struct lv_activate_opts *laopts, action_t action)
{
	const size_t DLID_SIZE = ID_LEN + sizeof(UUID_PREFIX) - 1;
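	/*
	 * DLID_SIZE spans "LVM-" plus the VG id (ID_LEN bytes) - 36 with
	 * the usual definitions - so the dm_tree_*_children() calls below
	 * match every node whose uuid belongs to this VG. (Sizes
	 * illustrative.)
	 */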
	struct dm_tree *dtree;
	struct dm_tree_node *root;
	char *dlid;
	int r = 0;

	laopts->is_activate = (action == ACTIVATE);

	if (!(dtree = _create_partial_dtree(dm, lv, laopts->origin_only)))
		return_0;

	if (!(root = dm_tree_find_node(dtree, 0, 0))) {
		log_error("Lost dependency tree root node");
		goto out_no_root;
	}

	/* Restore the fs cookie. */
	dm_tree_set_cookie(root, fs_get_cookie());

	if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, (lv_is_origin(lv) && laopts->origin_only) ? "real" : NULL)))
		goto_out;

	/* Only process nodes with a uuid of "LVM-" plus the VG id. */
	switch (action) {
	case CLEAN:
		/* Deactivate any unused non-toplevel nodes. */
		if (!_clean_tree(dm, root, laopts->origin_only ? dlid : NULL))
			goto_out;
		break;
	case DEACTIVATE:
		if (retry_deactivation())
			dm_tree_retry_remove(root);
		/* Deactivate the LV and all devices it references that nothing else has open. */
		if (!dm_tree_deactivate_children(root, dlid, DLID_SIZE))
			goto_out;
		if (!_remove_lv_symlinks(dm, root))
			log_warn("Failed to remove all device symlinks associated with %s.", lv->name);
		break;
	case SUSPEND:
		dm_tree_skip_lockfs(root);
		if (!dm->flush_required && !seg_is_raid(first_seg(lv)) &&
		    (lv->status & MIRRORED) && !(lv->status & PVMOVE))
			dm_tree_use_no_flush_suspend(root);
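		/*
		 * Rationale (a summary, not from the original comment):
		 * flushing while suspending a mirror can block indefinitely
		 * if a leg has failed, so plain mirrors - but not pvmove or
		 * raid - are suspended without a flush when none is required.
		 */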
		/* Fall through */
	case SUSPEND_WITH_LOCKFS:
		if (!dm_tree_suspend_children(root, dlid, DLID_SIZE))
			goto_out;
		break;
	case PRELOAD:
	case ACTIVATE:
		/* Add all required new devices to the tree. */
		if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, (lv_is_origin(lv) && laopts->origin_only) ? "real" : NULL))
			goto_out;

		/* Preload any devices required before any suspensions. */
		if (!dm_tree_preload_children(root, dlid, DLID_SIZE))
			goto_out;

		if (dm_tree_node_size_changed(root))
			dm->flush_required = 1;

		if (action == ACTIVATE) {
			if (!dm_tree_activate_children(root, dlid, DLID_SIZE))
				goto_out;
			if (!_create_lv_symlinks(dm, root))
				log_warn("Failed to create symlinks for %s.", lv->name);
		}

		break;
	default:
		log_error("_tree_action: Action %u not supported.", action);
		goto out;
	}

	r = 1;

out:
	/* Save the fs cookie for udev settle; do not wait here. */
	fs_set_cookie(dm_tree_get_cookie(root));
out_no_root:
	dm_tree_free(dtree);

	return r;
}

/* origin_only may only be set if we are resuming (not activating) an origin LV. */
int dev_manager_activate(struct dev_manager *dm, struct logical_volume *lv,
			 struct lv_activate_opts *laopts)
{
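	/*
	 * Two passes (a reading of the code, for orientation): ACTIVATE
	 * loads and resumes every table the LV needs, then CLEAN deactivates
	 * any left-over non-toplevel layer nodes the new tree no longer
	 * uses.
	 */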
	if (!_tree_action(dm, lv, laopts, ACTIVATE))
		return_0;

	if (!_tree_action(dm, lv, laopts, CLEAN))
		return_0;

	return 1;
}

/* origin_only may only be set if we are resuming (not activating) an origin LV. */
int dev_manager_preload(struct dev_manager *dm, struct logical_volume *lv,
			struct lv_activate_opts *laopts, int *flush_required)
{
	if (!_tree_action(dm, lv, laopts, PRELOAD))
		return_0;

	*flush_required = dm->flush_required;

	return 1;
}

int dev_manager_deactivate(struct dev_manager *dm, struct logical_volume *lv)
{
	struct lv_activate_opts laopts = { 0 };

	if (!_tree_action(dm, lv, &laopts, DEACTIVATE))
		return_0;

	return 1;
}

int dev_manager_suspend(struct dev_manager *dm, struct logical_volume *lv,
			struct lv_activate_opts *laopts, int lockfs, int flush_required)
{
	dm->flush_required = flush_required;

	if (!_tree_action(dm, lv, laopts, lockfs ? SUSPEND_WITH_LOCKFS : SUSPEND))
		return_0;

	return 1;
}

/*
 * Does the device use the VG somewhere in its construction?
 * Returns 1 if uncertain.
 */
int dev_manager_device_uses_vg(struct device *dev,
			       struct volume_group *vg)
{
	struct dm_tree *dtree;
	struct dm_tree_node *root;
	char dlid[sizeof(UUID_PREFIX) + sizeof(struct id) - 1] __attribute__((aligned(8)));
	int r = 1;

	if (!(dtree = dm_tree_create())) {
		log_error("partial dtree creation failed");
		return r;
	}

	if (!dm_tree_add_dev(dtree, (uint32_t) MAJOR(dev->dev), (uint32_t) MINOR(dev->dev))) {
		log_error("Failed to add device %s (%" PRIu32 ":%" PRIu32") to dtree",
			  dev_name(dev), (uint32_t) MAJOR(dev->dev), (uint32_t) MINOR(dev->dev));
		goto out;
	}

	memcpy(dlid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1);
	memcpy(dlid + sizeof(UUID_PREFIX) - 1, &vg->id.uuid[0], sizeof(vg->id));
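	/*
	 * dlid now holds "LVM-" followed by the raw VG id bytes; no
	 * terminating NUL is needed because only the length-bounded prefix
	 * comparison below uses it.
	 */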

	if (!(root = dm_tree_find_node(dtree, 0, 0))) {
		log_error("Lost dependency tree root node");
		goto out;
	}

	if (dm_tree_children_use_uuid(root, dlid, sizeof(UUID_PREFIX) + sizeof(vg->id) - 1))
		goto_out;

	r = 0;

out:
	dm_tree_free(dtree);
	return r;
}