]> sourceware.org Git - lvm2.git/blob - libdm/libdm-deptree.c
Transaction_id is property of thin_pool
[lvm2.git] / libdm / libdm-deptree.c
1 /*
2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15 #include "dmlib.h"
16 #include "libdm-targets.h"
17 #include "libdm-common.h"
18 #include "kdev_t.h"
19 #include "dm-ioctl.h"
20
21 #include <stdarg.h>
22 #include <sys/param.h>
23 #include <sys/utsname.h>
24
25 #define MAX_TARGET_PARAMSIZE 500000
26
27 /* FIXME Fix interface so this is used only by LVM */
28 #define UUID_PREFIX "LVM-"
29
30 #define REPLICATOR_LOCAL_SITE 0
31
32 #define THIN_MIN_DATA_SIZE 128
33 #define THIN_MAX_DATA_SIZE 2097152
34 #define THIN_MAX_DEVICE_ID ((1 << 24) - 1)
35
36 #define QUOTE(x) #x
37
/* Supported segment types */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	/*
	 * Keep the raid entries in this order and last (before SEG_LAST):
	 * dm_segtypes[] below relies on a 1:1 index mapping up to the
	 * aliased "raid5"/"raid6" entries that follow them.
	 */
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,
};
64
65 /* FIXME Add crypt and multipath support */
66
/* Maps each SEG_* type to the dm target name passed to the kernel. */
struct {
	unsigned type;		/* SEG_* enum value */
	const char *target;	/* Kernel dm target name */
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 *WARNING: Since 'raid' target overloads this 1:1 mapping table
	 * for search do not add new enum elements past them!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};
102
/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;	/* Device this area lives on */

	uint64_t offset;	/* Offset into dev_node (presumably sectors -- confirm against emitters) */

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};
116
/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;		/* Site index within the replicator */
	dm_replicator_mode_t mode;	/* Sync/async replication mode */
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};
128
/* Per-segment properties */
/*
 * One table line to be loaded for a device.  Only the fields relevant
 * to the segment's type (noted per field) are meaningful.
 */
struct load_segment {
	struct dm_list list;

	unsigned type;		/* SEG_* segment type */

	uint64_t size;		/* Segment length -- presumably sectors; confirm against table emitters */

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	uint32_t data_block_size;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	unsigned skip_block_zeroeing;	/* Thin_pool (sic: historical spelling) */
	uint32_t device_id;		/* Thin */

};
178
/* Per-device properties */
struct load_properties {
	int read_only;		/* Load the table read-only */
	uint32_t major;		/* Device number requested at creation (see dm_tree_add_new_dev) */
	uint32_t minor;

	uint32_t read_ahead;		/* Read-ahead to apply; DM_READ_AHEAD_AUTO by default */
	uint32_t read_ahead_flags;

	unsigned segment_count;	/* Number of entries on segs */
	unsigned size_changed;	/* Reported via dm_tree_node_size_changed() */
	struct dm_list segs;	/* List of struct load_segment table lines */

	const char *new_name;	/* Non-NULL: device is to be renamed to this */

	/* If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;
};
210
/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;		/* Embedded in the uses/used_by list */
	struct dm_tree_node *node;	/* Node at the other end of the link */
};
216
/* A device in the tree plus the properties needed to (re)create it. */
struct dm_tree_node {
	struct dm_tree *dtree;		/* Owning tree */

	const char *name;		/* Referenced, not copied */
	const char *uuid;		/* Referenced, not copied */
	struct dm_info info;		/* Cached device status */

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};
241
/* Dependency tree of dm devices, rooted at an invisible sentinel node. */
struct dm_tree {
	struct dm_pool *mem;		/* Backing pool for nodes, links and strings */
	struct dm_hash_table *devs;	/* Nodes keyed by packed major:minor */
	struct dm_hash_table *uuids;	/* Nodes keyed by uuid */
	struct dm_tree_node root;	/* Sentinel standing in for "no parent"/"no child" */
	int skip_lockfs;		/* 1 skips lockfs (for non-snapshots) */
	int no_flush;			/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;		/* 1 retries remove if not successful */
	uint32_t cookie;		/* Udev synchronisation cookie */
};
252
253 struct dm_tree *dm_tree_create(void)
254 {
255 struct dm_tree *dtree;
256
257 if (!(dtree = dm_zalloc(sizeof(*dtree)))) {
258 log_error("dm_tree_create malloc failed");
259 return NULL;
260 }
261
262 dtree->root.dtree = dtree;
263 dm_list_init(&dtree->root.uses);
264 dm_list_init(&dtree->root.used_by);
265 dtree->skip_lockfs = 0;
266 dtree->no_flush = 0;
267
268 if (!(dtree->mem = dm_pool_create("dtree", 1024))) {
269 log_error("dtree pool creation failed");
270 dm_free(dtree);
271 return NULL;
272 }
273
274 if (!(dtree->devs = dm_hash_create(8))) {
275 log_error("dtree hash creation failed");
276 dm_pool_destroy(dtree->mem);
277 dm_free(dtree);
278 return NULL;
279 }
280
281 if (!(dtree->uuids = dm_hash_create(32))) {
282 log_error("dtree uuid hash creation failed");
283 dm_hash_destroy(dtree->devs);
284 dm_pool_destroy(dtree->mem);
285 dm_free(dtree);
286 return NULL;
287 }
288
289 return dtree;
290 }
291
292 void dm_tree_free(struct dm_tree *dtree)
293 {
294 if (!dtree)
295 return;
296
297 dm_hash_destroy(dtree->uuids);
298 dm_hash_destroy(dtree->devs);
299 dm_pool_destroy(dtree->mem);
300 dm_free(dtree);
301 }
302
303 static int _nodes_are_linked(const struct dm_tree_node *parent,
304 const struct dm_tree_node *child)
305 {
306 struct dm_tree_link *dlink;
307
308 dm_list_iterate_items(dlink, &parent->uses)
309 if (dlink->node == child)
310 return 1;
311
312 return 0;
313 }
314
315 static int _link(struct dm_list *list, struct dm_tree_node *node)
316 {
317 struct dm_tree_link *dlink;
318
319 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
320 log_error("dtree link allocation failed");
321 return 0;
322 }
323
324 dlink->node = node;
325 dm_list_add(list, &dlink->list);
326
327 return 1;
328 }
329
330 static int _link_nodes(struct dm_tree_node *parent,
331 struct dm_tree_node *child)
332 {
333 if (_nodes_are_linked(parent, child))
334 return 1;
335
336 if (!_link(&parent->uses, child))
337 return 0;
338
339 if (!_link(&child->used_by, parent))
340 return 0;
341
342 return 1;
343 }
344
345 static void _unlink(struct dm_list *list, struct dm_tree_node *node)
346 {
347 struct dm_tree_link *dlink;
348
349 dm_list_iterate_items(dlink, list)
350 if (dlink->node == node) {
351 dm_list_del(&dlink->list);
352 break;
353 }
354 }
355
356 static void _unlink_nodes(struct dm_tree_node *parent,
357 struct dm_tree_node *child)
358 {
359 if (!_nodes_are_linked(parent, child))
360 return;
361
362 _unlink(&parent->uses, child);
363 _unlink(&child->used_by, parent);
364 }
365
/* Attach node directly beneath the root sentinel (i.e. "no parent yet"). */
static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}
370
/* Detach node from the root sentinel (it gained a real parent). */
static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}
375
/* Mark node as a leaf by linking the root sentinel beneath it. */
static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}
380
/* Drop node's leaf marker (it gained a real child). */
static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}
385
/*
 * Link parent -> child, maintaining the invariant that the root
 * sentinel stands in for "no parent" / "no child".
 */
static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	/* Symmetrically: don't mark parent a leaf if it already has a child */
	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}
403
/*
 * Allocate a new node from the tree's pool and index it by device
 * number (and by uuid when one is supplied).
 *
 * name/uuid are referenced, not copied -- the caller must ensure they
 * stay valid for the tree's lifetime.  Returns NULL on failure.
 */
static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	/* Nodes are keyed by packed major:minor in the devs hash */
	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	/* Empty uuids (e.g. devices not yet created) are not indexed */
	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}
452
/* Look up a node by device number; NULL if it is not in the tree. */
static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
					       uint32_t major, uint32_t minor)
{
	uint64_t dev = MKDEV(major, minor);

	return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
				     sizeof(dev));
}
461
/*
 * Look up a node by uuid.  If the exact uuid is not indexed but starts
 * with the "LVM-" prefix, retry with the prefix stripped (active
 * devices may have been created without it).
 */
static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
						       const char *uuid)
{
	struct dm_tree_node *node;

	if ((node = dm_hash_lookup(dtree->uuids, uuid)))
		return node;

	if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return NULL;

	return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
}
475
476 static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
477 const char **name, const char **uuid,
478 struct dm_info *info, struct dm_deps **deps)
479 {
480 memset(info, 0, sizeof(*info));
481
482 if (!dm_is_dm_major(major)) {
483 *name = "";
484 *uuid = "";
485 *deps = NULL;
486 info->major = major;
487 info->minor = minor;
488 info->exists = 0;
489 info->live_table = 0;
490 info->inactive_table = 0;
491 info->read_only = 0;
492 return 1;
493 }
494
495 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
496 log_error("deps dm_task creation failed");
497 return 0;
498 }
499
500 if (!dm_task_set_major(*dmt, major)) {
501 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
502 major, minor);
503 goto failed;
504 }
505
506 if (!dm_task_set_minor(*dmt, minor)) {
507 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
508 major, minor);
509 goto failed;
510 }
511
512 if (!dm_task_run(*dmt)) {
513 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
514 major, minor);
515 goto failed;
516 }
517
518 if (!dm_task_get_info(*dmt, info)) {
519 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
520 major, minor);
521 goto failed;
522 }
523
524 if (!info->exists) {
525 *name = "";
526 *uuid = "";
527 *deps = NULL;
528 } else {
529 if (info->major != major) {
530 log_error("Inconsistent dtree major number: %u != %u",
531 major, info->major);
532 goto failed;
533 }
534 if (info->minor != minor) {
535 log_error("Inconsistent dtree minor number: %u != %u",
536 minor, info->minor);
537 goto failed;
538 }
539 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
540 log_error("name pool_strdup failed");
541 goto failed;
542 }
543 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
544 log_error("uuid pool_strdup failed");
545 goto failed;
546 }
547 *deps = dm_task_get_deps(*dmt);
548 }
549
550 return 1;
551
552 failed:
553 dm_task_destroy(*dmt);
554 return 0;
555 }
556
/*
 * Ensure a device -- and, recursively, everything it depends on -- is
 * present in the tree, linked beneath parent.
 * Returns the node, or NULL on error.
 */
static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		/* No dependencies: this node is a leaf */
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	/* The deps task (if any) is only needed while deps is in use */
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}
614
615 static int _node_clear_table(struct dm_tree_node *dnode)
616 {
617 struct dm_task *dmt;
618 struct dm_info *info;
619 const char *name;
620 int r;
621
622 if (!(info = &dnode->info)) {
623 log_error("_node_clear_table failed: missing info");
624 return 0;
625 }
626
627 if (!(name = dm_tree_node_get_name(dnode))) {
628 log_error("_node_clear_table failed: missing name");
629 return 0;
630 }
631
632 /* Is there a table? */
633 if (!info->exists || !info->inactive_table)
634 return 1;
635
636 // FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.
637
638 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
639 name, info->major, info->minor);
640
641 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
642 log_error("Table clear dm_task creation failed for %s", name);
643 return 0;
644 }
645
646 if (!dm_task_set_major(dmt, info->major) ||
647 !dm_task_set_minor(dmt, info->minor)) {
648 log_error("Failed to set device number for %s table clear", name);
649 dm_task_destroy(dmt);
650 return 0;
651 }
652
653 r = dm_task_run(dmt);
654
655 if (!dm_task_get_info(dmt, info)) {
656 log_error("_node_clear_table failed: info missing after running task for %s", name);
657 r = 0;
658 }
659
660 dm_task_destroy(dmt);
661
662 return r;
663 }
664
665 struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
666 const char *name,
667 const char *uuid,
668 uint32_t major, uint32_t minor,
669 int read_only,
670 int clear_inactive,
671 void *context)
672 {
673 struct dm_tree_node *dnode;
674 struct dm_info info;
675 const char *name2;
676 const char *uuid2;
677
678 /* Do we need to add node to tree? */
679 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
680 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
681 log_error("name pool_strdup failed");
682 return NULL;
683 }
684 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
685 log_error("uuid pool_strdup failed");
686 return NULL;
687 }
688
689 info.major = 0;
690 info.minor = 0;
691 info.exists = 0;
692 info.live_table = 0;
693 info.inactive_table = 0;
694 info.read_only = 0;
695
696 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
697 context, 0)))
698 return_NULL;
699
700 /* Attach to root node until a table is supplied */
701 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
702 return_NULL;
703
704 dnode->props.major = major;
705 dnode->props.minor = minor;
706 dnode->props.new_name = NULL;
707 dnode->props.size_changed = 0;
708 } else if (strcmp(name, dnode->name)) {
709 /* Do we need to rename node? */
710 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
711 log_error("name pool_strdup failed");
712 return 0;
713 }
714 }
715
716 dnode->props.read_only = read_only ? 1 : 0;
717 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
718 dnode->props.read_ahead_flags = 0;
719
720 if (clear_inactive && !_node_clear_table(dnode))
721 return_NULL;
722
723 dnode->context = context;
724 dnode->udev_flags = 0;
725
726 return dnode;
727 }
728
729 struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
730 const char *name,
731 const char *uuid,
732 uint32_t major,
733 uint32_t minor,
734 int read_only,
735 int clear_inactive,
736 void *context,
737 uint16_t udev_flags)
738 {
739 struct dm_tree_node *node;
740
741 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
742 clear_inactive, context)))
743 node->udev_flags = udev_flags;
744
745 return node;
746 }
747
748
/* Stash read-ahead settings in the node's load properties. */
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}
756
/* Register the (direct child) node that must be suspended before this one. */
void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}
762
/* Add a device and its dependencies to the tree, below the root. */
int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
{
	return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
}
767
/* As dm_tree_add_dev(), propagating udev flags to every node created. */
int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
				    uint32_t minor, uint16_t udev_flags)
{
	return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
}
773
774 const char *dm_tree_node_get_name(const struct dm_tree_node *node)
775 {
776 return node->info.exists ? node->name : "";
777 }
778
779 const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
780 {
781 return node->info.exists ? node->uuid : "";
782 }
783
/* Return the node's cached dm_info (may be stale; callers refresh as needed). */
const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}
788
/* Return the caller-supplied context pointer attached to the node. */
void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}
793
/* Report whether this node's device size was noted as changed. */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}
798
799 int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
800 {
801 if (inverted) {
802 if (_nodes_are_linked(&node->dtree->root, node))
803 return 0;
804 return dm_list_size(&node->used_by);
805 }
806
807 if (_nodes_are_linked(node, &node->dtree->root))
808 return 0;
809
810 return dm_list_size(&node->uses);
811 }
812
813 /*
814 * Returns 1 if no prefix supplied
815 */
816 static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
817 {
818 if (!uuid_prefix)
819 return 1;
820
821 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
822 return 1;
823
824 /* Handle transition: active device uuids might be missing the prefix */
825 if (uuid_prefix_len <= 4)
826 return 0;
827
828 if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
829 return 0;
830
831 if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
832 return 0;
833
834 if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
835 return 1;
836
837 return 0;
838 }
839
/*
 * Check that every child (or parent, when inverted) belonging to the
 * given uuid prefix is suspended.
 * Returns 1 if no children.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		/* Directly below root: nothing to check in this direction */
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}
888
889 /*
890 * Set major and minor to zero for root of tree.
891 */
892 struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
893 uint32_t major,
894 uint32_t minor)
895 {
896 if (!major && !minor)
897 return &dtree->root;
898
899 return _find_dm_tree_node(dtree, major, minor);
900 }
901
902 /*
903 * Set uuid to NULL for root of tree.
904 */
905 struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
906 const char *uuid)
907 {
908 if (!uuid || !*uuid)
909 return &dtree->root;
910
911 return _find_dm_tree_node_by_uuid(dtree, uuid);
912 }
913
/*
 * First time set *handle to NULL.
 * Set inverted to invert the tree.
 *
 * Iterator over parent's children (or parents, when inverted).
 * *handle is the cursor; a NULL return marks the end of the walk.
 */
struct dm_tree_node *dm_tree_next_child(void **handle,
					const struct dm_tree_node *parent,
					uint32_t inverted)
{
	struct dm_list **dlink = (struct dm_list **) handle;
	const struct dm_list *use_list;

	if (inverted)
		use_list = &parent->used_by;
	else
		use_list = &parent->uses;

	if (!*dlink)
		*dlink = dm_list_first(use_list);
	else
		*dlink = dm_list_next(use_list, *dlink);

	return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
}
937
/*
 * Helpers for deactivating a device together with its dependencies
 * when the uuid prefix matches.
 */
941 static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
942 struct dm_info *info)
943 {
944 struct dm_task *dmt;
945 int r;
946
947 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
948 log_error("_info_by_dev: dm_task creation failed");
949 return 0;
950 }
951
952 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
953 log_error("_info_by_dev: Failed to set device number");
954 dm_task_destroy(dmt);
955 return 0;
956 }
957
958 if (!with_open_count && !dm_task_no_open_count(dmt))
959 log_error("Failed to disable open_count");
960
961 if ((r = dm_task_run(dmt)))
962 r = dm_task_get_info(dmt, info);
963
964 dm_task_destroy(dmt);
965
966 return r;
967 }
968
969 static int _check_device_not_in_use(struct dm_info *info)
970 {
971 if (!info->exists)
972 return 1;
973
974 /* If sysfs is not used, use open_count information only. */
975 if (!*dm_sysfs_dir()) {
976 if (info->open_count) {
977 log_error("Device %" PRIu32 ":%" PRIu32 " in use",
978 info->major, info->minor);
979 return 0;
980 }
981
982 return 1;
983 }
984
985 if (dm_device_has_holders(info->major, info->minor)) {
986 log_error("Device %" PRIu32 ":%" PRIu32 " is used "
987 "by another device.", info->major, info->minor);
988 return 0;
989 }
990
991 if (dm_device_has_mounted_fs(info->major, info->minor)) {
992 log_error("Device %" PRIu32 ":%" PRIu32 " contains "
993 "a filesystem in use.", info->major, info->minor);
994 return 0;
995 }
996
997 return 1;
998 }
999
/* Check if all parent nodes of given node have open_count == 0 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;	/* A vanished parent counts as closed */

		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}
1040
/*
 * Remove the device via DM_DEVICE_REMOVE, synchronising with udev
 * through the supplied cookie.  With retry set, the libdm retry-remove
 * behaviour is enabled for busy devices.  Returns 1 on success.
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;


	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
1082
/*
 * Rename the device via DM_DEVICE_RENAME, synchronising with udev
 * through the supplied cookie.  Returns 1 on success.
 */
static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}
1117
1118 /* FIXME Merge with _suspend_node? */
1119 static int _resume_node(const char *name, uint32_t major, uint32_t minor,
1120 uint32_t read_ahead, uint32_t read_ahead_flags,
1121 struct dm_info *newinfo, uint32_t *cookie,
1122 uint16_t udev_flags, int already_suspended)
1123 {
1124 struct dm_task *dmt;
1125 int r = 0;
1126
1127 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1128
1129 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
1130 log_error("Suspend dm_task creation failed for %s", name);
1131 return 0;
1132 }
1133
1134 /* FIXME Kernel should fill in name on return instead */
1135 if (!dm_task_set_name(dmt, name)) {
1136 log_error("Failed to set readahead device name for %s", name);
1137 goto out;
1138 }
1139
1140 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1141 log_error("Failed to set device number for %s resumption.", name);
1142 goto out;
1143 }
1144
1145 if (!dm_task_no_open_count(dmt))
1146 log_error("Failed to disable open_count");
1147
1148 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1149 log_error("Failed to set read ahead");
1150
1151 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1152 goto out;
1153
1154 if ((r = dm_task_run(dmt))) {
1155 if (already_suspended)
1156 dec_suspended();
1157 r = dm_task_get_info(dmt, newinfo);
1158 }
1159
1160 out:
1161 dm_task_destroy(dmt);
1162
1163 return r;
1164 }
1165
/*
 * Suspend the device via DM_DEVICE_SUSPEND and return fresh info in
 * newinfo.  skip_lockfs suppresses the filesystem sync; no_flush
 * suppresses flushing of pending I/O.  Returns 1 on success.
 */
static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt))) {
		/* Track globally how many devices we hold suspended */
		inc_suspended();
		r = dm_task_get_info(dmt, newinfo);
	}

	dm_task_destroy(dmt);

	return r;
}
1206
/*
 * Recursively deactivate (remove) every child of dnode whose uuid matches
 * uuid_prefix, working from the top of the tree downwards.
 *
 * Devices that are in use, or whose presuspend dependency still has open
 * parents, are skipped; 'level' is the recursion depth and such skips are
 * only reported as errors at the top level (level == 0), since deeper
 * nodes are likely internal dependencies.
 *
 * Returns 1 on success, 0 if any matching device could not be deactivated.
 *
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (!_check_device_not_in_use(&info))
			continue;

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      child->dtree->retry_remove)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			/* Removing a suspended device: keep the global counter in step */
			dec_suspended();

		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}
1289
/* Public entry point: deactivate matching children, starting at recursion level 0. */
int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
}
1296
/* Request that suspends of devices in this tree skip the filesystem sync. */
void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}
1301
/* Request that suspends of devices in this tree skip the I/O flush. */
void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}
1306
/* Request that device removal in this tree is retried on failure. */
void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
	dnode->dtree->retry_remove = 1;
}
1311
/*
 * Suspend every device in the subtree below dnode whose uuid matches
 * uuid_prefix.  Two passes: first suspend matching nodes at this level
 * (each only once its immediate parents within the prefix are already
 * suspended), then recurse into each matching child's subtree.
 *
 * Returns 1 on success, 0 if any matching device failed to suspend.
 */
int dm_tree_suspend_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		/* Skip devices that are missing or already suspended */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}
1387
/*
 * Resume every device in the subtree below dnode whose uuid matches
 * uuid_prefix.  Descendants are activated first (bottom-up); nodes at
 * this level are then handled in ascending activation_priority order
 * (three passes, 0..2), so e.g. snapshot origins resume after new
 * snapshots.  Pending renames are applied before resuming.
 *
 * Returns 1 on success, 0 if any matching device failed to resume.
 */
int dm_tree_activate_children(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info newinfo;
	const char *name;
	const char *uuid;
	int priority;

	/* Activate children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	handle = NULL;

	for (priority = 0; priority < 3; priority++) {
		while ((child = dm_tree_next_child(&handle, dnode, 0))) {
			if (!(uuid = dm_tree_node_get_uuid(child))) {
				stack;
				continue;
			}

			if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
				continue;

			/* Only handle nodes belonging to the current pass */
			if (priority != child->activation_priority)
				continue;

			if (!(name = dm_tree_node_get_name(child))) {
				stack;
				continue;
			}

			/* Rename? */
			if (child->props.new_name) {
				if (!_rename_node(name, child->props.new_name, child->info.major,
						  child->info.minor, &child->dtree->cookie,
						  child->udev_flags)) {
					log_error("Failed to rename %s (%" PRIu32
						  ":%" PRIu32 ") to %s", name, child->info.major,
						  child->info.minor, child->props.new_name);
					return 0;
				}
				child->name = child->props.new_name;
				child->props.new_name = NULL;
			}

			/* Nothing to do unless a new table or a suspend is pending */
			if (!child->info.inactive_table && !child->info.suspended)
				continue;

			if (!_resume_node(child->name, child->info.major, child->info.minor,
					  child->props.read_ahead, child->props.read_ahead_flags,
					  &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
				log_error("Unable to resume %s (%" PRIu32
					  ":%" PRIu32 ")", child->name, child->info.major,
					  child->info.minor);
				r = 0;
				continue;
			}

			/* Update cached info */
			child->info = newinfo;
		}
	}

	/* NOTE(review): handle is not read after this point; reset looks redundant */
	handle = NULL;

	return r;
}
1471
/*
 * Create the device-mapper device for dnode (DM_DEVICE_CREATE) using its
 * name and uuid, plus the optional fixed major:minor and read-only
 * properties.  On success dnode->info is refreshed from the new device.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _create_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;

	log_verbose("Creating %s", dnode->name);

	if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
		log_error("Create dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_name(dmt, dnode->name)) {
		log_error("Failed to set device name for %s", dnode->name);
		goto out;
	}

	if (!dm_task_set_uuid(dmt, dnode->uuid)) {
		log_error("Failed to set uuid for %s", dnode->name);
		goto out;
	}

	/* A requested fixed device number is only honoured if major is set */
	if (dnode->props.major &&
	    (!dm_task_set_major(dmt, dnode->props.major) ||
	     !dm_task_set_minor(dmt, dnode->props.minor))) {
		log_error("Failed to set device number for %s creation.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if ((r = dm_task_run(dmt)))
		/* Cache the new device's info on the node */
		r = dm_task_get_info(dmt, &dnode->info);

out:
	dm_task_destroy(dmt);

	return r;
}
1517
1518
1519 static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
1520 {
1521 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
1522 log_error("Failed to format %s device number for %s as dm "
1523 "target (%u,%u)",
1524 node->name, node->uuid, node->info.major, node->info.minor);
1525 return 0;
1526 }
1527
1528 return 1;
1529 }
1530
/* Simplify string emitting code */
1532 #define EMIT_PARAMS(p, str...)\
1533 do {\
1534 int w;\
1535 if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
1536 stack; /* Out of space */\
1537 return -1;\
1538 }\
1539 p += w;\
1540 } while (0)
1541
/*
 * _emit_areas_line
 *
 * Append the per-area (device/offset) parameters for seg to params,
 * advancing *pos.  The layout depends on the segment type:
 *  - replicator-dev: site index and device, plus a sync-log spec for
 *    remote (non-first) sites,
 *  - raid: data device, or "-" as a placeholder for a missing one,
 *  - default (linear/striped/crypt/mirror legs): "device offset" pairs.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;
	const char *logtype, *synctype;
	unsigned log_parm_count;

	dm_list_iterate_items(area, &seg->areas) {
		switch (seg->type) {
		case SEG_REPLICATOR_DEV:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
			if (first_time)
				/* First area is the local site: no log needed */
				EMIT_PARAMS(*pos, " nolog 0");
			else {
				/* Remote devices */
				log_parm_count = (area->flags &
						  (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;

				if (!area->slog) {
					devbuf[0] = 0;		/* Only core log parameters */
					logtype = "core";
				} else {
					devbuf[0] = ' ';	/* Extra space before device name */
					if (!_build_dev_string(devbuf + 1,
							       sizeof(devbuf) - 1,
							       area->slog))
						return_0;
					logtype = "disk";
					log_parm_count++;	/* Extra sync log device name parameter */
				}

				EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
					    log_parm_count, devbuf, area->region_size);

				synctype = (area->flags & DM_NOSYNC) ?
					   " nosync" : (area->flags & DM_FORCESYNC) ?
					   " sync" : NULL;

				if (synctype)
					EMIT_PARAMS(*pos, "%s", synctype);
			}
			break;
		case SEG_RAID1:
		case SEG_RAID4:
		case SEG_RAID5_LA:
		case SEG_RAID5_RA:
		case SEG_RAID5_LS:
		case SEG_RAID5_RS:
		case SEG_RAID6_ZR:
		case SEG_RAID6_NR:
		case SEG_RAID6_NC:
			if (!area->dev_node) {
				/* Missing raid component: emit placeholder */
				EMIT_PARAMS(*pos, " -");
				break;
			}
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %s", devbuf);
			break;
		default:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
				    devbuf, area->offset);
		}

		first_time = 0;
	}

	return 1;
}
1626
/*
 * Emit the replicator target parameters: the replicator log device, its
 * type and size, then one "blockdev" entry per remote site with an
 * optional fall-behind constraint (at most one of data/ios/timeout).
 *
 * Returns 1 on success, 0 on failure (and -1 via EMIT_PARAMS if params
 * is too small).
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* The log device's size comes from its own last queued segment */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 base args, plus 2 more if any fall-behind constraint is set */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1662
/*
 * Emit the mirror target parameters: log type and its argument list
 * (routed through the userspace log for cluster mirrors on >= 2.6.31
 * kernels), region size, sync hints, the mirror leg count and devices,
 * and the trailing feature section ("handle_errors" on newer kernels).
 *
 * Returns: 1 on success, 0 on failure
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table.  It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types.  Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++; /* For the log device name */
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	/* Mirror leg devices and offsets */
	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
1790
1791 static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1792 uint32_t minor, struct load_segment *seg,
1793 uint64_t *seg_start, char *params,
1794 size_t paramsize)
1795 {
1796 uint32_t i, *tmp;
1797 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1798 int pos = 0;
1799
1800 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1801 param_count++;
1802
1803 if (seg->region_size)
1804 param_count += 2;
1805
1806 tmp = (uint32_t *)(&seg->rebuilds); /* rebuilds is 64-bit */
1807 param_count += 2 * hweight32(tmp[0]);
1808 param_count += 2 * hweight32(tmp[1]);
1809
1810 if ((seg->type == SEG_RAID1) && seg->stripe_size)
1811 log_error("WARNING: Ignoring RAID1 stripe size");
1812
1813 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
1814 param_count, seg->stripe_size);
1815
1816 if (seg->flags & DM_NOSYNC)
1817 EMIT_PARAMS(pos, " nosync");
1818 else if (seg->flags & DM_FORCESYNC)
1819 EMIT_PARAMS(pos, " sync");
1820
1821 if (seg->region_size)
1822 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
1823
1824 for (i = 0; i < (seg->area_count / 2); i++)
1825 if (seg->rebuilds & (1 << i))
1826 EMIT_PARAMS(pos, " rebuild %u", i);
1827
1828 /* Print number of metadata/data device pairs */
1829 EMIT_PARAMS(pos, " %u", seg->area_count/2);
1830
1831 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
1832 return_0;
1833
1834 return 1;
1835 }
1836
1837 static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
1838 uint32_t minor, struct load_segment *seg,
1839 uint64_t *seg_start, char *params,
1840 size_t paramsize)
1841 {
1842 int pos = 0;
1843 int r;
1844 int target_type_is_raid = 0;
1845 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
1846 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
1847
1848 switch(seg->type) {
1849 case SEG_ERROR:
1850 case SEG_ZERO:
1851 case SEG_LINEAR:
1852 break;
1853 case SEG_MIRRORED:
1854 /* Mirrors are pretty complicated - now in separate function */
1855 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
1856 if (!r)
1857 return_0;
1858 break;
1859 case SEG_REPLICATOR:
1860 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
1861 &pos)) <= 0) {
1862 stack;
1863 return r;
1864 }
1865 break;
1866 case SEG_REPLICATOR_DEV:
1867 if (!seg->replicator || !_build_dev_string(originbuf,
1868 sizeof(originbuf),
1869 seg->replicator))
1870 return_0;
1871
1872 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
1873 break;
1874 case SEG_SNAPSHOT:
1875 case SEG_SNAPSHOT_MERGE:
1876 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1877 return_0;
1878 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
1879 return_0;
1880 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
1881 seg->persistent ? 'P' : 'N', seg->chunk_size);
1882 break;
1883 case SEG_SNAPSHOT_ORIGIN:
1884 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1885 return_0;
1886 EMIT_PARAMS(pos, "%s", originbuf);
1887 break;
1888 case SEG_STRIPED:
1889 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
1890 break;
1891 case SEG_CRYPT:
1892 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
1893 seg->chainmode ? "-" : "", seg->chainmode ?: "",
1894 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
1895 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
1896 seg->iv_offset : *seg_start);
1897 break;
1898 case SEG_RAID1:
1899 case SEG_RAID4:
1900 case SEG_RAID5_LA:
1901 case SEG_RAID5_RA:
1902 case SEG_RAID5_LS:
1903 case SEG_RAID5_RS:
1904 case SEG_RAID6_ZR:
1905 case SEG_RAID6_NR:
1906 case SEG_RAID6_NC:
1907 target_type_is_raid = 1;
1908 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
1909 params, paramsize);
1910 if (!r)
1911 return_0;
1912
1913 break;
1914 case SEG_THIN_POOL:
1915 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
1916 return_0;
1917 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
1918 return_0;
1919 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
1920 seg->data_block_size, seg->low_water_mark,
1921 seg->skip_block_zeroeing ? "1 skip_block_zeroing" : "");
1922 break;
1923 case SEG_THIN:
1924 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
1925 return_0;
1926 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
1927 break;
1928 }
1929
1930 switch(seg->type) {
1931 case SEG_ERROR:
1932 case SEG_REPLICATOR:
1933 case SEG_SNAPSHOT:
1934 case SEG_SNAPSHOT_ORIGIN:
1935 case SEG_SNAPSHOT_MERGE:
1936 case SEG_ZERO:
1937 case SEG_THIN_POOL:
1938 case SEG_THIN:
1939 break;
1940 case SEG_CRYPT:
1941 case SEG_LINEAR:
1942 case SEG_REPLICATOR_DEV:
1943 case SEG_STRIPED:
1944 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
1945 stack;
1946 return r;
1947 }
1948 if (!params[0]) {
1949 log_error("No parameters supplied for %s target "
1950 "%u:%u.", dm_segtypes[seg->type].target,
1951 major, minor);
1952 return 0;
1953 }
1954 break;
1955 }
1956
1957 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
1958 " %" PRIu64 " %s %s", major, minor,
1959 *seg_start, seg->size, target_type_is_raid ? "raid" :
1960 dm_segtypes[seg->type].target, params);
1961
1962 if (!dm_task_add_target(dmt, *seg_start, seg->size,
1963 target_type_is_raid ? "raid" :
1964 dm_segtypes[seg->type].target, params))
1965 return_0;
1966
1967 *seg_start += seg->size;
1968
1969 return 1;
1970 }
1971
1972 #undef EMIT_PARAMS
1973
1974 static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
1975 struct load_segment *seg, uint64_t *seg_start)
1976 {
1977 char *params;
1978 size_t paramsize = 4096;
1979 int ret;
1980
1981 do {
1982 if (!(params = dm_malloc(paramsize))) {
1983 log_error("Insufficient space for target parameters.");
1984 return 0;
1985 }
1986
1987 params[0] = '\0';
1988 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
1989 params, paramsize);
1990 dm_free(params);
1991
1992 if (!ret)
1993 stack;
1994
1995 if (ret >= 0)
1996 return ret;
1997
1998 log_debug("Insufficient space in params[%" PRIsize_t
1999 "] for target parameters.", paramsize);
2000
2001 paramsize *= 2;
2002 } while (paramsize < MAX_TARGET_PARAMSIZE);
2003
2004 log_error("Target parameter size too big. Aborting.");
2005 return 0;
2006 }
2007
/*
 * Load (DM_DEVICE_RELOAD) the table built from dnode's queued segments
 * into the device's inactive table slot.  Reloads of identical tables
 * are suppressed.  props.size_changed records whether the table's total
 * size differs from the existing one, except for new zero-length devices
 * with delay_resume_if_new set.  segment_count is cleared once the
 * reload has been attempted.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _load_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;
	struct load_segment *seg;
	uint64_t seg_start = 0, existing_table_size;

	log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
		    dnode->info.major, dnode->info.minor);

	if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
		log_error("Reload dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set device number for %s reload.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	/* Append one target per queued segment; seg_start tracks the offset */
	dm_list_iterate_items(seg, &dnode->props.segs)
		if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
				   seg, &seg_start))
			goto_out;

	if (!dm_task_suppress_identical_reload(dmt))
		log_error("Failed to suppress reload of identical tables.");

	if ((r = dm_task_run(dmt))) {
		r = dm_task_get_info(dmt, &dnode->info);
		if (r && !dnode->info.inactive_table)
			log_verbose("Suppressed %s identical table reload.",
				    dnode->name);

		existing_table_size = dm_task_get_existing_table_size(dmt);
		if ((dnode->props.size_changed =
		     (existing_table_size == seg_start) ? 0 : 1)) {
			log_debug("Table size changed from %" PRIu64 " to %"
				  PRIu64 " for %s", existing_table_size,
				  seg_start, dnode->name);
			/*
			 * Kernel usually skips size validation on zero-length devices
			 * now so no need to preload them.
			 */
			/* FIXME In which kernel version did this begin? */
			if (!existing_table_size && dnode->props.delay_resume_if_new)
				dnode->props.size_changed = 0;
		}
	}

	dnode->props.segment_count = 0;

out:
	dm_task_destroy(dmt);

	return r;
}
2074
/*
 * Preload the subtree below dnode: recurse into children first, create
 * any devices that don't yet exist, load queued tables, and immediately
 * resume a child whose table size changed if it has parents (so sizes
 * can be validated above it).  Size changes propagate upwards via
 * props.size_changed.  If any resumed child requested
 * immediate_dev_node, wait for udev to settle before returning.
 *
 * Returns 1 on success, 0 if any device failed to preload or resume.
 */
int dm_tree_preload_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child;
	struct dm_info newinfo;
	int update_devs_flag = 0;

	/* Preload children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		/* Skip existing non-device-mapper devices */
		if (!child->info.exists && child->info.major)
			continue;

		/* Ignore if it doesn't belong to this VG */
		if (child->info.exists &&
		    !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
				return_0;

		/* FIXME Cope if name exists with no uuid? */
		if (!child->info.exists) {
			if (!_create_node(child)) {
				stack;
				return 0;
			}
		}

		/* Load queued segments unless an inactive table is already loaded */
		if (!child->info.inactive_table && child->props.segment_count) {
			if (!_load_node(child)) {
				stack;
				return 0;
			}
		}

		/* Propagate device size change change */
		if (child->props.size_changed)
			dnode->props.size_changed = 1;

		/* Resume device immediately if it has parents and its size changed */
		if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
			continue;

		if (!child->info.inactive_table && !child->info.suspended)
			continue;

		if (!_resume_node(child->name, child->info.major, child->info.minor,
				  child->props.read_ahead, child->props.read_ahead_flags,
				  &newinfo, &child->dtree->cookie, child->udev_flags,
				  child->info.suspended)) {
			log_error("Unable to resume %s (%" PRIu32
				  ":%" PRIu32 ")", child->name, child->info.major,
				  child->info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;

		/*
		 * Prepare for immediate synchronization with udev and flush all stacked
		 * dev node operations if requested by immediate_dev_node property. But
		 * finish processing current level in the tree first.
		 */
		if (child->props.immediate_dev_node)
			update_devs_flag = 1;

	}

	handle = NULL;

	if (update_devs_flag) {
		if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
			stack;
		dm_tree_set_cookie(dnode, 0);
	}

	return r;
}
2160
/*
 * Report whether any device in the subtree below dnode has a uuid
 * matching the given prefix.
 *
 * Returns 1 on a match anywhere in the subtree, or if unsure (uuid
 * lookup failed); 0 otherwise.
 */
int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			log_error("Failed to get uuid for dtree node.");
			return 1;
		}

		if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			return 1;

		/*
		 * Propagate matches found deeper in the tree: the previous
		 * code discarded this result, so a matching grandchild was
		 * never reported.
		 */
		if (dm_tree_node_num_children(child, 0) &&
		    dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len))
			return 1;
	}

	return 0;
}
2187
2188 /*
2189 * Target functions
2190 */
/*
 * Allocate a load_segment of the given type/size from the tree's memory
 * pool, append it to dnode's pending segment list and bump segment_count.
 *
 * Returns the new segment, or NULL on allocation failure.
 */
static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
{
	struct load_segment *seg;

	if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
		log_error("dtree node segment allocation failed");
		return NULL;
	}

	seg->type = type;
	seg->size = size;
	/* NOTE(review): dm_pool_zalloc presumably zero-fills, which would make
	 * the explicit resets below redundant — confirm before removing them. */
	seg->area_count = 0;
	dm_list_init(&seg->areas);
	seg->stripe_size = 0;
	seg->persistent = 0;
	seg->chunk_size = 0;
	seg->cow = NULL;
	seg->origin = NULL;
	seg->merge = NULL;

	dm_list_add(&dnode->props.segs, &seg->list);
	dnode->props.segment_count++;

	return seg;
}
2216
/*
 * Add a snapshot-origin target of the given size to dnode, linked to the
 * origin device found by origin_uuid.  Origins get activation_priority 1
 * so they resume after new snapshots.
 *
 * Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
					    uint64_t size,
					    const char *origin_uuid)
{
	struct load_segment *seg;
	struct dm_tree_node *origin_node;

	if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
		return_0;

	if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
		log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
		return 0;
	}

	seg->origin = origin_node;
	if (!_link_tree_nodes(dnode, origin_node))
		return_0;

	/* Resume snapshot origins after new snapshots */
	dnode->activation_priority = 1;

	return 1;
}
2241
/*
 * Common helper for snapshot and snapshot-merge targets: looks up and
 * links the origin and COW devices, records persistence and chunk size,
 * and — when merge_uuid is given — wires up activation priorities so the
 * merging snapshot (priority 2) resumes after the snapshot-merge acting
 * origin (priority 1), which itself resumes after other snapshots.
 * A missing merge node is tolerated since the merging snapshot may have
 * been deactivated already.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _add_snapshot_target(struct dm_tree_node *node,
				uint64_t size,
				const char *origin_uuid,
				const char *cow_uuid,
				const char *merge_uuid,
				int persistent,
				uint32_t chunk_size)
{
	struct load_segment *seg;
	struct dm_tree_node *origin_node, *cow_node, *merge_node;
	unsigned seg_type;

	seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;

	if (!(seg = _add_segment(node, seg_type, size)))
		return_0;

	if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
		log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
		return 0;
	}

	seg->origin = origin_node;
	if (!_link_tree_nodes(node, origin_node))
		return_0;

	if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
		log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
		return 0;
	}

	seg->cow = cow_node;
	if (!_link_tree_nodes(node, cow_node))
		return_0;

	seg->persistent = persistent ? 1 : 0;
	seg->chunk_size = chunk_size;

	if (merge_uuid) {
		if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
			/* not a pure error, merging snapshot may have been deactivated */
			log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
		} else {
			seg->merge = merge_node;
			/* must not link merging snapshot, would undermine activation_priority below */
		}

		/* Resume snapshot-merge (acting origin) after other snapshots */
		node->activation_priority = 1;
		if (seg->merge) {
			/* Resume merging snapshot after snapshot-merge */
			seg->merge->activation_priority = 2;
		}
	}

	return 1;
}
2299
2300
/* Add a (possibly non-persistent) snapshot target with no merge device. */
int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
				     uint64_t size,
				     const char *origin_uuid,
				     const char *cow_uuid,
				     int persistent,
				     uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    NULL, persistent, chunk_size);
}
2311
/* Add a snapshot-merge target; merging snapshots are always persistent. */
int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *origin_uuid,
					   const char *cow_uuid,
					   const char *merge_uuid,
					   uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    merge_uuid, 1, chunk_size);
}
2322
/* Add an error target segment of the given size.  Returns 1 on success, 0 on failure. */
int dm_tree_node_add_error_target(struct dm_tree_node *node,
				  uint64_t size)
{
	if (!_add_segment(node, SEG_ERROR, size))
		return_0;

	return 1;
}
2331
/* Add a zero target segment of the given size.  Returns 1 on success, 0 on failure. */
int dm_tree_node_add_zero_target(struct dm_tree_node *node,
				 uint64_t size)
{
	if (!_add_segment(node, SEG_ZERO, size))
		return_0;

	return 1;
}
2340
/* Add a linear target segment of the given size.  Returns 1 on success, 0 on failure. */
int dm_tree_node_add_linear_target(struct dm_tree_node *node,
				   uint64_t size)
{
	if (!_add_segment(node, SEG_LINEAR, size))
		return_0;

	return 1;
}
2349
/*
 * Add a striped target segment with the given stripe (chunk) size;
 * the stripe areas are added separately.  Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_striped_target(struct dm_tree_node *node,
				    uint64_t size,
				    uint32_t stripe_size)
{
	struct load_segment *seg;

	if (!(seg = _add_segment(node, SEG_STRIPED, size)))
		return_0;

	seg->stripe_size = stripe_size;

	return 1;
}
2363
2364 int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2365 uint64_t size,
2366 const char *cipher,
2367 const char *chainmode,
2368 const char *iv,
2369 uint64_t iv_offset,
2370 const char *key)
2371 {
2372 struct load_segment *seg;
2373
2374 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2375 return_0;
2376
2377 seg->cipher = cipher;
2378 seg->chainmode = chainmode;
2379 seg->iv = iv;
2380 seg->iv_offset = iv_offset;
2381 seg->key = key;
2382
2383 return 1;
2384 }
2385
/*
 * Configure the log for the mirror segment most recently added to 'node'.
 *
 * Must be called after dm_tree_node_add_mirror_target() has created the
 * segment.  With DM_CORELOG set in 'flags' the mirror uses an in-memory
 * log and resume of a newly created node is delayed (used by pvmove);
 * otherwise log_uuid must name an existing tree node which is linked in
 * as the (disk) log device.  Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
				       uint32_t region_size,
				       unsigned clustered,
				       const char *log_uuid,
				       unsigned area_count,
				       uint32_t flags)
{
	struct dm_tree_node *log_node = NULL;
	struct load_segment *seg;

	if (!node->props.segment_count) {
		log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
		return 0;
	}

	/* Operate on the most recently added segment. */
	seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);

	if (log_uuid) {
		/* The uuid is kept on the segment even for core logs. */
		if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
			log_error("log uuid pool_strdup failed");
			return 0;
		}
		if ((flags & DM_CORELOG))
			/* For pvmove: immediate resume (for size validation) isn't needed. */
			node->props.delay_resume_if_new = 1;
		else {
			if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
				log_error("Couldn't find mirror log uuid %s.", log_uuid);
				return 0;
			}

			/* Cluster mirrors need the log dev node created immediately. */
			if (clustered)
				log_node->props.immediate_dev_node = 1;

			/* The kernel validates the size of disk logs. */
			/* FIXME Propagate to any devices below */
			log_node->props.delay_resume_if_new = 0;

			if (!_link_tree_nodes(node, log_node))
				return_0;
		}
	}

	/* NULL log remains for core logs (or no log_uuid given). */
	seg->log = log_node;
	seg->region_size = region_size;
	seg->clustered = clustered;
	seg->mirror_area_count = area_count;
	seg->flags = flags;

	return 1;
}
2437
2438 int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
2439 uint64_t size)
2440 {
2441 if (!_add_segment(node, SEG_MIRRORED, size))
2442 return_0;
2443
2444 return 1;
2445 }
2446
2447 int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2448 uint64_t size,
2449 const char *raid_type,
2450 uint32_t region_size,
2451 uint32_t stripe_size,
2452 uint64_t rebuilds,
2453 uint64_t reserved2)
2454 {
2455 int i;
2456 struct load_segment *seg = NULL;
2457
2458 for (i = 0; dm_segtypes[i].target && !seg; i++)
2459 if (!strcmp(raid_type, dm_segtypes[i].target))
2460 if (!(seg = _add_segment(node,
2461 dm_segtypes[i].type, size)))
2462 return_0;
2463
2464 if (!seg)
2465 return_0;
2466
2467 seg->region_size = region_size;
2468 seg->stripe_size = stripe_size;
2469 seg->area_count = 0;
2470 seg->rebuilds = rebuilds;
2471
2472 return 1;
2473 }
2474
2475 int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2476 uint64_t size,
2477 const char *rlog_uuid,
2478 const char *rlog_type,
2479 unsigned rsite_index,
2480 dm_replicator_mode_t mode,
2481 uint32_t async_timeout,
2482 uint64_t fall_behind_data,
2483 uint32_t fall_behind_ios)
2484 {
2485 struct load_segment *rseg;
2486 struct replicator_site *rsite;
2487
2488 /* Local site0 - adds replicator segment and links rlog device */
2489 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2490 if (node->props.segment_count) {
2491 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2492 return 0;
2493 }
2494
2495 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2496 return_0;
2497
2498 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2499 log_error("Missing replicator log uuid %s.", rlog_uuid);
2500 return 0;
2501 }
2502
2503 if (!_link_tree_nodes(node, rseg->log))
2504 return_0;
2505
2506 if (strcmp(rlog_type, "ringbuffer") != 0) {
2507 log_error("Unsupported replicator log type %s.", rlog_type);
2508 return 0;
2509 }
2510
2511 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2512 return_0;
2513
2514 dm_list_init(&rseg->rsites);
2515 rseg->rdevice_count = 0;
2516 node->activation_priority = 1;
2517 }
2518
2519 /* Add site to segment */
2520 if (mode == DM_REPLICATOR_SYNC
2521 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2522 log_error("Async parameters passed for synchronnous replicator.");
2523 return 0;
2524 }
2525
2526 if (node->props.segment_count != 1) {
2527 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2528 return 0;
2529 }
2530
2531 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2532 if (rseg->type != SEG_REPLICATOR) {
2533 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2534 dm_segtypes[rseg->type].target);
2535 return 0;
2536 }
2537
2538 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2539 log_error("Failed to allocate remote site segment.");
2540 return 0;
2541 }
2542
2543 dm_list_add(&rseg->rsites, &rsite->list);
2544 rseg->rsite_count++;
2545
2546 rsite->mode = mode;
2547 rsite->async_timeout = async_timeout;
2548 rsite->fall_behind_data = fall_behind_data;
2549 rsite->fall_behind_ios = fall_behind_ios;
2550 rsite->rsite_index = rsite_index;
2551
2552 return 1;
2553 }
2554
/*
 * Append a replicated device (slink) to a Replicator.
 *
 * Two call modes, selected by rsite_index:
 *  - REPLICATOR_LOCAL_SITE: creates the replicator-dev segment on 'node',
 *    links it to the replicator node (replicator_uuid) and records
 *    rdevice_index.  The replicator's local slink0 must already exist.
 *  - remote site: 'node' must already carry the replicator-dev segment
 *    created by the local-site call; only a new area is appended.
 *
 * In both modes the device area (rdev_uuid) is attached, together with a
 * sync log: either an in-core log (DM_CORELOG in slog_flags) or the tree
 * node named by slog_uuid.  Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *replicator_uuid,
					   uint64_t rdevice_index,
					   const char *rdev_uuid,
					   unsigned rsite_index,
					   const char *slog_uuid,
					   uint32_t slog_flags,
					   uint32_t slog_region_size)
{
	struct seg_area *area;
	struct load_segment *rseg;
	struct load_segment *rep_seg;

	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		/* Site index for local target */
		if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
			return_0;

		if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
			log_error("Missing replicator uuid %s.", replicator_uuid);
			return 0;
		}

		/* Local slink0 for replicator must be always initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
			return 0;
		}

		/* Validate the replicator node really carries a replicator segment. */
		rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
		if (rep_seg->type != SEG_REPLICATOR) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
				  dm_segtypes[rep_seg->type].target);
			return 0;
		}
		/* The replicator segment counts its attached devices. */
		rep_seg->rdevice_count++;

		if (!_link_tree_nodes(node, rseg->replicator))
			return_0;

		rseg->rdevice_index = rdevice_index;
	} else {
		/* Local slink0 for replicator must be always initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
			return 0;
		}

		rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
		if (rseg->type != SEG_REPLICATOR_DEV) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
				  dm_segtypes[rseg->type].target);
			return 0;
		}
	}

	/* A disk sync log needs a uuid to find it by. */
	if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
		log_error("Unspecified sync log uuid.");
		return 0;
	}

	/* Attach the replicated device as the segment's next area. */
	if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
		return_0;

	area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);

	if (!(slog_flags & DM_CORELOG)) {
		if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
			log_error("Couldn't find sync log uuid %s.", slog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, area->slog))
			return_0;
	}

	area->flags = slog_flags;
	area->region_size = slog_region_size;
	area->rsite_index = rsite_index;

	return 1;
}
2639
2640 int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2641 uint64_t size,
2642 uint64_t transation_id,
2643 const char *pool_uuid,
2644 const char *metadata_uuid,
2645 uint32_t data_block_size,
2646 uint64_t low_water_mark,
2647 unsigned skip_block_zeroeing)
2648 {
2649 struct load_segment *seg;
2650
2651 if (data_block_size < THIN_MIN_DATA_SIZE) {
2652 log_error("Data block size %d is lower then "
2653 QUOTE(THIN_MIN_DATA_SIZE) " sectors.",
2654 data_block_size);
2655 return 0;
2656 }
2657
2658 if (data_block_size > THIN_MAX_DATA_SIZE) {
2659 log_error("Data block size %d is higher then "
2660 QUOTE(THIN_MAX_DATA_SIZE) " sectors.",
2661 data_block_size);
2662 return 0;
2663 }
2664
2665 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2666 return_0;
2667
2668 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2669 log_error("Missing metadata uuid %s.", metadata_uuid);
2670 return 0;
2671 }
2672
2673 if (!_link_tree_nodes(node, seg->metadata))
2674 return_0;
2675
2676 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2677 log_error("Missing pool uuid %s.", pool_uuid);
2678 return 0;
2679 }
2680
2681 if (!_link_tree_nodes(node, seg->pool))
2682 return_0;
2683
2684 seg->data_block_size = data_block_size;
2685 seg->low_water_mark = low_water_mark;
2686 seg->skip_block_zeroeing = skip_block_zeroeing;
2687
2688 return 1;
2689 }
2690
2691 int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2692 uint64_t size,
2693 const char *thin_pool_uuid,
2694 uint32_t device_id)
2695 {
2696 struct load_segment *seg;
2697
2698 if (device_id > THIN_MAX_DEVICE_ID) {
2699 log_error("Device id %d is higher then " QUOTE(THIN_MAX_DEVICE_ID) ".",
2700 device_id);
2701 return 0;
2702 }
2703
2704 if (!(seg = _add_segment(node, SEG_THIN, size)))
2705 return_0;
2706
2707 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2708 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2709 return 0;
2710 }
2711
2712 if (!_link_tree_nodes(node, seg->pool))
2713 return_0;
2714
2715 seg->device_id = device_id;
2716
2717 return 1;
2718 }
2719
2720 static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
2721 {
2722 struct seg_area *area;
2723
2724 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
2725 log_error("Failed to allocate target segment area.");
2726 return 0;
2727 }
2728
2729 area->dev_node = dev_node;
2730 area->offset = offset;
2731
2732 dm_list_add(&seg->areas, &area->list);
2733 seg->area_count++;
2734
2735 return 1;
2736 }
2737
/*
 * Append a device area to the last segment added to 'node'.
 *
 * The device is located either by uuid (preferred: looked up in the tree
 * and linked as a dependency) or by dev_name (stat'ed on disk; must be a
 * block device, which is then added to the tree by major:minor).
 * 'offset' is the starting sector within the device.
 * Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_target_area(struct dm_tree_node *node,
				 const char *dev_name,
				 const char *uuid,
				 uint64_t offset)
{
	struct load_segment *seg;
	struct stat info;
	struct dm_tree_node *dev_node;

	if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
		log_error("dm_tree_node_add_target_area called without device");
		return 0;
	}

	if (uuid) {
		if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
			log_error("Couldn't find area uuid %s.", uuid);
			return 0;
		}
		if (!_link_tree_nodes(node, dev_node))
			return_0;
	} else {
		if (stat(dev_name, &info) < 0) {
			log_error("Device %s not found.", dev_name);
			return 0;
		}

		if (!S_ISBLK(info.st_mode)) {
			log_error("Device %s is not a block device.", dev_name);
			return 0;
		}

		/* FIXME Check correct macro use */
		if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
					  MINOR(info.st_rdev), 0)))
			return_0;
	}

	/* An area can only be attached to an existing segment. */
	if (!node->props.segment_count) {
		log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
		return 0;
	}

	seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);

	if (!_add_area(node, seg, dev_node, offset))
		return_0;

	return 1;
}
2788
2789 int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
2790 {
2791 struct load_segment *seg;
2792
2793 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2794
2795 switch (seg->type) {
2796 case SEG_RAID1:
2797 case SEG_RAID4:
2798 case SEG_RAID5_LA:
2799 case SEG_RAID5_RA:
2800 case SEG_RAID5_LS:
2801 case SEG_RAID5_RS:
2802 case SEG_RAID6_ZR:
2803 case SEG_RAID6_NR:
2804 case SEG_RAID6_NC:
2805 break;
2806 default:
2807 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
2808 return 0;
2809 }
2810
2811 if (!_add_area(node, seg, NULL, offset))
2812 return_0;
2813
2814 return 1;
2815 }
2816
2817 void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
2818 {
2819 node->dtree->cookie = cookie;
2820 }
2821
2822 uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
2823 {
2824 return node->dtree->cookie;
2825 }
This page took 0.169969 seconds and 6 git commands to generate.