]> sourceware.org Git - lvm2.git/blob - libdm/libdm-deptree.c
Name changes
[lvm2.git] / libdm / libdm-deptree.c
1 /*
2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15 #include "dmlib.h"
16 #include "libdm-targets.h"
17 #include "libdm-common.h"
18 #include "kdev_t.h"
19 #include "dm-ioctl.h"
20
21 #include <stdarg.h>
22 #include <sys/param.h>
23 #include <sys/utsname.h>
24
/* Upper bound on the length of a single target's params string. */
#define MAX_TARGET_PARAMSIZE 500000

/* FIXME Fix interface so this is used only by LVM */
#define UUID_PREFIX "LVM-"

/* Site index reserved for the local replicator site. */
#define REPLICATOR_LOCAL_SITE 0

/* Thin-pool data block size limits; presumably in 512-byte sectors
 * (128 = 64KiB, 2097152 = 1GiB) - TODO confirm against kernel target. */
#define THIN_MIN_DATA_SIZE 128
#define THIN_MAX_DATA_SIZE 2097152
/* Thin device ids are restricted to 24 bits. */
#define THIN_MAX_DEVICE_ID ((1 << 24) - 1)

/* Single-level stringification (does not macro-expand its argument first). */
#define QUOTE(x) #x
/* Supported segment types */
/*
 * Internal identifiers for the device-mapper target types this file can
 * emit table lines for.  The order must stay in sync with the 1:1 part
 * of the dm_segtypes[] mapping table below (see the warning there).
 */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,	/* Sentinel; terminates dm_segtypes[]. */
};
64
65 /* FIXME Add crypt and multipath support */
66
/*
 * Mapping from the internal segment-type enum to the kernel target name.
 * The first SEG_LAST entries form a 1:1 table indexable by enum value;
 * extra alias entries for the "raid" target follow.
 */
struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 *WARNING: Since 'raid' target overloads this 1:1 mapping table
	 * for search do not add new enum elements past them!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};
102
/* Some segment types have a list of areas of other devices attached */
/*
 * One mapped area (device + start offset) within a segment.  The
 * replicator-specific fields are only meaningful for replicator
 * segments.
 */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;	/* Device providing this area */

	uint64_t offset;		/* Start offset within dev_node; presumably sectors - TODO confirm */

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};

/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;		/* Identifies the site within the replicator */
	dm_replicator_mode_t mode;	/* Sync/async replication mode */
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};
128
/* Per-segment properties */
/*
 * Describes one target line of a table to be loaded.  Only the fields
 * relevant to 'type' are used; the trailing comments note which segment
 * types consume each field.
 */
struct load_segment {
	struct dm_list list;

	unsigned type;		/* SEG_* enum value */

	uint64_t size;		/* Segment length; presumably sectors - TODO confirm */

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	uint64_t low_water_mark_size;	/* Thin_pool */
	uint32_t data_block_size;       /* Thin_pool */
	unsigned skip_block_zeroing;	/* Thin_pool */
	uint32_t device_id;		/* Thin */

};
178
/* Per-device properties */
/*
 * Creation/reload state carried by each tree node: the device number to
 * create, the pending table (list of load_segments), an optional rename,
 * and flags controlling when the node may be resumed.
 */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	uint64_t thin_pool_transaction_id; /* Thin_pool */

	unsigned segment_count;		/* Number of entries on 'segs' */
	unsigned size_changed;		/* Set when a reload changed the device size */
	struct dm_list segs;		/* Pending table: list of load_segment */

	const char *new_name;		/* Non-NULL schedules a rename */

	/* If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;
};
212
/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;
	struct dm_tree_node *node;
};

/*
 * One device in the dependency tree.  Nodes are linked both ways:
 * 'uses' lists devices this one sits on top of, 'used_by' the devices
 * stacked on it.  The tree's root node anchors top- and bottom-level
 * devices and represents no real device itself.
 */
struct dm_tree_node {
	struct dm_tree *dtree;

        const char *name;
        const char *uuid;
        struct dm_info info;

        struct dm_list uses;       	/* Nodes this node uses */
        struct dm_list used_by;    	/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};

/*
 * Whole-tree state: a memory pool for all node allocations plus hash
 * tables for lookup by device number and by uuid.
 */
struct dm_tree {
	struct dm_pool *mem;
	struct dm_hash_table *devs;
	struct dm_hash_table *uuids;
	struct dm_tree_node root;
	int skip_lockfs;		/* 1 skips lockfs (for non-snapshots) */
	int no_flush;			/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;		/* 1 retries remove if not successful */
	uint32_t cookie;
};
254
255 struct dm_tree *dm_tree_create(void)
256 {
257 struct dm_tree *dtree;
258
259 if (!(dtree = dm_zalloc(sizeof(*dtree)))) {
260 log_error("dm_tree_create malloc failed");
261 return NULL;
262 }
263
264 dtree->root.dtree = dtree;
265 dm_list_init(&dtree->root.uses);
266 dm_list_init(&dtree->root.used_by);
267 dtree->skip_lockfs = 0;
268 dtree->no_flush = 0;
269
270 if (!(dtree->mem = dm_pool_create("dtree", 1024))) {
271 log_error("dtree pool creation failed");
272 dm_free(dtree);
273 return NULL;
274 }
275
276 if (!(dtree->devs = dm_hash_create(8))) {
277 log_error("dtree hash creation failed");
278 dm_pool_destroy(dtree->mem);
279 dm_free(dtree);
280 return NULL;
281 }
282
283 if (!(dtree->uuids = dm_hash_create(32))) {
284 log_error("dtree uuid hash creation failed");
285 dm_hash_destroy(dtree->devs);
286 dm_pool_destroy(dtree->mem);
287 dm_free(dtree);
288 return NULL;
289 }
290
291 return dtree;
292 }
293
294 void dm_tree_free(struct dm_tree *dtree)
295 {
296 if (!dtree)
297 return;
298
299 dm_hash_destroy(dtree->uuids);
300 dm_hash_destroy(dtree->devs);
301 dm_pool_destroy(dtree->mem);
302 dm_free(dtree);
303 }
304
305 static int _nodes_are_linked(const struct dm_tree_node *parent,
306 const struct dm_tree_node *child)
307 {
308 struct dm_tree_link *dlink;
309
310 dm_list_iterate_items(dlink, &parent->uses)
311 if (dlink->node == child)
312 return 1;
313
314 return 0;
315 }
316
317 static int _link(struct dm_list *list, struct dm_tree_node *node)
318 {
319 struct dm_tree_link *dlink;
320
321 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
322 log_error("dtree link allocation failed");
323 return 0;
324 }
325
326 dlink->node = node;
327 dm_list_add(list, &dlink->list);
328
329 return 1;
330 }
331
332 static int _link_nodes(struct dm_tree_node *parent,
333 struct dm_tree_node *child)
334 {
335 if (_nodes_are_linked(parent, child))
336 return 1;
337
338 if (!_link(&parent->uses, child))
339 return 0;
340
341 if (!_link(&child->used_by, parent))
342 return 0;
343
344 return 1;
345 }
346
347 static void _unlink(struct dm_list *list, struct dm_tree_node *node)
348 {
349 struct dm_tree_link *dlink;
350
351 dm_list_iterate_items(dlink, list)
352 if (dlink->node == node) {
353 dm_list_del(&dlink->list);
354 break;
355 }
356 }
357
358 static void _unlink_nodes(struct dm_tree_node *parent,
359 struct dm_tree_node *child)
360 {
361 if (!_nodes_are_linked(parent, child))
362 return;
363
364 _unlink(&parent->uses, child);
365 _unlink(&child->used_by, parent);
366 }
367
/* Mark 'node' as a top-level device: link it below the root node. */
static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}

/* Undo _add_to_toplevel(). */
static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}

/* Mark 'node' as a bottom-level device: link the root node below it. */
static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}

/* Undo _add_to_bottomlevel(). */
static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}
387
/*
 * Link parent above child, maintaining the invariant that only devices
 * without real parents/children hang off the root node: linking a real
 * parent removes the child from the top level, and vice versa.  Linking
 * to the root node itself is skipped when the other side already has a
 * real relation.  Returns 0 on allocation failure.
 */
static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}
405
/*
 * Allocate a new node from the tree's pool and register it in the
 * device-number hash and (when a non-empty uuid is given) the uuid
 * hash.  'name' and 'uuid' are stored by reference - the caller must
 * ensure they outlive the node.  Returns NULL on failure with all
 * partial registrations undone.
 */
static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	/* Hash key is the combined device number. */
	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		/* Roll back the dev-number registration too. */
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}
454
455 static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
456 uint32_t major, uint32_t minor)
457 {
458 uint64_t dev = MKDEV(major, minor);
459
460 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
461 sizeof(dev));
462 }
463
464 static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
465 const char *uuid)
466 {
467 struct dm_tree_node *node;
468
469 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
470 return node;
471
472 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
473 return NULL;
474
475 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
476 }
477
/*
 * Fetch name, uuid, info and dependency list for device major:minor.
 *
 * Non-dm devices get no ioctl: empty name/uuid, NULL deps and an
 * 'exists = 0' info are synthesised and 1 is returned.
 *
 * On success for a dm device, *dmt holds the completed DEPS task; the
 * caller must destroy it (*deps points into it, *name/*uuid are
 * duplicated into 'mem').  On failure the task is destroyed here and 0
 * is returned.
 */
static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
		 const char **name, const char **uuid,
		 struct dm_info *info, struct dm_deps **deps)
{
	memset(info, 0, sizeof(*info));

	/* Not device-mapper: report a non-existent mapped device. */
	if (!dm_is_dm_major(major)) {
		*name = "";
		*uuid = "";
		*deps = NULL;
		info->major = major;
		info->minor = minor;
		info->exists = 0;
		info->live_table = 0;
		info->inactive_table = 0;
		info->read_only = 0;
		return 1;
	}

	if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
		log_error("deps dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(*dmt, major)) {
		log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_set_minor(*dmt, minor)) {
		log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_run(*dmt)) {
		log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_get_info(*dmt, info)) {
		log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!info->exists) {
		*name = "";
		*uuid = "";
		*deps = NULL;
	} else {
		/* Sanity check: the kernel must report the number we asked for. */
		if (info->major != major) {
			log_error("Inconsistent dtree major number: %u != %u",
				  major, info->major);
			goto failed;
		}
		if (info->minor != minor) {
			log_error("Inconsistent dtree minor number: %u != %u",
				  minor, info->minor);
			goto failed;
		}
		if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
			log_error("name pool_strdup failed");
			goto failed;
		}
		if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
			log_error("uuid pool_strdup failed");
			goto failed;
		}
		*deps = dm_task_get_deps(*dmt);
	}

	return 1;

failed:
	dm_task_destroy(*dmt);
	return 0;
}
558
/*
 * Ensure a node for major:minor exists in the tree, link it under
 * 'parent', and recursively add all its dependencies.  A device already
 * present is only re-linked, not re-scanned.  Devices with no deps (or
 * that do not exist) are anchored at the bottom level.
 * Returns the node, or NULL on error.
 */
static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	/* 'deps' (and name/uuid for a failed path) live in dmt - free last. */
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}
616
617 static int _node_clear_table(struct dm_tree_node *dnode)
618 {
619 struct dm_task *dmt;
620 struct dm_info *info;
621 const char *name;
622 int r;
623
624 if (!(info = &dnode->info)) {
625 log_error("_node_clear_table failed: missing info");
626 return 0;
627 }
628
629 if (!(name = dm_tree_node_get_name(dnode))) {
630 log_error("_node_clear_table failed: missing name");
631 return 0;
632 }
633
634 /* Is there a table? */
635 if (!info->exists || !info->inactive_table)
636 return 1;
637
638 // FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.
639
640 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
641 name, info->major, info->minor);
642
643 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
644 log_error("Table clear dm_task creation failed for %s", name);
645 return 0;
646 }
647
648 if (!dm_task_set_major(dmt, info->major) ||
649 !dm_task_set_minor(dmt, info->minor)) {
650 log_error("Failed to set device number for %s table clear", name);
651 dm_task_destroy(dmt);
652 return 0;
653 }
654
655 r = dm_task_run(dmt);
656
657 if (!dm_task_get_info(dmt, info)) {
658 log_error("_node_clear_table failed: info missing after running task for %s", name);
659 r = 0;
660 }
661
662 dm_task_destroy(dmt);
663
664 return r;
665 }
666
667 struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
668 const char *name,
669 const char *uuid,
670 uint32_t major, uint32_t minor,
671 int read_only,
672 int clear_inactive,
673 void *context)
674 {
675 struct dm_tree_node *dnode;
676 struct dm_info info;
677 const char *name2;
678 const char *uuid2;
679
680 /* Do we need to add node to tree? */
681 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
682 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
683 log_error("name pool_strdup failed");
684 return NULL;
685 }
686 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
687 log_error("uuid pool_strdup failed");
688 return NULL;
689 }
690
691 info.major = 0;
692 info.minor = 0;
693 info.exists = 0;
694 info.live_table = 0;
695 info.inactive_table = 0;
696 info.read_only = 0;
697
698 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
699 context, 0)))
700 return_NULL;
701
702 /* Attach to root node until a table is supplied */
703 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
704 return_NULL;
705
706 dnode->props.major = major;
707 dnode->props.minor = minor;
708 dnode->props.new_name = NULL;
709 dnode->props.size_changed = 0;
710 } else if (strcmp(name, dnode->name)) {
711 /* Do we need to rename node? */
712 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
713 log_error("name pool_strdup failed");
714 return 0;
715 }
716 }
717
718 dnode->props.read_only = read_only ? 1 : 0;
719 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
720 dnode->props.read_ahead_flags = 0;
721
722 if (clear_inactive && !_node_clear_table(dnode))
723 return_NULL;
724
725 dnode->context = context;
726 dnode->udev_flags = 0;
727
728 return dnode;
729 }
730
731 struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
732 const char *name,
733 const char *uuid,
734 uint32_t major,
735 uint32_t minor,
736 int read_only,
737 int clear_inactive,
738 void *context,
739 uint16_t udev_flags)
740 {
741 struct dm_tree_node *node;
742
743 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
744 clear_inactive, context)))
745 node->udev_flags = udev_flags;
746
747 return node;
748 }
749
750
/* Store the read-ahead value/flags to apply when the node is resumed. */
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}

/* Record a node to be presuspended before this one (direct child only). */
void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}
764
/* Add an existing device (and its dependencies) to the tree.  Returns 1/0. */
int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
{
	return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
}

/* As dm_tree_add_dev(), propagating udev control flags to new nodes. */
int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
				    uint32_t minor, uint16_t udev_flags)
{
	return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
}
775
/* Node name; "" when the device does not exist (e.g. the root node). */
const char *dm_tree_node_get_name(const struct dm_tree_node *node)
{
	return node->info.exists ? node->name : "";
}

/* Node uuid; "" when the device does not exist. */
const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
{
	return node->info.exists ? node->uuid : "";
}

/* Cached dm_info for the node (not refreshed from the kernel here). */
const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}

/* Externally supplied context pointer, as passed at node creation. */
void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}

/* Non-zero if a table (re)load changed the device's size. */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}
800
801 int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
802 {
803 if (inverted) {
804 if (_nodes_are_linked(&node->dtree->root, node))
805 return 0;
806 return dm_list_size(&node->used_by);
807 }
808
809 if (_nodes_are_linked(node, &node->dtree->root))
810 return 0;
811
812 return dm_list_size(&node->uses);
813 }
814
/*
 * Returns 1 if no prefix supplied
 */
/*
 * Test whether 'uuid' matches 'uuid_prefix'.  Besides a plain prefix
 * compare, this also handles a transition case where one side carries
 * the "LVM-" prefix and the other does not.
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	/* NOTE(review): 4 here presumably equals sizeof(UUID_PREFIX) - 1
	 * ("LVM-"); a shorter prefix cannot contain it - TODO confirm. */
	if (uuid_prefix_len <= 4)
		return 0;

	/* uuid already carries the prefix, so the plain compare above
	 * was authoritative. */
	if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	/* Only retry when the requested prefix itself starts with "LVM-". */
	if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	/* Compare with the "LVM-" part of the prefix stripped. */
	if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
		return 1;

	return 0;
}
841
/*
 * Returns 1 if no children.
 */
/*
 * Check that every child of 'node' belonging to the given uuid prefix
 * is suspended.  Children that the parent intends to presuspend itself
 * are skipped.  'inverted' selects which direction counts as children.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		/* Devices hanging off the root have no real children here. */
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}
890
891 /*
892 * Set major and minor to zero for root of tree.
893 */
894 struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
895 uint32_t major,
896 uint32_t minor)
897 {
898 if (!major && !minor)
899 return &dtree->root;
900
901 return _find_dm_tree_node(dtree, major, minor);
902 }
903
904 /*
905 * Set uuid to NULL for root of tree.
906 */
907 struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
908 const char *uuid)
909 {
910 if (!uuid || !*uuid)
911 return &dtree->root;
912
913 return _find_dm_tree_node_by_uuid(dtree, uuid);
914 }
915
916 /*
917 * First time set *handle to NULL.
918 * Set inverted to invert the tree.
919 */
920 struct dm_tree_node *dm_tree_next_child(void **handle,
921 const struct dm_tree_node *parent,
922 uint32_t inverted)
923 {
924 struct dm_list **dlink = (struct dm_list **) handle;
925 const struct dm_list *use_list;
926
927 if (inverted)
928 use_list = &parent->used_by;
929 else
930 use_list = &parent->uses;
931
932 if (!*dlink)
933 *dlink = dm_list_first(use_list);
934 else
935 *dlink = dm_list_next(use_list, *dlink);
936
937 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
938 }
939
940 /*
941 * Deactivate a device with its dependencies if the uuid prefix matches.
942 */
943 static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
944 struct dm_info *info)
945 {
946 struct dm_task *dmt;
947 int r;
948
949 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
950 log_error("_info_by_dev: dm_task creation failed");
951 return 0;
952 }
953
954 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
955 log_error("_info_by_dev: Failed to set device number");
956 dm_task_destroy(dmt);
957 return 0;
958 }
959
960 if (!with_open_count && !dm_task_no_open_count(dmt))
961 log_error("Failed to disable open_count");
962
963 if ((r = dm_task_run(dmt)))
964 r = dm_task_get_info(dmt, info);
965
966 dm_task_destroy(dmt);
967
968 return r;
969 }
970
/*
 * Return 1 if the device described by 'info' is safe to remove: it does
 * not exist, or nothing holds it open.  With sysfs available, holders
 * and mounted filesystems are checked; otherwise only open_count is.
 */
static int _check_device_not_in_use(struct dm_info *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Device %" PRIu32 ":%" PRIu32 " in use",
				  info->major, info->minor);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Device %" PRIu32 ":%" PRIu32 " is used "
			  "by another device.", info->major, info->minor);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Device %" PRIu32 ":%" PRIu32 " contains "
			  "a filesystem in use.", info->major, info->minor);
		return 0;
	}

	return 1;
}
1001
/* Check if all parent nodes of given node have open_count == 0 */
/*
 * Only parents whose uuid matches the prefix are considered; their
 * open_count is refreshed from the kernel before checking.  Parents
 * that have vanished meanwhile are skipped.  Returns 1 when all
 * matching parents are closed.
 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}
1042
/*
 * Remove the device major:minor via DM_DEVICE_REMOVE, setting a udev
 * cookie; optionally ask the kernel to retry the remove.  The /dev node
 * is cleaned up here when udev rules are disabled.  Returns 1/0.
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;


	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
1084
/*
 * Rename device major:minor from old_name to new_name via
 * DM_DEVICE_RENAME, setting a udev cookie.  Returns 1/0.
 */
static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}
1119
1120 /* FIXME Merge with _suspend_node? */
1121 static int _resume_node(const char *name, uint32_t major, uint32_t minor,
1122 uint32_t read_ahead, uint32_t read_ahead_flags,
1123 struct dm_info *newinfo, uint32_t *cookie,
1124 uint16_t udev_flags, int already_suspended)
1125 {
1126 struct dm_task *dmt;
1127 int r = 0;
1128
1129 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1130
1131 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
1132 log_debug("Suspend dm_task creation failed for %s.", name);
1133 return 0;
1134 }
1135
1136 /* FIXME Kernel should fill in name on return instead */
1137 if (!dm_task_set_name(dmt, name)) {
1138 log_debug("Failed to set device name for %s resumption.", name);
1139 goto out;
1140 }
1141
1142 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1143 log_error("Failed to set device number for %s resumption.", name);
1144 goto out;
1145 }
1146
1147 if (!dm_task_no_open_count(dmt))
1148 log_error("Failed to disable open_count");
1149
1150 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1151 log_error("Failed to set read ahead");
1152
1153 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1154 goto_out;
1155
1156 if (!(r = dm_task_run(dmt)))
1157 goto_out;
1158
1159 if (already_suspended)
1160 dec_suspended();
1161
1162 if (!(r = dm_task_get_info(dmt, newinfo)))
1163 stack;
1164
1165 out:
1166 dm_task_destroy(dmt);
1167
1168 return r;
1169 }
1170
/*
 * Suspend the device via DM_DEVICE_SUSPEND, optionally skipping the
 * filesystem sync (skip_lockfs) and/or the outstanding-I/O flush
 * (no_flush).  On success the global suspended-device counter is
 * incremented and *newinfo is refreshed.  Returns 1/0.
 */
static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt))) {
		inc_suspended();
		r = dm_task_get_info(dmt, newinfo);
	}

	dm_task_destroy(dmt);

	return r;
}
1211
/*
 * Verify the thin pool's transaction id matches the expected value.
 *
 * NOTE(review): this is placeholder logic - t_id is initialised from
 * transaction_id itself (see the FIXME), so the final compare currently
 * always succeeds once the status ioctl runs; the status params are
 * logged but not yet parsed.  The return value of dm_get_next_target()
 * is also ignored.  Returns 1/0.
 */
static int _check_thin_pool_transaction_id(const char *name, uint32_t major, uint32_t minor,
					   uint64_t transaction_id)
{
	struct dm_task *dmt;
	int r = 0;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;
	uint64_t t_id = transaction_id; // FIXME: fake

	log_verbose("Checking transaction id %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS))) {
		log_debug("Device status dm_task creation failed for %s.", name);
		return 0;
	}

	if (!dm_task_set_name(dmt, name)) {
		log_debug("Failed to set device name for %s status.", name);
		goto out;
	}

	if (!dm_task_set_major_minor(dmt, major, minor, 1)) {
		log_error("Failed to set device number for %s status.", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!(r = dm_task_run(dmt)))
		goto_out;

	dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
	log_verbose("PARSE params %s", params); // FIXME: parse status

	r = (transaction_id == t_id);

out:
	dm_task_destroy(dmt);

	return r;
}
1255
1256 /*
1257 * FIXME Don't attempt to deactivate known internal dependencies.
1258 */
/*
 * Recursively deactivate every child node whose uuid matches the given
 * prefix.  'level' is the recursion depth; "device open" errors are only
 * reported at the top level (0), since deeper nodes are likely internal
 * dependencies.  Returns 1 on success, 0 if any matching device could
 * not be deactivated.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (!_check_device_not_in_use(&info))
			continue;

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      child->dtree->retry_remove)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			/* A suspended device was removed - update global counter */
			dec_suspended();

		/* Recurse to deactivate devices this one was stacked upon */
		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}
1338
1339 int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1340 const char *uuid_prefix,
1341 size_t uuid_prefix_len)
1342 {
1343 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1344 }
1345
1346 void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1347 {
1348 dnode->dtree->skip_lockfs = 1;
1349 }
1350
1351 void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1352 {
1353 dnode->dtree->no_flush = 1;
1354 }
1355
1356 void dm_tree_retry_remove(struct dm_tree_node *dnode)
1357 {
1358 dnode->dtree->retry_remove = 1;
1359 }
1360
/*
 * Suspend matching nodes at this level of the tree, then recurse to
 * suspend their children.  A node is suspended only if its uuid matches
 * the prefix, its immediate parents are already suspended, and it exists
 * and is not suspended already.
 * Returns 1 on success, 0 if any suspend failed.
 */
int dm_tree_suspend_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		/* Skip missing or already-suspended devices */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}
1436
1437 int dm_tree_activate_children(struct dm_tree_node *dnode,
1438 const char *uuid_prefix,
1439 size_t uuid_prefix_len)
1440 {
1441 int r = 1;
1442 void *handle = NULL;
1443 struct dm_tree_node *child = dnode;
1444 struct dm_info newinfo;
1445 const char *name;
1446 const char *uuid;
1447 int priority;
1448
1449 /* Activate children first */
1450 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1451 if (!(uuid = dm_tree_node_get_uuid(child))) {
1452 stack;
1453 continue;
1454 }
1455
1456 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1457 continue;
1458
1459 if (dm_tree_node_num_children(child, 0))
1460 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1461 return_0;
1462 }
1463
1464 handle = NULL;
1465
1466 for (priority = 0; priority < 3; priority++) {
1467 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1468 if (priority != child->activation_priority)
1469 continue;
1470
1471 if (!(uuid = dm_tree_node_get_uuid(child))) {
1472 stack;
1473 continue;
1474 }
1475
1476 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1477 continue;
1478
1479 if (!(name = dm_tree_node_get_name(child))) {
1480 stack;
1481 continue;
1482 }
1483
1484 /* Rename? */
1485 if (child->props.new_name) {
1486 if (!_rename_node(name, child->props.new_name, child->info.major,
1487 child->info.minor, &child->dtree->cookie,
1488 child->udev_flags)) {
1489 log_error("Failed to rename %s (%" PRIu32
1490 ":%" PRIu32 ") to %s", name, child->info.major,
1491 child->info.minor, child->props.new_name);
1492 return 0;
1493 }
1494 child->name = child->props.new_name;
1495 child->props.new_name = NULL;
1496 }
1497
1498 if (!child->info.inactive_table && !child->info.suspended)
1499 continue;
1500
1501 if (!_resume_node(child->name, child->info.major, child->info.minor,
1502 child->props.read_ahead, child->props.read_ahead_flags,
1503 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
1504 log_error("Unable to resume %s (%" PRIu32
1505 ":%" PRIu32 ")", child->name, child->info.major,
1506 child->info.minor);
1507 r = 0;
1508 continue;
1509 }
1510
1511 /* Update cached info */
1512 child->info = newinfo;
1513
1514 /* FIXME: trial version - to avoid use of unsynchronized thin_pool transaction_id */
1515 if (child->props.thin_pool_transaction_id &&
1516 !_check_thin_pool_transaction_id(child->name, child->info.major,
1517 child->info.minor,
1518 child->props.thin_pool_transaction_id)) {
1519 stack;
1520 if (!(dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len)))
1521 log_error("Failed to deactivate %s", child->name);
1522 r = 0;
1523 continue;
1524 }
1525 }
1526 }
1527
1528 handle = NULL;
1529
1530 return r;
1531 }
1532
1533 static int _create_node(struct dm_tree_node *dnode)
1534 {
1535 int r = 0;
1536 struct dm_task *dmt;
1537
1538 log_verbose("Creating %s", dnode->name);
1539
1540 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1541 log_error("Create dm_task creation failed for %s", dnode->name);
1542 return 0;
1543 }
1544
1545 if (!dm_task_set_name(dmt, dnode->name)) {
1546 log_error("Failed to set device name for %s", dnode->name);
1547 goto out;
1548 }
1549
1550 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1551 log_error("Failed to set uuid for %s", dnode->name);
1552 goto out;
1553 }
1554
1555 if (dnode->props.major &&
1556 (!dm_task_set_major(dmt, dnode->props.major) ||
1557 !dm_task_set_minor(dmt, dnode->props.minor))) {
1558 log_error("Failed to set device number for %s creation.", dnode->name);
1559 goto out;
1560 }
1561
1562 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1563 log_error("Failed to set read only flag for %s", dnode->name);
1564 goto out;
1565 }
1566
1567 if (!dm_task_no_open_count(dmt))
1568 log_error("Failed to disable open_count");
1569
1570 if ((r = dm_task_run(dmt)))
1571 r = dm_task_get_info(dmt, &dnode->info);
1572
1573 out:
1574 dm_task_destroy(dmt);
1575
1576 return r;
1577 }
1578
1579
1580 static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
1581 {
1582 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
1583 log_error("Failed to format %s device number for %s as dm "
1584 "target (%u,%u)",
1585 node->name, node->uuid, node->info.major, node->info.minor);
1586 return 0;
1587 }
1588
1589 return 1;
1590 }
1591
/*
 * Simplify string emitting code.
 *
 * Append a formatted string at offset 'p' of the function-local
 * 'params' buffer (of size 'paramsize') and advance 'p' past it.
 * If the remaining space is insufficient, the *enclosing function*
 * returns -1, which callers treat as "retry with a larger buffer"
 * (see _emit_segment()).
 */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
1602
/*
 * _emit_areas_line
 *
 * Emit the per-area (device/offset) tail of a target table line into
 * 'params' at offset *pos; the format depends on seg->type.
 *
 * Returns: 1 on success, 0 on failure (and -1 via EMIT_PARAMS when the
 * buffer is too small, which makes the caller retry with a bigger one)
 */
static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;
	const char *logtype, *synctype;
	unsigned log_parm_count;

	dm_list_iterate_items(area, &seg->areas) {
		switch (seg->type) {
		case SEG_REPLICATOR_DEV:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
			if (first_time)
				/* First area gets "nolog 0" - no log parameters */
				EMIT_PARAMS(*pos, " nolog 0");
			else {
				/* Remote devices */
				log_parm_count = (area->flags &
						  (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;

				if (!area->slog) {
					devbuf[0] = 0;		/* Only core log parameters */
					logtype = "core";
				} else {
					devbuf[0] = ' ';	/* Extra space before device name */
					if (!_build_dev_string(devbuf + 1,
							       sizeof(devbuf) - 1,
							       area->slog))
						return_0;
					logtype = "disk";
					log_parm_count++;	/* Extra sync log device name parameter */
				}

				EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
					    log_parm_count, devbuf, area->region_size);

				synctype = (area->flags & DM_NOSYNC) ?
					   " nosync" : (area->flags & DM_FORCESYNC) ?
					   " sync" : NULL;

				if (synctype)
					EMIT_PARAMS(*pos, "%s", synctype);
			}
			break;
		case SEG_RAID1:
		case SEG_RAID4:
		case SEG_RAID5_LA:
		case SEG_RAID5_RA:
		case SEG_RAID5_LS:
		case SEG_RAID5_RS:
		case SEG_RAID6_ZR:
		case SEG_RAID6_NR:
		case SEG_RAID6_NC:
			/* A missing device is emitted as '-' */
			if (!area->dev_node) {
				EMIT_PARAMS(*pos, " -");
				break;
			}
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %s", devbuf);
			break;
		default:
			/* Linear/striped/crypt etc.: "<dev> <offset>" pairs */
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
				    devbuf, area->offset);
		}

		first_time = 0;
	}

	return 1;
}
1687
/*
 * Emit the table line for a replicator control target: the replication
 * log clause ("<rlog_type> 4 <rlog dev> 0 auto <rlog size>") followed by
 * one "blockdev ..." clause per remote site.
 * Returns 1 on success, 0 on failure (-1 via EMIT_PARAMS on overflow).
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	/* The replication log device is mandatory */
	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* The log size is taken from the last segment of the log device */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 fixed args; +2 when any fall-behind/timeout limit is set */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		/* At most one limit is emitted, in this priority order */
		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1723
/*
 * Emit the table line for a mirror target: log parameters (chosen by
 * kernel version and cluster mode), mirror flags, then the mirror legs
 * via _emit_areas_line().
 *
 * Returns: 1 on success, 0 on failure
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table. It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types. Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	/* Emit the log clause: type, parameter count, then the parameters */
	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	/* Emit the mirror legs themselves */
	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
1851
1852 static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1853 uint32_t minor, struct load_segment *seg,
1854 uint64_t *seg_start, char *params,
1855 size_t paramsize)
1856 {
1857 uint32_t i, *tmp;
1858 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1859 int pos = 0;
1860
1861 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1862 param_count++;
1863
1864 if (seg->region_size)
1865 param_count += 2;
1866
1867 tmp = (uint32_t *)(&seg->rebuilds); /* rebuilds is 64-bit */
1868 param_count += 2 * hweight32(tmp[0]);
1869 param_count += 2 * hweight32(tmp[1]);
1870
1871 if ((seg->type == SEG_RAID1) && seg->stripe_size)
1872 log_error("WARNING: Ignoring RAID1 stripe size");
1873
1874 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
1875 param_count, seg->stripe_size);
1876
1877 if (seg->flags & DM_NOSYNC)
1878 EMIT_PARAMS(pos, " nosync");
1879 else if (seg->flags & DM_FORCESYNC)
1880 EMIT_PARAMS(pos, " sync");
1881
1882 if (seg->region_size)
1883 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
1884
1885 for (i = 0; i < (seg->area_count / 2); i++)
1886 if (seg->rebuilds & (1 << i))
1887 EMIT_PARAMS(pos, " rebuild %u", i);
1888
1889 /* Print number of metadata/data device pairs */
1890 EMIT_PARAMS(pos, " %u", seg->area_count/2);
1891
1892 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
1893 return_0;
1894
1895 return 1;
1896 }
1897
/*
 * Build the parameter string for one load_segment and add it to 'dmt'
 * as a table target.  On success *seg_start is advanced by seg->size.
 * Returns 1 on success, 0 on failure (-1 via EMIT_PARAMS when the
 * params buffer is too small - caller retries with a bigger buffer).
 */
static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
			      uint32_t minor, struct load_segment *seg,
			      uint64_t *seg_start, char *params,
			      size_t paramsize)
{
	int pos = 0;
	int r;
	int target_type_is_raid = 0;
	char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
	char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];

	/* First pass: emit the type-specific prefix of the parameter line */
	switch(seg->type) {
	case SEG_ERROR:
	case SEG_ZERO:
	case SEG_LINEAR:
		break;
	case SEG_MIRRORED:
		/* Mirrors are pretty complicated - now in separate function */
		r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
		if (!r)
			return_0;
		break;
	case SEG_REPLICATOR:
		if ((r = _replicator_emit_segment_line(seg, params, paramsize,
						       &pos)) <= 0) {
			stack;
			return r;
		}
		break;
	case SEG_REPLICATOR_DEV:
		if (!seg->replicator || !_build_dev_string(originbuf,
							   sizeof(originbuf),
							   seg->replicator))
			return_0;

		EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
		break;
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_MERGE:
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
			return_0;
		/* "<origin> <cow> P|N <chunk size>" - P = persistent */
		EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
			    seg->persistent ? 'P' : 'N', seg->chunk_size);
		break;
	case SEG_SNAPSHOT_ORIGIN:
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		EMIT_PARAMS(pos, "%s", originbuf);
		break;
	case SEG_STRIPED:
		EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
		break;
	case SEG_CRYPT:
		/* "<cipher>[-<chainmode>][-<iv>] <key> <iv offset>" */
		EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
			    seg->chainmode ? "-" : "", seg->chainmode ?: "",
			    seg->iv ? "-" : "", seg->iv ?: "", seg->key,
			    seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
			    seg->iv_offset : *seg_start);
		break;
	case SEG_RAID1:
	case SEG_RAID4:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
		/* All raid variants share the "raid" kernel target name */
		target_type_is_raid = 1;
		r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
					    params, paramsize);
		if (!r)
			return_0;

		break;
	case SEG_THIN_POOL:
		if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
			return_0;
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
			    seg->data_block_size, seg->low_water_mark_size,
			    seg->skip_block_zeroing ? "1 skip_block_zeroing" : "");
		break;
	case SEG_THIN:
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
		break;
	}

	/* Second pass: append the area list for targets that take one */
	switch(seg->type) {
	case SEG_ERROR:
	case SEG_REPLICATOR:
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_ORIGIN:
	case SEG_SNAPSHOT_MERGE:
	case SEG_ZERO:
	case SEG_THIN_POOL:
	case SEG_THIN:
		break;
	case SEG_CRYPT:
	case SEG_LINEAR:
	case SEG_REPLICATOR_DEV:
	case SEG_STRIPED:
		if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
			stack;
			return r;
		}
		if (!params[0]) {
			log_error("No parameters supplied for %s target "
				  "%u:%u.", dm_segtypes[seg->type].target,
				  major, minor);
			return 0;
		}
		break;
	}

	log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
		  " %" PRIu64 " %s %s", major, minor,
		  *seg_start, seg->size, target_type_is_raid ? "raid" :
		  dm_segtypes[seg->type].target, params);

	if (!dm_task_add_target(dmt, *seg_start, seg->size,
				target_type_is_raid ? "raid" :
				dm_segtypes[seg->type].target, params))
		return_0;

	*seg_start += seg->size;

	return 1;
}
2032
2033 #undef EMIT_PARAMS
2034
2035 static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2036 struct load_segment *seg, uint64_t *seg_start)
2037 {
2038 char *params;
2039 size_t paramsize = 4096;
2040 int ret;
2041
2042 do {
2043 if (!(params = dm_malloc(paramsize))) {
2044 log_error("Insufficient space for target parameters.");
2045 return 0;
2046 }
2047
2048 params[0] = '\0';
2049 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2050 params, paramsize);
2051 dm_free(params);
2052
2053 if (!ret)
2054 stack;
2055
2056 if (ret >= 0)
2057 return ret;
2058
2059 log_debug("Insufficient space in params[%" PRIsize_t
2060 "] for target parameters.", paramsize);
2061
2062 paramsize *= 2;
2063 } while (paramsize < MAX_TARGET_PARAMSIZE);
2064
2065 log_error("Target parameter size too big. Aborting.");
2066 return 0;
2067 }
2068
/*
 * Load dnode's queued segments as its inactive table via
 * DM_DEVICE_RELOAD, recording in props.size_changed whether the new
 * table's total size differs from the existing table's.
 * Returns 1 on success, 0 on failure.
 */
static int _load_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;
	struct load_segment *seg;
	uint64_t seg_start = 0, existing_table_size;

	log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
		    dnode->info.major, dnode->info.minor);

	if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
		log_error("Reload dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set device number for %s reload.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	/* Emit one table line per queued segment; seg_start accumulates total size */
	dm_list_iterate_items(seg, &dnode->props.segs)
		if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
				   seg, &seg_start))
			goto_out;

	if (!dm_task_suppress_identical_reload(dmt))
		log_error("Failed to suppress reload of identical tables.");

	if ((r = dm_task_run(dmt))) {
		r = dm_task_get_info(dmt, &dnode->info);
		/* No inactive table after a successful reload => it was suppressed */
		if (r && !dnode->info.inactive_table)
			log_verbose("Suppressed %s identical table reload.",
				    dnode->name);

		existing_table_size = dm_task_get_existing_table_size(dmt);
		if ((dnode->props.size_changed =
		     (existing_table_size == seg_start) ? 0 : 1)) {
			log_debug("Table size changed from %" PRIu64 " to %"
				  PRIu64 " for %s", existing_table_size,
				  seg_start, dnode->name);
			/*
			 * Kernel usually skips size validation on zero-length devices
			 * now so no need to preload them.
			 */
			/* FIXME In which kernel version did this begin? */
			if (!existing_table_size && dnode->props.delay_resume_if_new)
				dnode->props.size_changed = 0;
		}
	}

	/* All queued segments have been consumed by this load */
	dnode->props.segment_count = 0;

out:
	dm_task_destroy(dmt);

	return r;
}
2135
/*
 * Recursively create devices and load inactive tables for all matching
 * child nodes (depth-first).  A node whose size changed and that has
 * parents stacked on it is resumed immediately so the parents see the
 * new size.  Returns 1 on success, 0 if any node failed to resume or
 * failed the thin-pool transaction-id check.
 */
int dm_tree_preload_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child;
	struct dm_info newinfo;
	int update_devs_flag = 0;

	/* Preload children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		/* Skip existing non-device-mapper devices */
		if (!child->info.exists && child->info.major)
			continue;

		/* Ignore if it doesn't belong to this VG */
		if (child->info.exists &&
		    !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
				return_0;

		/* FIXME Cope if name exists with no uuid? */
		if (!child->info.exists) {
			if (!_create_node(child)) {
				stack;
				return 0;
			}
		}

		/* Load queued segments as the inactive table */
		if (!child->info.inactive_table && child->props.segment_count) {
			if (!_load_node(child)) {
				stack;
				return 0;
			}
		}

		/* Propagate device size change */
		if (child->props.size_changed)
			dnode->props.size_changed = 1;

		/* Resume device immediately if it has parents and its size changed */
		if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
			continue;

		if (!child->info.inactive_table && !child->info.suspended)
			continue;

		if (!_resume_node(child->name, child->info.major, child->info.minor,
				  child->props.read_ahead, child->props.read_ahead_flags,
				  &newinfo, &child->dtree->cookie, child->udev_flags,
				  child->info.suspended)) {
			log_error("Unable to resume %s (%" PRIu32
				  ":%" PRIu32 ")", child->name, child->info.major,
				  child->info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;

		/*
		 * Prepare for immediate synchronization with udev and flush all stacked
		 * dev node operations if requested by immediate_dev_node property. But
		 * finish processing current level in the tree first.
		 */
		if (child->props.immediate_dev_node)
			update_devs_flag = 1;

		/* FIXME: trial version - to avoid use of unsynchronized thin_pool transaction_id */
		if (child->props.thin_pool_transaction_id &&
		    !_check_thin_pool_transaction_id(child->name, child->info.major,
						     child->info.minor,
						     child->props.thin_pool_transaction_id)) {
			stack;
			if (!(dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len)))
				log_error("Failed to deactivate %s", child->name);
			r = 0;
			continue;
		}
	}

	handle = NULL;

	if (update_devs_flag) {
		/* Flush stacked dev node operations before returning */
		if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
			stack;
		dm_tree_set_cookie(dnode, 0);
	}

	return r;
}
2232
2233 /*
2234 * Returns 1 if unsure.
2235 */
2236 int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
2237 const char *uuid_prefix,
2238 size_t uuid_prefix_len)
2239 {
2240 void *handle = NULL;
2241 struct dm_tree_node *child = dnode;
2242 const char *uuid;
2243
2244 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2245 if (!(uuid = dm_tree_node_get_uuid(child))) {
2246 log_error("Failed to get uuid for dtree node.");
2247 return 1;
2248 }
2249
2250 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
2251 return 1;
2252
2253 if (dm_tree_node_num_children(child, 0))
2254 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
2255 }
2256
2257 return 0;
2258 }
2259
2260 /*
2261 * Target functions
2262 */
2263 static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
2264 {
2265 struct load_segment *seg;
2266
2267 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2268 log_error("dtree node segment allocation failed");
2269 return NULL;
2270 }
2271
2272 seg->type = type;
2273 seg->size = size;
2274 seg->area_count = 0;
2275 dm_list_init(&seg->areas);
2276 seg->stripe_size = 0;
2277 seg->persistent = 0;
2278 seg->chunk_size = 0;
2279 seg->cow = NULL;
2280 seg->origin = NULL;
2281 seg->merge = NULL;
2282
2283 dm_list_add(&dnode->props.segs, &seg->list);
2284 dnode->props.segment_count++;
2285
2286 return seg;
2287 }
2288
2289 int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
2290 uint64_t size,
2291 const char *origin_uuid)
2292 {
2293 struct load_segment *seg;
2294 struct dm_tree_node *origin_node;
2295
2296 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2297 return_0;
2298
2299 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
2300 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2301 return 0;
2302 }
2303
2304 seg->origin = origin_node;
2305 if (!_link_tree_nodes(dnode, origin_node))
2306 return_0;
2307
2308 /* Resume snapshot origins after new snapshots */
2309 dnode->activation_priority = 1;
2310
2311 return 1;
2312 }
2313
/*
 * Append a snapshot or snapshot-merge segment to 'node', linking it to
 * its origin and COW devices.  A non-NULL merge_uuid selects
 * SEG_SNAPSHOT_MERGE and adjusts activation priorities so the merging
 * snapshot resumes last.  Returns 1 on success, 0 on failure.
 */
static int _add_snapshot_target(struct dm_tree_node *node,
				uint64_t size,
				const char *origin_uuid,
				const char *cow_uuid,
				const char *merge_uuid,
				int persistent,
				uint32_t chunk_size)
{
	struct load_segment *seg;
	struct dm_tree_node *origin_node, *cow_node, *merge_node;
	unsigned seg_type;

	seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;

	if (!(seg = _add_segment(node, seg_type, size)))
		return_0;

	if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
		log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
		return 0;
	}

	seg->origin = origin_node;
	if (!_link_tree_nodes(node, origin_node))
		return_0;

	if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
		log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
		return 0;
	}

	seg->cow = cow_node;
	if (!_link_tree_nodes(node, cow_node))
		return_0;

	seg->persistent = persistent ? 1 : 0;
	seg->chunk_size = chunk_size;

	if (merge_uuid) {
		if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
			/* not a pure error, merging snapshot may have been deactivated */
			log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
		} else {
			seg->merge = merge_node;
			/* must not link merging snapshot, would undermine activation_priority below */
		}

		/* Resume snapshot-merge (acting origin) after other snapshots */
		node->activation_priority = 1;
		if (seg->merge) {
			/* Resume merging snapshot after snapshot-merge */
			seg->merge->activation_priority = 2;
		}
	}

	return 1;
}
2371
2372
2373 int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2374 uint64_t size,
2375 const char *origin_uuid,
2376 const char *cow_uuid,
2377 int persistent,
2378 uint32_t chunk_size)
2379 {
2380 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2381 NULL, persistent, chunk_size);
2382 }
2383
2384 int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2385 uint64_t size,
2386 const char *origin_uuid,
2387 const char *cow_uuid,
2388 const char *merge_uuid,
2389 uint32_t chunk_size)
2390 {
2391 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2392 merge_uuid, 1, chunk_size);
2393 }
2394
2395 int dm_tree_node_add_error_target(struct dm_tree_node *node,
2396 uint64_t size)
2397 {
2398 if (!_add_segment(node, SEG_ERROR, size))
2399 return_0;
2400
2401 return 1;
2402 }
2403
2404 int dm_tree_node_add_zero_target(struct dm_tree_node *node,
2405 uint64_t size)
2406 {
2407 if (!_add_segment(node, SEG_ZERO, size))
2408 return_0;
2409
2410 return 1;
2411 }
2412
2413 int dm_tree_node_add_linear_target(struct dm_tree_node *node,
2414 uint64_t size)
2415 {
2416 if (!_add_segment(node, SEG_LINEAR, size))
2417 return_0;
2418
2419 return 1;
2420 }
2421
2422 int dm_tree_node_add_striped_target(struct dm_tree_node *node,
2423 uint64_t size,
2424 uint32_t stripe_size)
2425 {
2426 struct load_segment *seg;
2427
2428 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2429 return_0;
2430
2431 seg->stripe_size = stripe_size;
2432
2433 return 1;
2434 }
2435
2436 int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2437 uint64_t size,
2438 const char *cipher,
2439 const char *chainmode,
2440 const char *iv,
2441 uint64_t iv_offset,
2442 const char *key)
2443 {
2444 struct load_segment *seg;
2445
2446 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2447 return_0;
2448
2449 seg->cipher = cipher;
2450 seg->chainmode = chainmode;
2451 seg->iv = iv;
2452 seg->iv_offset = iv_offset;
2453 seg->key = key;
2454
2455 return 1;
2456 }
2457
2458 int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
2459 uint32_t region_size,
2460 unsigned clustered,
2461 const char *log_uuid,
2462 unsigned area_count,
2463 uint32_t flags)
2464 {
2465 struct dm_tree_node *log_node = NULL;
2466 struct load_segment *seg;
2467
2468 if (!node->props.segment_count) {
2469 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2470 return 0;
2471 }
2472
2473 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2474
2475 if (log_uuid) {
2476 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2477 log_error("log uuid pool_strdup failed");
2478 return 0;
2479 }
2480 if ((flags & DM_CORELOG))
2481 /* For pvmove: immediate resume (for size validation) isn't needed. */
2482 node->props.delay_resume_if_new = 1;
2483 else {
2484 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2485 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2486 return 0;
2487 }
2488
2489 if (clustered)
2490 log_node->props.immediate_dev_node = 1;
2491
2492 /* The kernel validates the size of disk logs. */
2493 /* FIXME Propagate to any devices below */
2494 log_node->props.delay_resume_if_new = 0;
2495
2496 if (!_link_tree_nodes(node, log_node))
2497 return_0;
2498 }
2499 }
2500
2501 seg->log = log_node;
2502 seg->region_size = region_size;
2503 seg->clustered = clustered;
2504 seg->mirror_area_count = area_count;
2505 seg->flags = flags;
2506
2507 return 1;
2508 }
2509
2510 int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
2511 uint64_t size)
2512 {
2513 if (!_add_segment(node, SEG_MIRRORED, size))
2514 return_0;
2515
2516 return 1;
2517 }
2518
2519 int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2520 uint64_t size,
2521 const char *raid_type,
2522 uint32_t region_size,
2523 uint32_t stripe_size,
2524 uint64_t rebuilds,
2525 uint64_t reserved2)
2526 {
2527 int i;
2528 struct load_segment *seg = NULL;
2529
2530 for (i = 0; dm_segtypes[i].target && !seg; i++)
2531 if (!strcmp(raid_type, dm_segtypes[i].target))
2532 if (!(seg = _add_segment(node,
2533 dm_segtypes[i].type, size)))
2534 return_0;
2535
2536 if (!seg)
2537 return_0;
2538
2539 seg->region_size = region_size;
2540 seg->stripe_size = stripe_size;
2541 seg->area_count = 0;
2542 seg->rebuilds = rebuilds;
2543
2544 return 1;
2545 }
2546
2547 int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2548 uint64_t size,
2549 const char *rlog_uuid,
2550 const char *rlog_type,
2551 unsigned rsite_index,
2552 dm_replicator_mode_t mode,
2553 uint32_t async_timeout,
2554 uint64_t fall_behind_data,
2555 uint32_t fall_behind_ios)
2556 {
2557 struct load_segment *rseg;
2558 struct replicator_site *rsite;
2559
2560 /* Local site0 - adds replicator segment and links rlog device */
2561 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2562 if (node->props.segment_count) {
2563 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2564 return 0;
2565 }
2566
2567 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2568 return_0;
2569
2570 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2571 log_error("Missing replicator log uuid %s.", rlog_uuid);
2572 return 0;
2573 }
2574
2575 if (!_link_tree_nodes(node, rseg->log))
2576 return_0;
2577
2578 if (strcmp(rlog_type, "ringbuffer") != 0) {
2579 log_error("Unsupported replicator log type %s.", rlog_type);
2580 return 0;
2581 }
2582
2583 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2584 return_0;
2585
2586 dm_list_init(&rseg->rsites);
2587 rseg->rdevice_count = 0;
2588 node->activation_priority = 1;
2589 }
2590
2591 /* Add site to segment */
2592 if (mode == DM_REPLICATOR_SYNC
2593 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2594 log_error("Async parameters passed for synchronnous replicator.");
2595 return 0;
2596 }
2597
2598 if (node->props.segment_count != 1) {
2599 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2600 return 0;
2601 }
2602
2603 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2604 if (rseg->type != SEG_REPLICATOR) {
2605 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2606 dm_segtypes[rseg->type].target);
2607 return 0;
2608 }
2609
2610 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2611 log_error("Failed to allocate remote site segment.");
2612 return 0;
2613 }
2614
2615 dm_list_add(&rseg->rsites, &rsite->list);
2616 rseg->rsite_count++;
2617
2618 rsite->mode = mode;
2619 rsite->async_timeout = async_timeout;
2620 rsite->fall_behind_data = fall_behind_data;
2621 rsite->fall_behind_ios = fall_behind_ios;
2622 rsite->rsite_index = rsite_index;
2623
2624 return 1;
2625 }
2626
/*
 * Appends device node to Replicator.
 *
 * For rsite_index == REPLICATOR_LOCAL_SITE (0) a new replicator-dev
 * segment is created on node and linked to the replicator control
 * node (replicator_uuid); for any other site the existing (single)
 * replicator-dev segment of node is reused.  In both cases one area
 * for rdev_uuid is appended to that segment, optionally with a sync
 * log device (slog_uuid) unless DM_CORELOG is set in slog_flags.
 *
 * Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *replicator_uuid,
					   uint64_t rdevice_index,
					   const char *rdev_uuid,
					   unsigned rsite_index,
					   const char *slog_uuid,
					   uint32_t slog_flags,
					   uint32_t slog_region_size)
{
	struct seg_area *area;
	struct load_segment *rseg;
	struct load_segment *rep_seg;

	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		/* Site index for local target */
		if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
			return_0;

		if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
			log_error("Missing replicator uuid %s.", replicator_uuid);
			return 0;
		}

		/* Local slink0 for replicator must be always initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
			return 0;
		}

		/* The replicator node's single segment must be SEG_REPLICATOR */
		rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
		if (rep_seg->type != SEG_REPLICATOR) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
				  dm_segtypes[rep_seg->type].target);
			return 0;
		}
		rep_seg->rdevice_count++;

		if (!_link_tree_nodes(node, rseg->replicator))
			return_0;

		rseg->rdevice_index = rdevice_index;
	} else {
		/* Local slink0 for replicator must be always initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
			return 0;
		}

		rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
		if (rseg->type != SEG_REPLICATOR_DEV) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
				  dm_segtypes[rseg->type].target);
			return 0;
		}
	}

	/* Without a core log a sync log device uuid is mandatory */
	if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
		log_error("Unspecified sync log uuid.");
		return 0;
	}

	/* Append the replicated device as a target area of rseg */
	if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
		return_0;

	/* The area just added is the last one on the segment's list */
	area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);

	if (!(slog_flags & DM_CORELOG)) {
		if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
			log_error("Couldn't find sync log uuid %s.", slog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, area->slog))
			return_0;
	}

	area->flags = slog_flags;
	area->region_size = slog_region_size;
	area->rsite_index = rsite_index;

	return 1;
}
2711
2712 int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2713 uint64_t size,
2714 uint64_t transaction_id,
2715 const char *pool_uuid,
2716 const char *metadata_uuid,
2717 uint32_t data_block_size,
2718 uint64_t low_water_mark_size,
2719 unsigned skip_block_zeroing)
2720 {
2721 struct load_segment *seg;
2722
2723 if (data_block_size < THIN_MIN_DATA_SIZE) {
2724 log_error("Data block size %d is lower then "
2725 QUOTE(THIN_MIN_DATA_SIZE) " sectors.",
2726 data_block_size);
2727 return 0;
2728 }
2729
2730 if (data_block_size > THIN_MAX_DATA_SIZE) {
2731 log_error("Data block size %d is higher then "
2732 QUOTE(THIN_MAX_DATA_SIZE) " sectors.",
2733 data_block_size);
2734 return 0;
2735 }
2736
2737 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2738 return_0;
2739
2740 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2741 log_error("Missing metadata uuid %s.", metadata_uuid);
2742 return 0;
2743 }
2744
2745 if (!_link_tree_nodes(node, seg->metadata))
2746 return_0;
2747
2748 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2749 log_error("Missing pool uuid %s.", pool_uuid);
2750 return 0;
2751 }
2752
2753 if (!_link_tree_nodes(node, seg->pool))
2754 return_0;
2755
2756 node->props.thin_pool_transaction_id = transaction_id; // compare on resume
2757 seg->low_water_mark_size = low_water_mark_size;
2758 seg->data_block_size = data_block_size;
2759 seg->skip_block_zeroing = skip_block_zeroing;
2760
2761 return 1;
2762 }
2763
2764 int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2765 uint64_t size,
2766 const char *thin_pool_uuid,
2767 uint32_t device_id)
2768 {
2769 struct load_segment *seg;
2770
2771 if (device_id > THIN_MAX_DEVICE_ID) {
2772 log_error("Device id %d is higher then " QUOTE(THIN_MAX_DEVICE_ID) ".",
2773 device_id);
2774 return 0;
2775 }
2776
2777 if (!(seg = _add_segment(node, SEG_THIN, size)))
2778 return_0;
2779
2780 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2781 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2782 return 0;
2783 }
2784
2785 if (!_link_tree_nodes(node, seg->pool))
2786 return_0;
2787
2788 seg->device_id = device_id;
2789
2790 return 1;
2791 }
2792
2793 static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
2794 {
2795 struct seg_area *area;
2796
2797 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
2798 log_error("Failed to allocate target segment area.");
2799 return 0;
2800 }
2801
2802 area->dev_node = dev_node;
2803 area->offset = offset;
2804
2805 dm_list_add(&seg->areas, &area->list);
2806 seg->area_count++;
2807
2808 return 1;
2809 }
2810
2811 int dm_tree_node_add_target_area(struct dm_tree_node *node,
2812 const char *dev_name,
2813 const char *uuid,
2814 uint64_t offset)
2815 {
2816 struct load_segment *seg;
2817 struct stat info;
2818 struct dm_tree_node *dev_node;
2819
2820 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
2821 log_error("dm_tree_node_add_target_area called without device");
2822 return 0;
2823 }
2824
2825 if (uuid) {
2826 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
2827 log_error("Couldn't find area uuid %s.", uuid);
2828 return 0;
2829 }
2830 if (!_link_tree_nodes(node, dev_node))
2831 return_0;
2832 } else {
2833 if (stat(dev_name, &info) < 0) {
2834 log_error("Device %s not found.", dev_name);
2835 return 0;
2836 }
2837
2838 if (!S_ISBLK(info.st_mode)) {
2839 log_error("Device %s is not a block device.", dev_name);
2840 return 0;
2841 }
2842
2843 /* FIXME Check correct macro use */
2844 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
2845 MINOR(info.st_rdev), 0)))
2846 return_0;
2847 }
2848
2849 if (!node->props.segment_count) {
2850 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2851 return 0;
2852 }
2853
2854 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2855
2856 if (!_add_area(node, seg, dev_node, offset))
2857 return_0;
2858
2859 return 1;
2860 }
2861
2862 int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
2863 {
2864 struct load_segment *seg;
2865
2866 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2867
2868 switch (seg->type) {
2869 case SEG_RAID1:
2870 case SEG_RAID4:
2871 case SEG_RAID5_LA:
2872 case SEG_RAID5_RA:
2873 case SEG_RAID5_LS:
2874 case SEG_RAID5_RS:
2875 case SEG_RAID6_ZR:
2876 case SEG_RAID6_NR:
2877 case SEG_RAID6_NC:
2878 break;
2879 default:
2880 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
2881 return 0;
2882 }
2883
2884 if (!_add_area(node, seg, NULL, offset))
2885 return_0;
2886
2887 return 1;
2888 }
2889
/*
 * Set the cookie for the tree this node belongs to.
 * The cookie is stored per tree (on node->dtree), so any node of the
 * tree can be used to set it.
 */
void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
{
	node->dtree->cookie = cookie;
}
2894
/*
 * Return the cookie of the tree this node belongs to (per-tree value
 * stored on node->dtree; see dm_tree_set_cookie).
 */
uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
	return node->dtree->cookie;
}
This page took 0.167084 seconds and 6 git commands to generate.