1 /*
2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15 #include "dmlib.h"
16 #include "libdm-targets.h"
17 #include "libdm-common.h"
18 #include "kdev_t.h"
19 #include "dm-ioctl.h"
20
21 #include <stdarg.h>
22 #include <sys/param.h>
23 #include <sys/utsname.h>
24
25 #define MAX_TARGET_PARAMSIZE 500000
26
27 /* FIXME Fix interface so this is used only by LVM */
28 #define UUID_PREFIX "LVM-"
29
30 #define REPLICATOR_LOCAL_SITE 0
31
32 /* Supported segment types */
33 enum {
34 SEG_CRYPT,
35 SEG_ERROR,
36 SEG_LINEAR,
37 SEG_MIRRORED,
38 SEG_REPLICATOR,
39 SEG_REPLICATOR_DEV,
40 SEG_SNAPSHOT,
41 SEG_SNAPSHOT_ORIGIN,
42 SEG_SNAPSHOT_MERGE,
43 SEG_STRIPED,
44 SEG_ZERO,
45 SEG_THIN_POOL,
46 SEG_THIN,
47 SEG_RAID1,
48 SEG_RAID4,
49 SEG_RAID5_LA,
50 SEG_RAID5_RA,
51 SEG_RAID5_LS,
52 SEG_RAID5_RS,
53 SEG_RAID6_ZR,
54 SEG_RAID6_NR,
55 SEG_RAID6_NC,
56 SEG_LAST,
57 };
58
59 /* FIXME Add crypt and multipath support */
60
61 struct {
62 unsigned type;
63 const char *target;
64 } dm_segtypes[] = {
65 { SEG_CRYPT, "crypt" },
66 { SEG_ERROR, "error" },
67 { SEG_LINEAR, "linear" },
68 { SEG_MIRRORED, "mirror" },
69 { SEG_REPLICATOR, "replicator" },
70 { SEG_REPLICATOR_DEV, "replicator-dev" },
71 { SEG_SNAPSHOT, "snapshot" },
72 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
73 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
74 { SEG_STRIPED, "striped" },
75 { SEG_ZERO, "zero"},
76 { SEG_THIN_POOL, "thin-pool"},
77 { SEG_THIN, "thin"},
78 { SEG_RAID1, "raid1"},
79 { SEG_RAID4, "raid4"},
80 { SEG_RAID5_LA, "raid5_la"},
81 { SEG_RAID5_RA, "raid5_ra"},
82 { SEG_RAID5_LS, "raid5_ls"},
83 { SEG_RAID5_RS, "raid5_rs"},
84 { SEG_RAID6_ZR, "raid6_zr"},
85 { SEG_RAID6_NR, "raid6_nr"},
86 { SEG_RAID6_NC, "raid6_nc"},
87
88 /*
89  * WARNING: Since the 'raid' target overloads this 1:1 mapping table
90  * for searching, do not add new enum elements past these entries!
91 */
92 { SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
93 { SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
94 { SEG_LAST, NULL },
95 };
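/*
 * Illustrative sketch (not part of the original file): because the
 * alias rows above duplicate earlier entries, a name lookup must be a
 * forward scan that stops at the first match, which resolves both the
 * canonical names and the trailing "raid5"/"raid6" aliases:
 *
 *	unsigned i;
 *	for (i = 0; dm_segtypes[i].target; i++)
 *		if (!strcmp(type, dm_segtypes[i].target))
 *			return dm_segtypes[i].type;	// first match wins
 */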
96
97 /* Some segment types have a list of areas of other devices attached */
98 struct seg_area {
99 struct dm_list list;
100
101 struct dm_tree_node *dev_node;
102
103 uint64_t offset;
104
105 unsigned rsite_index; /* Replicator site index */
106 struct dm_tree_node *slog; /* Replicator sync log node */
107 uint64_t region_size; /* Replicator sync log size */
108 uint32_t flags; /* Replicator sync log flags */
109 };
110
111 struct thin_message {
112 struct dm_list list;
113 struct dm_thin_message message;
114 int expected_errno;
115 };
116
117 /* Replicator-log has a list of sites */
118 /* FIXME: maybe move to seg_area too? */
119 struct replicator_site {
120 struct dm_list list;
121
122 unsigned rsite_index;
123 dm_replicator_mode_t mode;
124 uint32_t async_timeout;
125 uint32_t fall_behind_ios;
126 uint64_t fall_behind_data;
127 };
128
129 /* Per-segment properties */
130 struct load_segment {
131 struct dm_list list;
132
133 unsigned type;
134
135 uint64_t size;
136
137 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
138 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
139
140 uint32_t stripe_size; /* Striped + raid */
141
142 int persistent; /* Snapshot */
143 uint32_t chunk_size; /* Snapshot */
144 struct dm_tree_node *cow; /* Snapshot */
145 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
146 struct dm_tree_node *merge; /* Snapshot */
147
148 struct dm_tree_node *log; /* Mirror + Replicator */
149 uint32_t region_size; /* Mirror + raid */
150 unsigned clustered; /* Mirror */
151 unsigned mirror_area_count; /* Mirror */
152 uint32_t flags; /* Mirror log */
153 char *uuid; /* Clustered mirror log */
154
155 const char *cipher; /* Crypt */
156 const char *chainmode; /* Crypt */
157 const char *iv; /* Crypt */
158 uint64_t iv_offset; /* Crypt */
159 const char *key; /* Crypt */
160
161 const char *rlog_type; /* Replicator */
162 struct dm_list rsites; /* Replicator */
163 unsigned rsite_count; /* Replicator */
164 unsigned rdevice_count; /* Replicator */
165 struct dm_tree_node *replicator;/* Replicator-dev */
166 uint64_t rdevice_index; /* Replicator-dev */
167
168 uint64_t rebuilds; /* raid */
169
170 struct dm_tree_node *metadata; /* Thin_pool */
171 struct dm_tree_node *pool; /* Thin_pool, Thin */
172 struct dm_list thin_messages; /* Thin_pool */
173 uint64_t low_water_mark; /* Thin_pool */
174 uint32_t data_block_size; /* Thin_pool */
175 unsigned skip_block_zeroing; /* Thin_pool */
176 uint32_t device_id; /* Thin */
177
178 };
179
180 /* Per-device properties */
181 struct load_properties {
182 int read_only;
183 uint32_t major;
184 uint32_t minor;
185
186 uint32_t read_ahead;
187 uint32_t read_ahead_flags;
188
189 uint64_t thin_pool_transaction_id; /* Thin_pool */
190
191 unsigned segment_count;
192 unsigned size_changed;
193 struct dm_list segs;
194
195 const char *new_name;
196
197 /* If immediate_dev_node is set to 1, try to create the dev node
198  * as soon as possible (e.g. in the preload stage, even during
199  * traversal and processing of the dm tree). This will also flush
200  * all stacked dev node operations, synchronizing with udev.
201  */
202 unsigned immediate_dev_node;
203
204 /*
205  * If the device size changed from zero and this is set,
206  * don't resume the device immediately, even if the device
207  * has parents. This works provided the parents do not
208  * validate the device size, and it is required by pvmove
209  * to avoid starting the mirror resync operation too early.
210  */
211 unsigned delay_resume_if_new;
212 };
213
214 /* Two of these are used to join two nodes, via the uses and used_by lists. */
215 struct dm_tree_link {
216 struct dm_list list;
217 struct dm_tree_node *node;
218 };
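/*
 * Example (sketch): linking parent -> child allocates two dm_tree_link
 * entries, one per direction, so the tree can be walked both ways:
 *
 *	_link(&parent->uses, child);	// parent now uses child
 *	_link(&child->used_by, parent);	// child is used by parent
 *
 * _link_nodes() below is the real entry point and does exactly this.
 */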
219
220 struct dm_tree_node {
221 struct dm_tree *dtree;
222
223 const char *name;
224 const char *uuid;
225 struct dm_info info;
226
227 struct dm_list uses; /* Nodes this node uses */
228 struct dm_list used_by; /* Nodes that use this node */
229
230 int activation_priority; /* 0 gets activated first */
231
232 uint16_t udev_flags; /* Udev control flags */
233
234 void *context; /* External supplied context */
235
236 struct load_properties props; /* For creation/table (re)load */
237
238 /*
239  * Set if presuspend of a child node is needed.
240  * Note: only a direct child is allowed.
241 */
242 struct dm_tree_node *presuspend_node;
243 };
244
245 struct dm_tree {
246 struct dm_pool *mem;
247 struct dm_hash_table *devs;
248 struct dm_hash_table *uuids;
249 struct dm_tree_node root;
250 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
251 int no_flush; /* 1 sets noflush (mirrors/multipath) */
252 int retry_remove; /* 1 retries remove if not successful */
253 uint32_t cookie;
254 };
255
256 struct dm_tree *dm_tree_create(void)
257 {
258 struct dm_pool *dmem;
259 struct dm_tree *dtree;
260
261 if (!(dmem = dm_pool_create("dtree", 1024)) ||
262 !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
263 log_error("Failed to allocate dtree.");
264 if (dmem)
265 dm_pool_destroy(dmem);
266 return NULL;
267 }
268
269 dtree->root.dtree = dtree;
270 dm_list_init(&dtree->root.uses);
271 dm_list_init(&dtree->root.used_by);
272 dtree->skip_lockfs = 0;
273 dtree->no_flush = 0;
274 dtree->mem = dmem;
275
276 if (!(dtree->devs = dm_hash_create(8))) {
277 log_error("dtree hash creation failed");
278 dm_pool_destroy(dtree->mem);
279 return NULL;
280 }
281
282 if (!(dtree->uuids = dm_hash_create(32))) {
283 log_error("dtree uuid hash creation failed");
284 dm_hash_destroy(dtree->devs);
285 dm_pool_destroy(dtree->mem);
286 return NULL;
287 }
288
289 return dtree;
290 }
291
292 void dm_tree_free(struct dm_tree *dtree)
293 {
294 if (!dtree)
295 return;
296
297 dm_hash_destroy(dtree->uuids);
298 dm_hash_destroy(dtree->devs);
299 dm_pool_destroy(dtree->mem);
300 }
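/*
 * Typical lifecycle (sketch of API usage, error handling abbreviated):
 *
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *	if (!dm_tree_add_dev(dtree, major, minor))	// pulls in deps too
 *		goto out;
 *	// ... inspect or manipulate nodes ...
 * out:
 *	dm_tree_free(dtree);
 */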
301
302 static int _nodes_are_linked(const struct dm_tree_node *parent,
303 const struct dm_tree_node *child)
304 {
305 struct dm_tree_link *dlink;
306
307 dm_list_iterate_items(dlink, &parent->uses)
308 if (dlink->node == child)
309 return 1;
310
311 return 0;
312 }
313
314 static int _link(struct dm_list *list, struct dm_tree_node *node)
315 {
316 struct dm_tree_link *dlink;
317
318 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
319 log_error("dtree link allocation failed");
320 return 0;
321 }
322
323 dlink->node = node;
324 dm_list_add(list, &dlink->list);
325
326 return 1;
327 }
328
329 static int _link_nodes(struct dm_tree_node *parent,
330 struct dm_tree_node *child)
331 {
332 if (_nodes_are_linked(parent, child))
333 return 1;
334
335 if (!_link(&parent->uses, child))
336 return 0;
337
338 if (!_link(&child->used_by, parent))
339 return 0;
340
341 return 1;
342 }
343
344 static void _unlink(struct dm_list *list, struct dm_tree_node *node)
345 {
346 struct dm_tree_link *dlink;
347
348 dm_list_iterate_items(dlink, list)
349 if (dlink->node == node) {
350 dm_list_del(&dlink->list);
351 break;
352 }
353 }
354
355 static void _unlink_nodes(struct dm_tree_node *parent,
356 struct dm_tree_node *child)
357 {
358 if (!_nodes_are_linked(parent, child))
359 return;
360
361 _unlink(&parent->uses, child);
362 _unlink(&child->used_by, parent);
363 }
364
365 static int _add_to_toplevel(struct dm_tree_node *node)
366 {
367 return _link_nodes(&node->dtree->root, node);
368 }
369
370 static void _remove_from_toplevel(struct dm_tree_node *node)
371 {
372 _unlink_nodes(&node->dtree->root, node);
373 }
374
375 static int _add_to_bottomlevel(struct dm_tree_node *node)
376 {
377 return _link_nodes(node, &node->dtree->root);
378 }
379
380 static void _remove_from_bottomlevel(struct dm_tree_node *node)
381 {
382 _unlink_nodes(node, &node->dtree->root);
383 }
384
385 static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
386 {
387 /* Don't link to root node if child already has a parent */
388 if (parent == &parent->dtree->root) {
389 if (dm_tree_node_num_children(child, 1))
390 return 1;
391 } else
392 _remove_from_toplevel(child);
393
394 if (child == &child->dtree->root) {
395 if (dm_tree_node_num_children(parent, 0))
396 return 1;
397 } else
398 _remove_from_bottomlevel(parent);
399
400 return _link_nodes(parent, child);
401 }
402
403 static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
404 const char *name,
405 const char *uuid,
406 struct dm_info *info,
407 void *context,
408 uint16_t udev_flags)
409 {
410 struct dm_tree_node *node;
411 uint64_t dev;
412
413 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
414 log_error("_create_dm_tree_node alloc failed");
415 return NULL;
416 }
417
418 node->dtree = dtree;
419
420 node->name = name;
421 node->uuid = uuid;
422 node->info = *info;
423 node->context = context;
424 node->udev_flags = udev_flags;
425 node->activation_priority = 0;
426
427 dm_list_init(&node->uses);
428 dm_list_init(&node->used_by);
429 dm_list_init(&node->props.segs);
430
431 dev = MKDEV(info->major, info->minor);
432
433 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
434 sizeof(dev), node)) {
435 log_error("dtree node hash insertion failed");
436 dm_pool_free(dtree->mem, node);
437 return NULL;
438 }
439
440 if (uuid && *uuid &&
441 !dm_hash_insert(dtree->uuids, uuid, node)) {
442 log_error("dtree uuid hash insertion failed");
443 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
444 sizeof(dev));
445 dm_pool_free(dtree->mem, node);
446 return NULL;
447 }
448
449 return node;
450 }
451
452 static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
453 uint32_t major, uint32_t minor)
454 {
455 uint64_t dev = MKDEV(major, minor);
456
457 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
458 sizeof(dev));
459 }
460
461 static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
462 const char *uuid)
463 {
464 struct dm_tree_node *node;
465
466 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
467 return node;
468
469 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
470 return NULL;
471
472 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
473 }
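/*
 * Example: a node hashed under "abcdef..." (stored without the "LVM-"
 * prefix) is still found when a caller passes the prefixed form, since
 * the second lookup above retries with uuid + sizeof(UUID_PREFIX) - 1:
 *
 *	_find_dm_tree_node_by_uuid(dtree, "LVM-abcdef...");
 */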
474
475 static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
476 const char **name, const char **uuid,
477 struct dm_info *info, struct dm_deps **deps)
478 {
479 memset(info, 0, sizeof(*info));
480
481 if (!dm_is_dm_major(major)) {
482 *name = "";
483 *uuid = "";
484 *deps = NULL;
485 info->major = major;
486 info->minor = minor;
487 info->exists = 0;
488 info->live_table = 0;
489 info->inactive_table = 0;
490 info->read_only = 0;
491 return 1;
492 }
493
494 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
495 log_error("deps dm_task creation failed");
496 return 0;
497 }
498
499 if (!dm_task_set_major(*dmt, major)) {
500 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
501 major, minor);
502 goto failed;
503 }
504
505 if (!dm_task_set_minor(*dmt, minor)) {
506 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
507 major, minor);
508 goto failed;
509 }
510
511 if (!dm_task_run(*dmt)) {
512 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
513 major, minor);
514 goto failed;
515 }
516
517 if (!dm_task_get_info(*dmt, info)) {
518 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
519 major, minor);
520 goto failed;
521 }
522
523 if (!info->exists) {
524 *name = "";
525 *uuid = "";
526 *deps = NULL;
527 } else {
528 if (info->major != major) {
529 log_error("Inconsistent dtree major number: %u != %u",
530 major, info->major);
531 goto failed;
532 }
533 if (info->minor != minor) {
534 log_error("Inconsistent dtree minor number: %u != %u",
535 minor, info->minor);
536 goto failed;
537 }
538 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
539 log_error("name pool_strdup failed");
540 goto failed;
541 }
542 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
543 log_error("uuid pool_strdup failed");
544 goto failed;
545 }
546 *deps = dm_task_get_deps(*dmt);
547 }
548
549 return 1;
550
551 failed:
552 dm_task_destroy(*dmt);
553 return 0;
554 }
555
556 static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
557 struct dm_tree_node *parent,
558 uint32_t major, uint32_t minor,
559 uint16_t udev_flags)
560 {
561 struct dm_task *dmt = NULL;
562 struct dm_info info;
563 struct dm_deps *deps = NULL;
564 const char *name = NULL;
565 const char *uuid = NULL;
566 struct dm_tree_node *node = NULL;
567 uint32_t i;
568 int new = 0;
569
570 /* Already in tree? */
571 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
572 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
573 return_NULL;
574
575 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
576 NULL, udev_flags)))
577 goto_out;
578 new = 1;
579 }
580
581 if (!_link_tree_nodes(parent, node)) {
582 node = NULL;
583 goto_out;
584 }
585
586 /* If node was already in tree, no need to recurse. */
587 if (!new)
588 goto out;
589
590 /* Can't recurse if not a mapped device or there are no dependencies */
591 if (!node->info.exists || !deps->count) {
592 if (!_add_to_bottomlevel(node)) {
593 stack;
594 node = NULL;
595 }
596 goto out;
597 }
598
599 /* Add dependencies to tree */
600 for (i = 0; i < deps->count; i++)
601 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
602 MINOR(deps->device[i]), udev_flags)) {
603 node = NULL;
604 goto_out;
605 }
606
607 out:
608 if (dmt)
609 dm_task_destroy(dmt);
610
611 return node;
612 }
613
614 static int _node_clear_table(struct dm_tree_node *dnode)
615 {
616 struct dm_task *dmt;
617 struct dm_info *info;
618 const char *name;
619 int r;
620
621 if (!(info = &dnode->info)) {
622 log_error("_node_clear_table failed: missing info");
623 return 0;
624 }
625
626 if (!(name = dm_tree_node_get_name(dnode))) {
627 log_error("_node_clear_table failed: missing name");
628 return 0;
629 }
630
631 /* Is there a table? */
632 if (!info->exists || !info->inactive_table)
633 return 1;
634
635 /* FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear. */
636
637 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
638 name, info->major, info->minor);
639
640 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
641 log_error("Table clear dm_task creation failed for %s", name);
642 return 0;
643 }
644
645 if (!dm_task_set_major(dmt, info->major) ||
646 !dm_task_set_minor(dmt, info->minor)) {
647 log_error("Failed to set device number for %s table clear", name);
648 dm_task_destroy(dmt);
649 return 0;
650 }
651
652 r = dm_task_run(dmt);
653
654 if (!dm_task_get_info(dmt, info)) {
655 log_error("_node_clear_table failed: info missing after running task for %s", name);
656 r = 0;
657 }
658
659 dm_task_destroy(dmt);
660
661 return r;
662 }
663
664 struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
665 const char *name,
666 const char *uuid,
667 uint32_t major, uint32_t minor,
668 int read_only,
669 int clear_inactive,
670 void *context)
671 {
672 struct dm_tree_node *dnode;
673 struct dm_info info;
674 const char *name2;
675 const char *uuid2;
676
677 /* Do we need to add node to tree? */
678 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
679 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
680 log_error("name pool_strdup failed");
681 return NULL;
682 }
683 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
684 log_error("uuid pool_strdup failed");
685 return NULL;
686 }
687
688 info.major = 0;
689 info.minor = 0;
690 info.exists = 0;
691 info.live_table = 0;
692 info.inactive_table = 0;
693 info.read_only = 0;
694
695 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
696 context, 0)))
697 return_NULL;
698
699 /* Attach to root node until a table is supplied */
700 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
701 return_NULL;
702
703 dnode->props.major = major;
704 dnode->props.minor = minor;
705 dnode->props.new_name = NULL;
706 dnode->props.size_changed = 0;
707 } else if (strcmp(name, dnode->name)) {
708 /* Do we need to rename node? */
709 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
710 log_error("name pool_strdup failed");
711 return NULL;
712 }
713 }
714
715 dnode->props.read_only = read_only ? 1 : 0;
716 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
717 dnode->props.read_ahead_flags = 0;
718
719 if (clear_inactive && !_node_clear_table(dnode))
720 return_NULL;
721
722 dnode->context = context;
723 dnode->udev_flags = 0;
724
725 return dnode;
726 }
727
728 struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
729 const char *name,
730 const char *uuid,
731 uint32_t major,
732 uint32_t minor,
733 int read_only,
734 int clear_inactive,
735 void *context,
736 uint16_t udev_flags)
737 {
738 struct dm_tree_node *node;
739
740 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
741 clear_inactive, context)))
742 node->udev_flags = udev_flags;
743
744 return node;
745 }
746
747 void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
748
749 {
750 struct dm_info *dinfo = &dnode->info;
751
752 if (udev_flags != dnode->udev_flags)
753 log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
754 ") udev_flags from 0x%x to 0x%x",
755 dnode->name, dinfo->major, dinfo->minor,
756 dnode->udev_flags, udev_flags);
757 dnode->udev_flags = udev_flags;
758 }
759
760 void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
761 uint32_t read_ahead,
762 uint32_t read_ahead_flags)
763 {
764 dnode->props.read_ahead = read_ahead;
765 dnode->props.read_ahead_flags = read_ahead_flags;
766 }
767
768 void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
769 struct dm_tree_node *presuspend_node)
770 {
771 node->presuspend_node = presuspend_node;
772 }
773
774 int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
775 {
776 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
777 }
778
779 int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
780 uint32_t minor, uint16_t udev_flags)
781 {
782 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
783 }
784
785 const char *dm_tree_node_get_name(const struct dm_tree_node *node)
786 {
787 return node->info.exists ? node->name : "";
788 }
789
790 const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
791 {
792 return node->info.exists ? node->uuid : "";
793 }
794
795 const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
796 {
797 return &node->info;
798 }
799
800 void *dm_tree_node_get_context(const struct dm_tree_node *node)
801 {
802 return node->context;
803 }
804
805 int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
806 {
807 return dnode->props.size_changed;
808 }
809
810 int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
811 {
812 if (inverted) {
813 if (_nodes_are_linked(&node->dtree->root, node))
814 return 0;
815 return dm_list_size(&node->used_by);
816 }
817
818 if (_nodes_are_linked(node, &node->dtree->root))
819 return 0;
820
821 return dm_list_size(&node->uses);
822 }
823
824 /*
825  * Returns 1 if no uuid_prefix is supplied or if the prefix matches.
826 */
827 static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
828 {
829 if (!uuid_prefix)
830 return 1;
831
832 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
833 return 1;
834
835 /* Handle transition: active device uuids might be missing the prefix */
836 if (uuid_prefix_len <= 4)
837 return 0;
838
839 if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
840 return 0;
841
842 if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
843 return 0;
844
845 if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
846 return 1;
847
848 return 0;
849 }
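/*
 * Worked example of the transition handling above (sketch):
 *
 *	uuid        = "abcdef..."	(active device missing "LVM-")
 *	uuid_prefix = "LVM-abc"		(uuid_prefix_len = 7)
 *
 * The direct strncmp() fails, the uuid itself carries no "LVM-", the
 * prefix does, so the final strncmp() compares "abc" against "abc"
 * and the function reports a match.
 */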
850
851 /*
852  * Returns 1 if there are no children or all of them are suspended.
853 */
854 static int _children_suspended(struct dm_tree_node *node,
855 uint32_t inverted,
856 const char *uuid_prefix,
857 size_t uuid_prefix_len)
858 {
859 struct dm_list *list;
860 struct dm_tree_link *dlink;
861 const struct dm_info *dinfo;
862 const char *uuid;
863
864 if (inverted) {
865 if (_nodes_are_linked(&node->dtree->root, node))
866 return 1;
867 list = &node->used_by;
868 } else {
869 if (_nodes_are_linked(node, &node->dtree->root))
870 return 1;
871 list = &node->uses;
872 }
873
874 dm_list_iterate_items(dlink, list) {
875 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
876 stack;
877 continue;
878 }
879
880 /* Ignore if it doesn't belong to this VG */
881 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
882 continue;
883
884 /* Ignore if parent node wants to presuspend this node */
885 if (dlink->node->presuspend_node == node)
886 continue;
887
888 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
889 stack; /* FIXME Is this normal? */
890 return 0;
891 }
892
893 if (!dinfo->suspended)
894 return 0;
895 }
896
897 return 1;
898 }
899
900 /*
901 * Set major and minor to zero for root of tree.
902 */
903 struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
904 uint32_t major,
905 uint32_t minor)
906 {
907 if (!major && !minor)
908 return &dtree->root;
909
910 return _find_dm_tree_node(dtree, major, minor);
911 }
912
913 /*
914 * Set uuid to NULL for root of tree.
915 */
916 struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
917 const char *uuid)
918 {
919 if (!uuid || !*uuid)
920 return &dtree->root;
921
922 return _find_dm_tree_node_by_uuid(dtree, uuid);
923 }
924
925 /*
926 * First time set *handle to NULL.
927 * Set inverted to invert the tree.
928 */
929 struct dm_tree_node *dm_tree_next_child(void **handle,
930 const struct dm_tree_node *parent,
931 uint32_t inverted)
932 {
933 struct dm_list **dlink = (struct dm_list **) handle;
934 const struct dm_list *use_list;
935
936 if (inverted)
937 use_list = &parent->used_by;
938 else
939 use_list = &parent->uses;
940
941 if (!*dlink)
942 *dlink = dm_list_first(use_list);
943 else
944 *dlink = dm_list_next(use_list, *dlink);
945
946 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
947 }
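/*
 * Example (sketch): iterating all children of a node, the same pattern
 * used by the traversal functions below:
 *
 *	void *handle = NULL;
 *	struct dm_tree_node *child;
 *
 *	while ((child = dm_tree_next_child(&handle, parent, 0)))
 *		visit(child);	// visit() is a placeholder
 *
 * Pass inverted = 1 to walk the used_by (parent) list instead.
 */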
948
949 /*
950  * Helpers for deactivating a device with its dependencies if the uuid prefix matches.
951 */
952 static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
953 struct dm_info *info)
954 {
955 struct dm_task *dmt;
956 int r;
957
958 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
959 log_error("_info_by_dev: dm_task creation failed");
960 return 0;
961 }
962
963 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
964 log_error("_info_by_dev: Failed to set device number");
965 dm_task_destroy(dmt);
966 return 0;
967 }
968
969 if (!with_open_count && !dm_task_no_open_count(dmt))
970 log_error("Failed to disable open_count");
971
972 if ((r = dm_task_run(dmt)))
973 r = dm_task_get_info(dmt, info);
974
975 dm_task_destroy(dmt);
976
977 return r;
978 }
979
980 static int _check_device_not_in_use(const char *name, struct dm_info *info)
981 {
982 if (!info->exists)
983 return 1;
984
985 /* If sysfs is not used, use open_count information only. */
986 if (!*dm_sysfs_dir()) {
987 if (info->open_count) {
988 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
989 name, info->major, info->minor);
990 return 0;
991 }
992
993 return 1;
994 }
995
996 if (dm_device_has_holders(info->major, info->minor)) {
997 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
998 "by another device.", name, info->major, info->minor);
999 return 0;
1000 }
1001
1002 if (dm_device_has_mounted_fs(info->major, info->minor)) {
1003 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
1004 "a filesystem in use.", name, info->major, info->minor);
1005 return 0;
1006 }
1007
1008 return 1;
1009 }
1010
1011 /* Check if all parent nodes of given node have open_count == 0 */
1012 static int _node_has_closed_parents(struct dm_tree_node *node,
1013 const char *uuid_prefix,
1014 size_t uuid_prefix_len)
1015 {
1016 struct dm_tree_link *dlink;
1017 const struct dm_info *dinfo;
1018 struct dm_info info;
1019 const char *uuid;
1020
1021 /* Iterate through parents of this node */
1022 dm_list_iterate_items(dlink, &node->used_by) {
1023 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
1024 stack;
1025 continue;
1026 }
1027
1028 /* Ignore if it doesn't belong to this VG */
1029 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1030 continue;
1031
1032 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
1033 stack; /* FIXME Is this normal? */
1034 return 0;
1035 }
1036
1037 /* Refresh open_count */
1038 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1039 !info.exists)
1040 continue;
1041
1042 if (info.open_count) {
1043 log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
1044 dinfo->major, dinfo->minor, info.open_count);
1045 return 0;
1046 }
1047 }
1048
1049 return 1;
1050 }
1051
1052 static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
1053 uint32_t *cookie, uint16_t udev_flags, int retry)
1054 {
1055 struct dm_task *dmt;
1056 int r = 0;
1057
1058 log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1059
1060 if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
1061 log_error("Deactivation dm_task creation failed for %s", name);
1062 return 0;
1063 }
1064
1065 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1066 log_error("Failed to set device number for %s deactivation", name);
1067 goto out;
1068 }
1069
1070 if (!dm_task_no_open_count(dmt))
1071 log_error("Failed to disable open_count");
1072
1073 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1074 goto out;
1075
1076
1077 if (retry)
1078 dm_task_retry_remove(dmt);
1079
1080 r = dm_task_run(dmt);
1081
1082 /* FIXME Until kernel returns actual name so dm-iface.c can handle it */
1083 rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
1084 dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));
1085
1086 /* FIXME Remove node from tree or mark invalid? */
1087
1088 out:
1089 dm_task_destroy(dmt);
1090
1091 return r;
1092 }
1093
1094 static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
1095 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
1096 {
1097 struct dm_task *dmt;
1098 int r = 0;
1099
1100 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
1101
1102 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
1103 log_error("Rename dm_task creation failed for %s", old_name);
1104 return 0;
1105 }
1106
1107 if (!dm_task_set_name(dmt, old_name)) {
1108 log_error("Failed to set name for %s rename.", old_name);
1109 goto out;
1110 }
1111
1112 if (!dm_task_set_newname(dmt, new_name))
1113 goto_out;
1114
1115 if (!dm_task_no_open_count(dmt))
1116 log_error("Failed to disable open_count");
1117
1118 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1119 goto out;
1120
1121 r = dm_task_run(dmt);
1122
1123 out:
1124 dm_task_destroy(dmt);
1125
1126 return r;
1127 }
1128
1129 /* FIXME Merge with _suspend_node? */
1130 static int _resume_node(const char *name, uint32_t major, uint32_t minor,
1131 uint32_t read_ahead, uint32_t read_ahead_flags,
1132 struct dm_info *newinfo, uint32_t *cookie,
1133 uint16_t udev_flags, int already_suspended)
1134 {
1135 struct dm_task *dmt;
1136 int r = 0;
1137
1138 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1139
1140 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
1141 log_debug("Suspend dm_task creation failed for %s.", name);
1142 return 0;
1143 }
1144
1145 /* FIXME Kernel should fill in name on return instead */
1146 if (!dm_task_set_name(dmt, name)) {
1147 log_debug("Failed to set device name for %s resumption.", name);
1148 goto out;
1149 }
1150
1151 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1152 log_error("Failed to set device number for %s resumption.", name);
1153 goto out;
1154 }
1155
1156 if (!dm_task_no_open_count(dmt))
1157 log_error("Failed to disable open_count");
1158
1159 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1160 log_error("Failed to set read ahead");
1161
1162 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1163 goto_out;
1164
1165 if (!(r = dm_task_run(dmt)))
1166 goto_out;
1167
1168 if (already_suspended)
1169 dec_suspended();
1170
1171 if (!(r = dm_task_get_info(dmt, newinfo)))
1172 stack;
1173
1174 out:
1175 dm_task_destroy(dmt);
1176
1177 return r;
1178 }
1179
1180 static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
1181 int skip_lockfs, int no_flush, struct dm_info *newinfo)
1182 {
1183 struct dm_task *dmt;
1184 int r;
1185
1186 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1187 name, major, minor,
1188 skip_lockfs ? "" : " with filesystem sync",
1189 no_flush ? "" : " with device flush");
1190
1191 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1192 log_error("Suspend dm_task creation failed for %s", name);
1193 return 0;
1194 }
1195
1196 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1197 log_error("Failed to set device number for %s suspension.", name);
1198 dm_task_destroy(dmt);
1199 return 0;
1200 }
1201
1202 if (!dm_task_no_open_count(dmt))
1203 log_error("Failed to disable open_count");
1204
1205 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1206 log_error("Failed to set skip_lockfs flag.");
1207
1208 if (no_flush && !dm_task_no_flush(dmt))
1209 log_error("Failed to set no_flush flag.");
1210
1211 if ((r = dm_task_run(dmt))) {
1212 inc_suspended();
1213 r = dm_task_get_info(dmt, newinfo);
1214 }
1215
1216 dm_task_destroy(dmt);
1217
1218 return r;
1219 }
1220
1221 static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
1222 {
1223 struct dm_task *dmt;
1224 int r = 0;
1225 uint64_t start, length;
1226 char *type = NULL;
1227 char *params = NULL;
1228
1229 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1230 return_0;
1231
1232 if (!dm_task_set_major(dmt, dnode->info.major) ||
1233 !dm_task_set_minor(dmt, dnode->info.minor)) {
1234 log_error("Failed to set major minor.");
1235 goto out;
1236 }
1237
1238 if (!dm_task_run(dmt))
1239 goto_out;
1240
1241 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1242
1243 if (type && (strcmp(type, "thin-pool") != 0)) {
1244 log_error("Expected thin-pool target for %d:%d and got %s.",
1245 dnode->info.major, dnode->info.minor, type);
1246 goto out;
1247 }
1248
1249 if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
1250 log_error("Failed to parse transaction_id from %s.", params);
1251 goto out;
1252 }
1253
1254 log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);
1255
1256 r = 1;
1257 out:
1258 dm_task_destroy(dmt);
1259
1260 return r;
1261 }
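/*
 * Note: the kernel's thin-pool status line begins with the transaction
 * id (roughly "<transaction_id> <used_meta>/<total_meta>
 * <used_data>/<total_data> ..." per the kernel target documentation),
 * which is why the sscanf() above only parses the leading integer.
 */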
1262
1263 static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
1264 {
1265 struct dm_task *dmt;
1266 struct dm_thin_message *m = &tm->message;
1267 char buf[64];
1268 int r;
1269
1270 switch (m->type) {
1271 case DM_THIN_MESSAGE_CREATE_SNAP:
1272 r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
1273 m->u.m_create_snap.device_id,
1274 m->u.m_create_snap.origin_id);
1275 break;
1276 case DM_THIN_MESSAGE_CREATE_THIN:
1277 r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
1278 m->u.m_create_thin.device_id);
1279 break;
1280 case DM_THIN_MESSAGE_DELETE:
1281 r = dm_snprintf(buf, sizeof(buf), "delete %u",
1282 m->u.m_delete.device_id);
1283 break;
1284 case DM_THIN_MESSAGE_TRIM:
1285 r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
1286 m->u.m_trim.device_id,
1287 m->u.m_trim.new_size);
1288 break;
1289 case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
1290 r = dm_snprintf(buf, sizeof(buf),
1291 "set_transaction_id %" PRIu64 " %" PRIu64,
1292 m->u.m_set_transaction_id.current_id,
1293 m->u.m_set_transaction_id.new_id);
1294 break;
1295 }
1296
1297 if (r < 0) {
1298 log_error("Failed to prepare message.");
1299 return 0;
1300 }
1301
1302 r = 0;
1303
1304 if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
1305 return_0;
1306
1307 if (!dm_task_set_major(dmt, dnode->info.major) ||
1308 !dm_task_set_minor(dmt, dnode->info.minor)) {
1309 log_error("Failed to set message major minor.");
1310 goto out;
1311 }
1312
1313 if (!dm_task_set_message(dmt, buf))
1314 goto_out;
1315
1316 /* Internal functionality of dm_task */
1317 dmt->expected_errno = tm->expected_errno;
1318
1319 if (!dm_task_run(dmt))
1320 goto_out;
1321
1322 r = 1;
1323 out:
1324 dm_task_destroy(dmt);
1325
1326 return r;
1327 }
1328
1329 static int _node_send_messages(struct dm_tree_node *dnode,
1330 const char *uuid_prefix,
1331 size_t uuid_prefix_len)
1332 {
1333 struct load_segment *seg;
1334 struct thin_message *tmsg;
1335 uint64_t trans_id;
1336 const char *uuid;
1337
1338 if ((dnode == &dnode->dtree->root) || /* root has props.segs uninitialized */
1339 !dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
1340 return 1;
1341
1342 seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
1343 if (seg->type != SEG_THIN_POOL)
1344 return 1;
1345
1346 if (!(uuid = dm_tree_node_get_uuid(dnode)))
1347 return_0;
1348
1349 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
1350 log_debug("UUID \"%s\" does not match.", uuid);
1351 return 1;
1352 }
1353
1354 if (!_thin_pool_status_transaction_id(dnode, &trans_id))
1355 return_0;
1356
1357 if (trans_id == dnode->props.thin_pool_transaction_id)
1358 return 1; /* In sync - skip messages */
1359
1360 if (trans_id != (dnode->props.thin_pool_transaction_id - 1)) {
1361 log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
1362 trans_id, dnode->props.thin_pool_transaction_id - 1);
1363 return 0; /* Nothing to send */
1364 }
1365
1366 dm_list_iterate_items(tmsg, &seg->thin_messages)
1367 if (!(_thin_pool_node_message(dnode, tmsg)))
1368 return_0;
1369
1370 return 1;
1371 }
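/*
 * Sketch of the transaction_id handshake implemented above:
 *
 *	kernel id == expected id	-> pool in sync, send nothing
 *	kernel id == expected id - 1	-> send the queued thin messages
 *	anything else			-> out of sync, fail
 *
 * The queued messages are expected to include a set_transaction_id
 * message (see _thin_pool_node_message) that moves the kernel id
 * forward to the expected value.
 */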
1372
1373 /*
1374 * FIXME Don't attempt to deactivate known internal dependencies.
1375 */
1376 static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1377 const char *uuid_prefix,
1378 size_t uuid_prefix_len,
1379 unsigned level)
1380 {
1381 int r = 1;
1382 void *handle = NULL;
1383 struct dm_tree_node *child = dnode;
1384 struct dm_info info;
1385 const struct dm_info *dinfo;
1386 const char *name;
1387 const char *uuid;
1388
1389 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1390 if (!(dinfo = dm_tree_node_get_info(child))) {
1391 stack;
1392 continue;
1393 }
1394
1395 if (!(name = dm_tree_node_get_name(child))) {
1396 stack;
1397 continue;
1398 }
1399
1400 if (!(uuid = dm_tree_node_get_uuid(child))) {
1401 stack;
1402 continue;
1403 }
1404
1405 /* Ignore if it doesn't belong to this VG */
1406 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1407 continue;
1408
1409 /* Refresh open_count */
1410 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1411 !info.exists)
1412 continue;
1413
1414 if (info.open_count) {
1415 /* Skip internal non-toplevel opened nodes */
1416 if (level)
1417 continue;
1418
1419 /* When retry is not allowed, error */
1420 if (!child->dtree->retry_remove) {
1421 log_error("Unable to deactivate open %s (%" PRIu32
1422 ":%" PRIu32 ")", name, info.major, info.minor);
1423 r = 0;
1424 continue;
1425 }
1426
1427 /* Check toplevel node for holders/mounted fs */
1428 if (!_check_device_not_in_use(name, &info)) {
1429 stack;
1430 r = 0;
1431 continue;
1432 }
1433 /* Go on with retry */
1434 }
1435
1436 /* Also check open_count in the parent nodes of presuspend_node */
1437 if ((child->presuspend_node &&
1438 !_node_has_closed_parents(child->presuspend_node,
1439 uuid_prefix, uuid_prefix_len))) {
1440 /* Only report error from (likely non-internal) dependency at top level */
1441 if (!level) {
1442 log_error("Unable to deactivate open %s (%" PRIu32
1443 ":%" PRIu32 ")", name, info.major,
1444 info.minor);
1445 r = 0;
1446 }
1447 continue;
1448 }
1449
1450 /* Suspend child node first if requested */
1451 if (child->presuspend_node &&
1452 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1453 continue;
1454
1455 if (!_deactivate_node(name, info.major, info.minor,
1456 &child->dtree->cookie, child->udev_flags,
1457 (level == 0) ? child->dtree->retry_remove : 0)) {
1458 log_error("Unable to deactivate %s (%" PRIu32
1459 ":%" PRIu32 ")", name, info.major,
1460 info.minor);
1461 r = 0;
1462 continue;
1463 } else if (info.suspended)
1464 dec_suspended();
1465
1466 if (dm_tree_node_num_children(child, 0)) {
1467 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
1468 return_0;
1469 }
1470 }
1471
1472 return r;
1473 }
1474
1475 int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1476 const char *uuid_prefix,
1477 size_t uuid_prefix_len)
1478 {
1479 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1480 }
1481
1482 void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1483 {
1484 dnode->dtree->skip_lockfs = 1;
1485 }
1486
1487 void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1488 {
1489 dnode->dtree->no_flush = 1;
1490 }
1491
1492 void dm_tree_retry_remove(struct dm_tree_node *dnode)
1493 {
1494 dnode->dtree->retry_remove = 1;
1495 }
1496
1497 int dm_tree_suspend_children(struct dm_tree_node *dnode,
1498 const char *uuid_prefix,
1499 size_t uuid_prefix_len)
1500 {
1501 int r = 1;
1502 void *handle = NULL;
1503 struct dm_tree_node *child = dnode;
1504 struct dm_info info, newinfo;
1505 const struct dm_info *dinfo;
1506 const char *name;
1507 const char *uuid;
1508
1509 /* Suspend nodes at this level of the tree */
1510 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1511 if (!(dinfo = dm_tree_node_get_info(child))) {
1512 stack;
1513 continue;
1514 }
1515
1516 if (!(name = dm_tree_node_get_name(child))) {
1517 stack;
1518 continue;
1519 }
1520
1521 if (!(uuid = dm_tree_node_get_uuid(child))) {
1522 stack;
1523 continue;
1524 }
1525
1526 /* Ignore if it doesn't belong to this VG */
1527 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1528 continue;
1529
1530 /* Ensure immediate parents are already suspended */
1531 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1532 continue;
1533
1534 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
1535 !info.exists || info.suspended)
1536 continue;
1537
1538 if (!_suspend_node(name, info.major, info.minor,
1539 child->dtree->skip_lockfs,
1540 child->dtree->no_flush, &newinfo)) {
1541 log_error("Unable to suspend %s (%" PRIu32
1542 ":%" PRIu32 ")", name, info.major,
1543 info.minor);
1544 r = 0;
1545 continue;
1546 }
1547
1548 /* Update cached info */
1549 child->info = newinfo;
1550 }
1551
1552 /* Then suspend any child nodes */
1553 handle = NULL;
1554
1555 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1556 if (!(uuid = dm_tree_node_get_uuid(child))) {
1557 stack;
1558 continue;
1559 }
1560
1561 /* Ignore if it doesn't belong to this VG */
1562 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1563 continue;
1564
1565 if (dm_tree_node_num_children(child, 0))
1566 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1567 return_0;
1568 }
1569
1570 return r;
1571 }
1572
1573 int dm_tree_activate_children(struct dm_tree_node *dnode,
1574 const char *uuid_prefix,
1575 size_t uuid_prefix_len)
1576 {
1577 int r = 1;
1578 void *handle = NULL;
1579 struct dm_tree_node *child = dnode;
1580 struct dm_info newinfo;
1581 const char *name;
1582 const char *uuid;
1583 int priority;
1584
1585 /* Activate children first */
1586 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1587 if (!(uuid = dm_tree_node_get_uuid(child))) {
1588 stack;
1589 continue;
1590 }
1591
1592 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1593 continue;
1594
1595 if (dm_tree_node_num_children(child, 0))
1596 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1597 return_0;
1598 }
1599
1600 handle = NULL;
1601
1602 for (priority = 0; priority < 3; priority++) {
1603 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1604 if (priority != child->activation_priority)
1605 continue;
1606
1607 if (!(uuid = dm_tree_node_get_uuid(child))) {
1608 stack;
1609 continue;
1610 }
1611
1612 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1613 continue;
1614
1615 if (!(name = dm_tree_node_get_name(child))) {
1616 stack;
1617 continue;
1618 }
1619
1620 /* Rename? */
1621 if (child->props.new_name) {
1622 if (!_rename_node(name, child->props.new_name, child->info.major,
1623 child->info.minor, &child->dtree->cookie,
1624 child->udev_flags)) {
1625 log_error("Failed to rename %s (%" PRIu32
1626 ":%" PRIu32 ") to %s", name, child->info.major,
1627 child->info.minor, child->props.new_name);
1628 return 0;
1629 }
1630 child->name = child->props.new_name;
1631 child->props.new_name = NULL;
1632 }
1633
1634 if (!child->info.inactive_table && !child->info.suspended)
1635 continue;
1636
1637 if (!_resume_node(child->name, child->info.major, child->info.minor,
1638 child->props.read_ahead, child->props.read_ahead_flags,
1639 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
1640 log_error("Unable to resume %s (%" PRIu32
1641 ":%" PRIu32 ")", child->name, child->info.major,
1642 child->info.minor);
1643 r = 0;
1644 continue;
1645 }
1646
1647 /* Update cached info */
1648 child->info = newinfo;
1649 }
1650 }
1651
1652 handle = NULL;
1653
1654 return r;
1655 }
1656
1657 static int _create_node(struct dm_tree_node *dnode)
1658 {
1659 int r = 0;
1660 struct dm_task *dmt;
1661
1662 log_verbose("Creating %s", dnode->name);
1663
1664 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1665 log_error("Create dm_task creation failed for %s", dnode->name);
1666 return 0;
1667 }
1668
1669 if (!dm_task_set_name(dmt, dnode->name)) {
1670 log_error("Failed to set device name for %s", dnode->name);
1671 goto out;
1672 }
1673
1674 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1675 log_error("Failed to set uuid for %s", dnode->name);
1676 goto out;
1677 }
1678
1679 if (dnode->props.major &&
1680 (!dm_task_set_major(dmt, dnode->props.major) ||
1681 !dm_task_set_minor(dmt, dnode->props.minor))) {
1682 log_error("Failed to set device number for %s creation.", dnode->name);
1683 goto out;
1684 }
1685
1686 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1687 log_error("Failed to set read only flag for %s", dnode->name);
1688 goto out;
1689 }
1690
1691 if (!dm_task_no_open_count(dmt))
1692 log_error("Failed to disable open_count");
1693
1694 if ((r = dm_task_run(dmt)))
1695 r = dm_task_get_info(dmt, &dnode->info);
1696
1697 out:
1698 dm_task_destroy(dmt);
1699
1700 return r;
1701 }
1702
1703
1704 static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
1705 {
1706 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
1707 log_error("Failed to format %s device number for %s as dm "
1708 "target (%u,%u)",
1709 node->name, node->uuid, node->info.major, node->info.minor);
1710 return 0;
1711 }
1712
1713 return 1;
1714 }
1715
1716 /* Simplify string emitting code. */
1717 #define EMIT_PARAMS(p, str...)\
1718 do {\
1719 int w;\
1720 if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
1721 stack; /* Out of space */\
1722 return -1;\
1723 }\
1724 p += w;\
1725 } while (0)
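/*
 * Example (sketch): building a table parameter string incrementally,
 * as the _emit_*_segment_line() functions below do:
 *
 *	int pos = 0;
 *	EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
 *	EMIT_PARAMS(pos, " %u", seg->region_size);
 *
 * Each call appends at params + pos. If dm_snprintf() runs out of
 * space the macro returns -1 from the calling function, and
 * _emit_segment() retries the whole line with a doubled buffer.
 */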
1726
1727 /*
1728 * _emit_areas_line
1729 *
1730 * Returns: 1 on success, 0 on failure
1731 */
1732 static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
1733 struct load_segment *seg, char *params,
1734 size_t paramsize, int *pos)
1735 {
1736 struct seg_area *area;
1737 char devbuf[DM_FORMAT_DEV_BUFSIZE];
1738 unsigned first_time = 1;
1739 const char *logtype, *synctype;
1740 unsigned log_parm_count;
1741
1742 dm_list_iterate_items(area, &seg->areas) {
1743 switch (seg->type) {
1744 case SEG_REPLICATOR_DEV:
1745 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1746 return_0;
1747
1748 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1749 if (first_time)
1750 EMIT_PARAMS(*pos, " nolog 0");
1751 else {
1752 /* Remote devices */
1753 log_parm_count = (area->flags &
1754 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1755
1756 if (!area->slog) {
1757 devbuf[0] = 0; /* Only core log parameters */
1758 logtype = "core";
1759 } else {
1760 devbuf[0] = ' '; /* Extra space before device name */
1761 if (!_build_dev_string(devbuf + 1,
1762 sizeof(devbuf) - 1,
1763 area->slog))
1764 return_0;
1765 logtype = "disk";
1766 log_parm_count++; /* Extra sync log device name parameter */
1767 }
1768
1769 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1770 log_parm_count, devbuf, area->region_size);
1771
1772 synctype = (area->flags & DM_NOSYNC) ?
1773 " nosync" : (area->flags & DM_FORCESYNC) ?
1774 " sync" : NULL;
1775
1776 if (synctype)
1777 EMIT_PARAMS(*pos, "%s", synctype);
1778 }
1779 break;
1780 case SEG_RAID1:
1781 case SEG_RAID4:
1782 case SEG_RAID5_LA:
1783 case SEG_RAID5_RA:
1784 case SEG_RAID5_LS:
1785 case SEG_RAID5_RS:
1786 case SEG_RAID6_ZR:
1787 case SEG_RAID6_NR:
1788 case SEG_RAID6_NC:
1789 if (!area->dev_node) {
1790 EMIT_PARAMS(*pos, " -");
1791 break;
1792 }
1793 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1794 return_0;
1795
1796 EMIT_PARAMS(*pos, " %s", devbuf);
1797 break;
1798 default:
1799 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1800 return_0;
1801
1802 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1803 devbuf, area->offset);
1804 }
1805
1806 first_time = 0;
1807 }
1808
1809 return 1;
1810 }
1811
1812 static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
1813 size_t paramsize, int *pos)
1814 {
1815 const struct load_segment *rlog_seg;
1816 struct replicator_site *rsite;
1817 char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
1818 unsigned parm_count;
1819
1820 if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
1821 return_0;
1822
1823 rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
1824 struct load_segment);
1825
1826 EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
1827 seg->rlog_type, rlogbuf, rlog_seg->size);
1828
1829 dm_list_iterate_items(rsite, &seg->rsites) {
1830 parm_count = (rsite->fall_behind_data
1831 || rsite->fall_behind_ios
1832 || rsite->async_timeout) ? 4 : 2;
1833
1834 EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
1835 (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");
1836
1837 if (rsite->fall_behind_data)
1838 EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
1839 else if (rsite->fall_behind_ios)
1840 EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
1841 else if (rsite->async_timeout)
1842 EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
1843 }
1844
1845 return 1;
1846 }
1847
1848 /*
1849 * Returns: 1 on success, 0 on failure
1850 */
1851 static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
1852 char *params, size_t paramsize)
1853 {
1854 int block_on_error = 0;
1855 int handle_errors = 0;
1856 int dm_log_userspace = 0;
1857 struct utsname uts;
1858 unsigned log_parm_count;
1859 int pos = 0, parts;
1860 char logbuf[DM_FORMAT_DEV_BUFSIZE];
1861 const char *logtype;
1862 unsigned kmaj = 0, kmin = 0, krel = 0;
1863
1864 if (uname(&uts) == -1) {
1865 log_error("Cannot read kernel release version.");
1866 return 0;
1867 }
1868
1869 /* Kernels with a major number of 2 always had 3 parts. */
1870 parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
1871 if (parts < 1 || (kmaj < 3 && parts < 3)) {
1872 log_error("Wrong kernel release version %s.", uts.release);
1873 return 0;
1874 }
1875
1876 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1877 /*
1878 * Originally, block_on_error was an argument to the log
1879 * portion of the mirror CTR table. It was renamed to
1880 * "handle_errors" and now resides in the 'features'
1881 * section of the mirror CTR table (i.e. at the end).
1882 *
1883 * We can identify whether to use "block_on_error" or
1884 * "handle_errors" by the dm-mirror module's version
1885 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1886 */
1887 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
1888 handle_errors = 1;
1889 else
1890 block_on_error = 1;
1891 }
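/*
 * Example: with a core log and two mirror images, the two branches
 * above yield parameter strings of these shapes (sketched, offsets 0):
 *
 *	>= 2.6.22: "core 1 <region_size> 2 <dev1> 0 <dev2> 0 1 handle_errors"
 *	<  2.6.22: "core 2 <region_size> block_on_error 2 <dev1> 0 <dev2> 0"
 */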
1892
1893 if (seg->clustered) {
1894 /* Cluster mirrors require a UUID */
1895 if (!seg->uuid)
1896 return_0;
1897
1898 /*
1899 * Cluster mirrors used to have their own log
1900 * types. Now they are accessed through the
1901 * userspace log type.
1902 *
1903 * The dm-log-userspace module was added to the
1904 * 2.6.31 kernel.
1905 */
1906 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
1907 dm_log_userspace = 1;
1908 }
1909
1910 /* Region size */
1911 log_parm_count = 1;
1912
1913 /* [no]sync, block_on_error etc. */
1914 log_parm_count += hweight32(seg->flags);
1915
1916 /* "handle_errors" is a feature arg now */
1917 if (handle_errors)
1918 log_parm_count--;
1919
1920 /* DM_CORELOG does not count in the param list */
1921 if (seg->flags & DM_CORELOG)
1922 log_parm_count--;
1923
1924 if (seg->clustered) {
1925 log_parm_count++; /* For UUID */
1926
1927 if (!dm_log_userspace)
1928 EMIT_PARAMS(pos, "clustered-");
1929 else
1930 /* For clustered-* type field inserted later */
1931 log_parm_count++;
1932 }
1933
1934 if (!seg->log)
1935 logtype = "core";
1936 else {
1937 logtype = "disk";
1938 log_parm_count++;
1939 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1940 return_0;
1941 }
1942
1943 if (dm_log_userspace)
1944 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1945 log_parm_count, seg->uuid, logtype);
1946 else
1947 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
1948
1949 if (seg->log)
1950 EMIT_PARAMS(pos, " %s", logbuf);
1951
1952 EMIT_PARAMS(pos, " %u", seg->region_size);
1953
1954 if (seg->clustered && !dm_log_userspace)
1955 EMIT_PARAMS(pos, " %s", seg->uuid);
1956
1957 if ((seg->flags & DM_NOSYNC))
1958 EMIT_PARAMS(pos, " nosync");
1959 else if ((seg->flags & DM_FORCESYNC))
1960 EMIT_PARAMS(pos, " sync");
1961
1962 if (block_on_error)
1963 EMIT_PARAMS(pos, " block_on_error");
1964
1965 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1966
1967 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
1968 return_0;
1969
1970 if (handle_errors)
1971 EMIT_PARAMS(pos, " 1 handle_errors");
1972
1973 return 1;
1974 }
1975
1976 static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1977 uint32_t minor, struct load_segment *seg,
1978 uint64_t *seg_start, char *params,
1979 size_t paramsize)
1980 {
1981 uint32_t i;
1982 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1983 int pos = 0;
1984
1985 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1986 param_count++;
1987
1988 if (seg->region_size)
1989 param_count += 2;
1990
1991 /* rebuilds is 64-bit */
1992 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
1993 param_count += 2 * hweight32(seg->rebuilds >> 32);
1994
1995 if ((seg->type == SEG_RAID1) && seg->stripe_size)
1996 log_error("WARNING: Ignoring RAID1 stripe size");
1997
1998 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
1999 param_count, seg->stripe_size);
2000
2001 if (seg->flags & DM_NOSYNC)
2002 EMIT_PARAMS(pos, " nosync");
2003 else if (seg->flags & DM_FORCESYNC)
2004 EMIT_PARAMS(pos, " sync");
2005
2006 if (seg->region_size)
2007 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2008
2009 for (i = 0; i < (seg->area_count / 2); i++)
2010 if (seg->rebuilds & (1ULL << i))
2011 EMIT_PARAMS(pos, " rebuild %u", i);
2012
2013 /* Print number of metadata/data device pairs */
2014 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2015
2016 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2017 return_0;
2018
2019 return 1;
2020 }
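/*
 * Example (sketch): raid1 with two metadata/data pairs, region_size
 * set and a rebuild of device 0 gives param_count = 1 + 2 + 2 = 5:
 *
 *	"raid1 5 0 region_size <n> rebuild 0 2 <meta1> <data1> <meta2> <data2>"
 *
 * (raid1 ignores stripe_size, so 0 is emitted; a nonzero value only
 * triggers the warning above.)
 */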
2021
2022 static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
2023 uint32_t minor, struct load_segment *seg,
2024 uint64_t *seg_start, char *params,
2025 size_t paramsize)
2026 {
2027 int pos = 0;
2028 int r;
2029 int target_type_is_raid = 0;
2030 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
2031 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
2032
2033 switch (seg->type) {
2034 case SEG_ERROR:
2035 case SEG_ZERO:
2036 case SEG_LINEAR:
2037 break;
2038 case SEG_MIRRORED:
2039 /* Mirrors are pretty complicated - now in separate function */
2040 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
2041 if (!r)
2042 return_0;
2043 break;
2044 case SEG_REPLICATOR:
2045 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
2046 &pos)) <= 0) {
2047 stack;
2048 return r;
2049 }
2050 break;
2051 case SEG_REPLICATOR_DEV:
2052 if (!seg->replicator || !_build_dev_string(originbuf,
2053 sizeof(originbuf),
2054 seg->replicator))
2055 return_0;
2056
2057 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
2058 break;
2059 case SEG_SNAPSHOT:
2060 case SEG_SNAPSHOT_MERGE:
2061 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2062 return_0;
2063 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
2064 return_0;
2065 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
2066 seg->persistent ? 'P' : 'N', seg->chunk_size);
2067 break;
2068 case SEG_SNAPSHOT_ORIGIN:
2069 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2070 return_0;
2071 EMIT_PARAMS(pos, "%s", originbuf);
2072 break;
2073 case SEG_STRIPED:
2074 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
2075 break;
2076 case SEG_CRYPT:
2077 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
2078 seg->chainmode ? "-" : "", seg->chainmode ?: "",
2079 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
2080 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
2081 seg->iv_offset : *seg_start);
2082 break;
2083 case SEG_RAID1:
2084 case SEG_RAID4:
2085 case SEG_RAID5_LA:
2086 case SEG_RAID5_RA:
2087 case SEG_RAID5_LS:
2088 case SEG_RAID5_RS:
2089 case SEG_RAID6_ZR:
2090 case SEG_RAID6_NR:
2091 case SEG_RAID6_NC:
2092 target_type_is_raid = 1;
2093 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
2094 params, paramsize);
2095 if (!r)
2096 return_0;
2097
2098 break;
2099 case SEG_THIN_POOL:
2100 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
2101 return_0;
2102 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2103 return_0;
2104 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
2105 seg->data_block_size, seg->low_water_mark,
2106 seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
2107 break;
2108 case SEG_THIN:
2109 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2110 return_0;
2111 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
2112 break;
2113 }
2114
	switch (seg->type) {
2116 case SEG_ERROR:
2117 case SEG_REPLICATOR:
2118 case SEG_SNAPSHOT:
2119 case SEG_SNAPSHOT_ORIGIN:
2120 case SEG_SNAPSHOT_MERGE:
2121 case SEG_ZERO:
2122 case SEG_THIN_POOL:
2123 case SEG_THIN:
2124 break;
2125 case SEG_CRYPT:
2126 case SEG_LINEAR:
2127 case SEG_REPLICATOR_DEV:
2128 case SEG_STRIPED:
2129 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
2130 stack;
2131 return r;
2132 }
2133 if (!params[0]) {
2134 log_error("No parameters supplied for %s target "
2135 "%u:%u.", dm_segtypes[seg->type].target,
2136 major, minor);
2137 return 0;
2138 }
2139 break;
2140 }
2141
2142 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
2143 " %" PRIu64 " %s %s", major, minor,
2144 *seg_start, seg->size, target_type_is_raid ? "raid" :
2145 dm_segtypes[seg->type].target, params);
2146
2147 if (!dm_task_add_target(dmt, *seg_start, seg->size,
2148 target_type_is_raid ? "raid" :
2149 dm_segtypes[seg->type].target, params))
2150 return_0;
2151
2152 *seg_start += seg->size;
2153
2154 return 1;
2155 }
2156
2157 #undef EMIT_PARAMS
2158
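/*
 * Build the parameter line for one segment and add it to the task.
 * The params buffer starts at 4k and is doubled, up to
 * MAX_TARGET_PARAMSIZE, whenever _emit_segment_line() reports it ran
 * out of room.
 */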
2159 static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2160 struct load_segment *seg, uint64_t *seg_start)
2161 {
2162 char *params;
2163 size_t paramsize = 4096;
2164 int ret;
2165
2166 do {
2167 if (!(params = dm_malloc(paramsize))) {
2168 log_error("Insufficient space for target parameters.");
2169 return 0;
2170 }
2171
2172 params[0] = '\0';
2173 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2174 params, paramsize);
2175 dm_free(params);
2176
2177 if (!ret)
2178 stack;
2179
2180 if (ret >= 0)
2181 return ret;
2182
2183 log_debug("Insufficient space in params[%" PRIsize_t
2184 "] for target parameters.", paramsize);
2185
2186 paramsize *= 2;
2187 } while (paramsize < MAX_TARGET_PARAMSIZE);
2188
2189 log_error("Target parameter size too big. Aborting.");
2190 return 0;
2191 }
2192
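/*
 * Load (but do not resume) the table for a single node via
 * DM_DEVICE_RELOAD, suppressing reloads of identical tables, and record
 * in props.size_changed whether the table size differs from the one
 * already in the kernel.
 */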
2193 static int _load_node(struct dm_tree_node *dnode)
2194 {
2195 int r = 0;
2196 struct dm_task *dmt;
2197 struct load_segment *seg;
2198 uint64_t seg_start = 0, existing_table_size;
2199
2200 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2201 dnode->info.major, dnode->info.minor);
2202
2203 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2204 log_error("Reload dm_task creation failed for %s", dnode->name);
2205 return 0;
2206 }
2207
2208 if (!dm_task_set_major(dmt, dnode->info.major) ||
2209 !dm_task_set_minor(dmt, dnode->info.minor)) {
2210 log_error("Failed to set device number for %s reload.", dnode->name);
2211 goto out;
2212 }
2213
2214 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2215 log_error("Failed to set read only flag for %s", dnode->name);
2216 goto out;
2217 }
2218
2219 if (!dm_task_no_open_count(dmt))
2220 log_error("Failed to disable open_count");
2221
2222 dm_list_iterate_items(seg, &dnode->props.segs)
2223 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2224 seg, &seg_start))
2225 goto_out;
2226
2227 if (!dm_task_suppress_identical_reload(dmt))
2228 log_error("Failed to suppress reload of identical tables.");
2229
2230 if ((r = dm_task_run(dmt))) {
2231 r = dm_task_get_info(dmt, &dnode->info);
2232 if (r && !dnode->info.inactive_table)
2233 log_verbose("Suppressed %s identical table reload.",
2234 dnode->name);
2235
2236 existing_table_size = dm_task_get_existing_table_size(dmt);
2237 if ((dnode->props.size_changed =
2238 (existing_table_size == seg_start) ? 0 : 1)) {
2239 log_debug("Table size changed from %" PRIu64 " to %"
2240 PRIu64 " for %s", existing_table_size,
2241 seg_start, dnode->name);
2242 /*
2243 * Kernel usually skips size validation on zero-length devices
2244 * now so no need to preload them.
2245 */
2246 /* FIXME In which kernel version did this begin? */
2247 if (!existing_table_size && dnode->props.delay_resume_if_new)
2248 dnode->props.size_changed = 0;
2249 }
2250 }
2251
2252 dnode->props.segment_count = 0;
2253
2254 out:
2255 dm_task_destroy(dmt);
2256
2257 return r;
2258 }
2259
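/*
 * Depth-first: create any missing child devices and load their tables,
 * then resume those whose size changed and that have parents, so the
 * kernel can validate the new sizes before the parents' tables are
 * loaded. Finally any queued thin pool messages are sent; if that
 * fails, the children are deactivated again.
 */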
2260 int dm_tree_preload_children(struct dm_tree_node *dnode,
2261 const char *uuid_prefix,
2262 size_t uuid_prefix_len)
2263 {
2264 int r = 1;
2265 void *handle = NULL;
2266 struct dm_tree_node *child;
2267 struct dm_info newinfo;
2268 int update_devs_flag = 0;
2269
2270 /* Preload children first */
2271 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2272 /* Skip existing non-device-mapper devices */
2273 if (!child->info.exists && child->info.major)
2274 continue;
2275
2276 /* Ignore if it doesn't belong to this VG */
2277 if (child->info.exists &&
2278 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
2279 continue;
2280
2281 if (dm_tree_node_num_children(child, 0))
2282 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2283 return_0;
2284
2285 /* FIXME Cope if name exists with no uuid? */
2286 if (!child->info.exists && !_create_node(child))
2287 return_0;
2288
2289 if (!child->info.inactive_table &&
2290 child->props.segment_count &&
2291 !_load_node(child))
2292 return_0;
2293
		/* Propagate device size change */
2295 if (child->props.size_changed)
2296 dnode->props.size_changed = 1;
2297
2298 /* Resume device immediately if it has parents and its size changed */
2299 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
2300 continue;
2301
2302 if (!child->info.inactive_table && !child->info.suspended)
2303 continue;
2304
2305 if (!_resume_node(child->name, child->info.major, child->info.minor,
2306 child->props.read_ahead, child->props.read_ahead_flags,
2307 &newinfo, &child->dtree->cookie, child->udev_flags,
2308 child->info.suspended)) {
2309 log_error("Unable to resume %s (%" PRIu32
2310 ":%" PRIu32 ")", child->name, child->info.major,
2311 child->info.minor);
2312 r = 0;
2313 continue;
2314 }
2315
2316 /* Update cached info */
2317 child->info = newinfo;
2318
2319 /*
2320 * Prepare for immediate synchronization with udev and flush all stacked
2321 * dev node operations if requested by immediate_dev_node property. But
2322 * finish processing current level in the tree first.
2323 */
2324 if (child->props.immediate_dev_node)
2325 update_devs_flag = 1;
2326 }
2327
2328 handle = NULL;
2329
2330 if (update_devs_flag) {
2331 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2332 stack;
2333 dm_tree_set_cookie(dnode, 0);
2334 }
2335
2336 if (r && !_node_send_messages(dnode, uuid_prefix, uuid_prefix_len)) {
2337 stack;
2338 if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
2339 log_error("Failed to deactivate %s", dnode->name);
2340 r = 0;
2341 }
2342
2343 return r;
2344 }
2345
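/*
 * Typical calling sequence (a sketch only - error handling is omitted
 * and the uuid prefix shown is the LVM one):
 *
 *	struct dm_tree *dtree = dm_tree_create();
 *	struct dm_tree_node *root = dm_tree_find_node(dtree, 0, 0);
 *	... add child nodes and their segments ...
 *	if (!dm_tree_preload_children(root, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
 *		stack;
 *	if (!dm_tree_activate_children(root, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
 *		stack;
 *	if (!dm_udev_wait(dm_tree_get_cookie(root)))
 *		stack;
 *	dm_tree_free(dtree);
 */
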
/*
 * Returns 1 if any descendant node uses a uuid with the given prefix,
 * or if unsure.
 */
2349 int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
2350 const char *uuid_prefix,
2351 size_t uuid_prefix_len)
2352 {
2353 void *handle = NULL;
2354 struct dm_tree_node *child = dnode;
2355 const char *uuid;
2356
2357 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2358 if (!(uuid = dm_tree_node_get_uuid(child))) {
2359 log_error("Failed to get uuid for dtree node.");
2360 return 1;
2361 }
2362
2363 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
2364 return 1;
2365
		/* Propagate a match found further down the tree */
		if (dm_tree_node_num_children(child, 0) &&
		    dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len))
			return 1;
2368 }
2369
2370 return 0;
2371 }
2372
2373 /*
2374 * Target functions
2375 */
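/*
 * Allocate a load_segment of the given type from the tree's pool and
 * append it to the node's list of segments.
 */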
2376 static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
2377 {
2378 struct load_segment *seg;
2379
2380 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2381 log_error("dtree node segment allocation failed");
2382 return NULL;
2383 }
2384
2385 seg->type = type;
2386 seg->size = size;
2387 seg->area_count = 0;
2388 dm_list_init(&seg->areas);
2389 seg->stripe_size = 0;
2390 seg->persistent = 0;
2391 seg->chunk_size = 0;
2392 seg->cow = NULL;
2393 seg->origin = NULL;
2394 seg->merge = NULL;
2395
2396 dm_list_add(&dnode->props.segs, &seg->list);
2397 dnode->props.segment_count++;
2398
2399 return seg;
2400 }
2401
2402 int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
2403 uint64_t size,
2404 const char *origin_uuid)
2405 {
2406 struct load_segment *seg;
2407 struct dm_tree_node *origin_node;
2408
2409 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2410 return_0;
2411
2412 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
2413 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2414 return 0;
2415 }
2416
2417 seg->origin = origin_node;
2418 if (!_link_tree_nodes(dnode, origin_node))
2419 return_0;
2420
2421 /* Resume snapshot origins after new snapshots */
2422 dnode->activation_priority = 1;
2423
2424 return 1;
2425 }
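
/*
 * For reference, the table line emitted for a snapshot-origin segment is
 * simply "<start> <size> snapshot-origin <origin_dev>", e.g. with
 * hypothetical numbers: "0 2097152 snapshot-origin 253:3".
 */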
2426
2427 static int _add_snapshot_target(struct dm_tree_node *node,
2428 uint64_t size,
2429 const char *origin_uuid,
2430 const char *cow_uuid,
2431 const char *merge_uuid,
2432 int persistent,
2433 uint32_t chunk_size)
2434 {
2435 struct load_segment *seg;
2436 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2437 unsigned seg_type;
2438
	seg_type = merge_uuid ? SEG_SNAPSHOT_MERGE : SEG_SNAPSHOT;
2440
2441 if (!(seg = _add_segment(node, seg_type, size)))
2442 return_0;
2443
2444 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
2445 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2446 return 0;
2447 }
2448
2449 seg->origin = origin_node;
2450 if (!_link_tree_nodes(node, origin_node))
2451 return_0;
2452
2453 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
2454 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
2455 return 0;
2456 }
2457
2458 seg->cow = cow_node;
2459 if (!_link_tree_nodes(node, cow_node))
2460 return_0;
2461
2462 seg->persistent = persistent ? 1 : 0;
2463 seg->chunk_size = chunk_size;
2464
2465 if (merge_uuid) {
2466 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2467 /* not a pure error, merging snapshot may have been deactivated */
2468 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2469 } else {
2470 seg->merge = merge_node;
2471 /* must not link merging snapshot, would undermine activation_priority below */
2472 }
2473
2474 /* Resume snapshot-merge (acting origin) after other snapshots */
2475 node->activation_priority = 1;
2476 if (seg->merge) {
2477 /* Resume merging snapshot after snapshot-merge */
2478 seg->merge->activation_priority = 2;
2479 }
2480 }
2481
2482 return 1;
2483 }
2484
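/*
 * The matching snapshot table line is "<start> <size> snapshot
 * <origin_dev> <cow_dev> <P|N> <chunk_size>", for example (hypothetical
 * numbers): "0 2097152 snapshot 253:3 253:4 P 16". A snapshot-merge
 * target takes the same parameters.
 */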
2485
2486 int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2487 uint64_t size,
2488 const char *origin_uuid,
2489 const char *cow_uuid,
2490 int persistent,
2491 uint32_t chunk_size)
2492 {
2493 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2494 NULL, persistent, chunk_size);
2495 }
2496
2497 int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2498 uint64_t size,
2499 const char *origin_uuid,
2500 const char *cow_uuid,
2501 const char *merge_uuid,
2502 uint32_t chunk_size)
2503 {
2504 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2505 merge_uuid, 1, chunk_size);
2506 }
2507
2508 int dm_tree_node_add_error_target(struct dm_tree_node *node,
2509 uint64_t size)
2510 {
2511 if (!_add_segment(node, SEG_ERROR, size))
2512 return_0;
2513
2514 return 1;
2515 }
2516
2517 int dm_tree_node_add_zero_target(struct dm_tree_node *node,
2518 uint64_t size)
2519 {
2520 if (!_add_segment(node, SEG_ZERO, size))
2521 return_0;
2522
2523 return 1;
2524 }
2525
2526 int dm_tree_node_add_linear_target(struct dm_tree_node *node,
2527 uint64_t size)
2528 {
2529 if (!_add_segment(node, SEG_LINEAR, size))
2530 return_0;
2531
2532 return 1;
2533 }
2534
2535 int dm_tree_node_add_striped_target(struct dm_tree_node *node,
2536 uint64_t size,
2537 uint32_t stripe_size)
2538 {
2539 struct load_segment *seg;
2540
2541 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2542 return_0;
2543
2544 seg->stripe_size = stripe_size;
2545
2546 return 1;
2547 }
2548
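/*
 * Example striped table line with hypothetical devices:
 * "0 409600 striped 2 128 253:1 0 253:2 0" - two stripes, 128-sector
 * stripe size, each area given as "<dev> <offset>".
 */
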
2549 int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2550 uint64_t size,
2551 const char *cipher,
2552 const char *chainmode,
2553 const char *iv,
2554 uint64_t iv_offset,
2555 const char *key)
2556 {
2557 struct load_segment *seg;
2558
2559 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2560 return_0;
2561
2562 seg->cipher = cipher;
2563 seg->chainmode = chainmode;
2564 seg->iv = iv;
2565 seg->iv_offset = iv_offset;
2566 seg->key = key;
2567
2568 return 1;
2569 }
2570
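/*
 * This yields a dm-crypt table line of the form
 * "<cipher>[-<chainmode>][-<iv>] <key> <iv_offset> <dev> <offset>",
 * e.g. (key elided, hypothetical device):
 * "aes-cbc-essiv:sha256 <hex_key> 0 253:1 0".
 */
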
2571 int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
2572 uint32_t region_size,
2573 unsigned clustered,
2574 const char *log_uuid,
2575 unsigned area_count,
2576 uint32_t flags)
2577 {
2578 struct dm_tree_node *log_node = NULL;
2579 struct load_segment *seg;
2580
2581 if (!node->props.segment_count) {
2582 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2583 return 0;
2584 }
2585
2586 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2587
2588 if (log_uuid) {
2589 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2590 log_error("log uuid pool_strdup failed");
2591 return 0;
2592 }
2593 if ((flags & DM_CORELOG))
2594 /* For pvmove: immediate resume (for size validation) isn't needed. */
2595 node->props.delay_resume_if_new = 1;
2596 else {
2597 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2598 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2599 return 0;
2600 }
2601
2602 if (clustered)
2603 log_node->props.immediate_dev_node = 1;
2604
2605 /* The kernel validates the size of disk logs. */
2606 /* FIXME Propagate to any devices below */
2607 log_node->props.delay_resume_if_new = 0;
2608
2609 if (!_link_tree_nodes(node, log_node))
2610 return_0;
2611 }
2612 }
2613
2614 seg->log = log_node;
2615 seg->region_size = region_size;
2616 seg->clustered = clustered;
2617 seg->mirror_area_count = area_count;
2618 seg->flags = flags;
2619
2620 return 1;
2621 }
2622
2623 int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
2624 uint64_t size)
2625 {
2626 if (!_add_segment(node, SEG_MIRRORED, size))
2627 return_0;
2628
2629 return 1;
2630 }
2631
2632 int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2633 uint64_t size,
2634 const char *raid_type,
2635 uint32_t region_size,
2636 uint32_t stripe_size,
2637 uint64_t rebuilds,
2638 uint64_t reserved2)
2639 {
2640 int i;
2641 struct load_segment *seg = NULL;
2642
2643 for (i = 0; dm_segtypes[i].target && !seg; i++)
2644 if (!strcmp(raid_type, dm_segtypes[i].target))
2645 if (!(seg = _add_segment(node,
2646 dm_segtypes[i].type, size)))
2647 return_0;
2648
2649 if (!seg)
2650 return_0;
2651
2652 seg->region_size = region_size;
2653 seg->stripe_size = stripe_size;
2654 seg->area_count = 0;
2655 seg->rebuilds = rebuilds;
2656
2657 return 1;
2658 }
2659
2660 int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2661 uint64_t size,
2662 const char *rlog_uuid,
2663 const char *rlog_type,
2664 unsigned rsite_index,
2665 dm_replicator_mode_t mode,
2666 uint32_t async_timeout,
2667 uint64_t fall_behind_data,
2668 uint32_t fall_behind_ios)
2669 {
2670 struct load_segment *rseg;
2671 struct replicator_site *rsite;
2672
2673 /* Local site0 - adds replicator segment and links rlog device */
2674 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2675 if (node->props.segment_count) {
2676 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2677 return 0;
2678 }
2679
2680 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2681 return_0;
2682
2683 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2684 log_error("Missing replicator log uuid %s.", rlog_uuid);
2685 return 0;
2686 }
2687
2688 if (!_link_tree_nodes(node, rseg->log))
2689 return_0;
2690
2691 if (strcmp(rlog_type, "ringbuffer") != 0) {
2692 log_error("Unsupported replicator log type %s.", rlog_type);
2693 return 0;
2694 }
2695
2696 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2697 return_0;
2698
2699 dm_list_init(&rseg->rsites);
2700 rseg->rdevice_count = 0;
2701 node->activation_priority = 1;
2702 }
2703
2704 /* Add site to segment */
2705 if (mode == DM_REPLICATOR_SYNC
2706 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2707 log_error("Async parameters passed for synchronnous replicator.");
2708 return 0;
2709 }
2710
2711 if (node->props.segment_count != 1) {
2712 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2713 return 0;
2714 }
2715
2716 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2717 if (rseg->type != SEG_REPLICATOR) {
2718 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2719 dm_segtypes[rseg->type].target);
2720 return 0;
2721 }
2722
2723 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2724 log_error("Failed to allocate remote site segment.");
2725 return 0;
2726 }
2727
2728 dm_list_add(&rseg->rsites, &rsite->list);
2729 rseg->rsite_count++;
2730
2731 rsite->mode = mode;
2732 rsite->async_timeout = async_timeout;
2733 rsite->fall_behind_data = fall_behind_data;
2734 rsite->fall_behind_ios = fall_behind_ios;
2735 rsite->rsite_index = rsite_index;
2736
2737 return 1;
2738 }
2739
2740 /* Appends device node to Replicator */
2741 int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2742 uint64_t size,
2743 const char *replicator_uuid,
2744 uint64_t rdevice_index,
2745 const char *rdev_uuid,
2746 unsigned rsite_index,
2747 const char *slog_uuid,
2748 uint32_t slog_flags,
2749 uint32_t slog_region_size)
2750 {
2751 struct seg_area *area;
2752 struct load_segment *rseg;
2753 struct load_segment *rep_seg;
2754
2755 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2756 /* Site index for local target */
2757 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2758 return_0;
2759
2760 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2761 log_error("Missing replicator uuid %s.", replicator_uuid);
2762 return 0;
2763 }
2764
		/* Local slink0 for the replicator must always be initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non-replicator segment.");
2768 return 0;
2769 }
2770
2771 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2772 if (rep_seg->type != SEG_REPLICATOR) {
2773 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2774 dm_segtypes[rep_seg->type].target);
2775 return 0;
2776 }
2777 rep_seg->rdevice_count++;
2778
2779 if (!_link_tree_nodes(node, rseg->replicator))
2780 return_0;
2781
2782 rseg->rdevice_index = rdevice_index;
2783 } else {
		/* Local slink0 for the replicator must always be initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment.");
2787 return 0;
2788 }
2789
2790 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2791 if (rseg->type != SEG_REPLICATOR_DEV) {
2792 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
2793 dm_segtypes[rseg->type].target);
2794 return 0;
2795 }
2796 }
2797
2798 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2799 log_error("Unspecified sync log uuid.");
2800 return 0;
2801 }
2802
2803 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2804 return_0;
2805
2806 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2807
2808 if (!(slog_flags & DM_CORELOG)) {
2809 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2810 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2811 return 0;
2812 }
2813
2814 if (!_link_tree_nodes(node, area->slog))
2815 return_0;
2816 }
2817
2818 area->flags = slog_flags;
2819 area->region_size = slog_region_size;
2820 area->rsite_index = rsite_index;
2821
2822 return 1;
2823 }
2824
2825 static int _thin_validate_device_id(uint32_t device_id)
2826 {
2827 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2828 log_error("Device id %u is higher then %u.",
2829 device_id, DM_THIN_MAX_DEVICE_ID);
2830 return 0;
2831 }
2832
2833 return 1;
2834 }
2835
2836 int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2837 uint64_t size,
2838 uint64_t transaction_id,
2839 const char *metadata_uuid,
2840 const char *pool_uuid,
2841 uint32_t data_block_size,
2842 uint64_t low_water_mark,
2843 unsigned skip_block_zeroing)
2844 {
2845 struct load_segment *seg;
2846
2847 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
2848 log_error("Data block size %u is lower then %u sectors.",
2849 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
2850 return 0;
2851 }
2852
2853 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
2854 log_error("Data block size %u is higher then %u sectors.",
2855 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
2856 return 0;
2857 }
2858
2859 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2860 return_0;
2861
2862 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2863 log_error("Missing metadata uuid %s.", metadata_uuid);
2864 return 0;
2865 }
2866
2867 if (!_link_tree_nodes(node, seg->metadata))
2868 return_0;
2869
2870 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2871 log_error("Missing pool uuid %s.", pool_uuid);
2872 return 0;
2873 }
2874
2875 if (!_link_tree_nodes(node, seg->pool))
2876 return_0;
2877
	node->props.thin_pool_transaction_id = transaction_id; /* Compared on resume */
2879 seg->low_water_mark = low_water_mark;
2880 seg->data_block_size = data_block_size;
2881 seg->skip_block_zeroing = skip_block_zeroing;
2882 dm_list_init(&seg->thin_messages);
2883
2884 return 1;
2885 }
2886
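/*
 * The resulting thin-pool table line is "<start> <size> thin-pool
 * <metadata_dev> <data_dev> <data_block_size> <low_water_mark>
 * <#features> [skip_block_zeroing]"; with hypothetical numbers:
 * "0 2097152 thin-pool 253:0 253:1 128 32768 0".
 */
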
2887 int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
2888 const struct dm_thin_message *message)
2889 {
2890 struct load_segment *seg;
2891 struct thin_message *tm;
2892
2893 if (node->props.segment_count != 1) {
2894 log_error("Thin pool node must have only one segment.");
2895 return 0;
2896 }
2897
2898 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2899 if (seg->type != SEG_THIN_POOL) {
2900 log_error("Thin pool node has segment type %s.",
2901 dm_segtypes[seg->type].target);
2902 return 0;
2903 }
2904
2905 if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
2906 log_error("Failed to allocate thin message.");
2907 return 0;
2908 }
2909
2910 switch (message->type) {
2911 case DM_THIN_MESSAGE_CREATE_SNAP:
		/* If the thin origin is active, it must be suspended first! */
2913 if (message->u.m_create_snap.device_id == message->u.m_create_snap.origin_id) {
2914 log_error("Cannot use same device id for origin and its snapshot.");
2915 return 0;
2916 }
2917 if (!_thin_validate_device_id(message->u.m_create_snap.device_id) ||
2918 !_thin_validate_device_id(message->u.m_create_snap.origin_id))
2919 return_0;
2920 tm->message.u.m_create_snap = message->u.m_create_snap;
2921 break;
2922 case DM_THIN_MESSAGE_CREATE_THIN:
2923 if (!_thin_validate_device_id(message->u.m_create_thin.device_id))
2924 return_0;
2925 tm->message.u.m_create_thin = message->u.m_create_thin;
2926 tm->expected_errno = EEXIST;
2927 break;
2928 case DM_THIN_MESSAGE_DELETE:
2929 if (!_thin_validate_device_id(message->u.m_delete.device_id))
2930 return_0;
2931 tm->message.u.m_delete = message->u.m_delete;
2932 tm->expected_errno = ENODATA;
2933 break;
2934 case DM_THIN_MESSAGE_TRIM:
2935 if (!_thin_validate_device_id(message->u.m_trim.device_id))
2936 return_0;
2937 tm->message.u.m_trim = message->u.m_trim;
2938 break;
2939 case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
2940 if (message->u.m_set_transaction_id.current_id !=
2941 (message->u.m_set_transaction_id.new_id - 1)) {
2942 log_error("New transaction_id must be sequential.");
2943 return 0; /* FIXME: Maybe too strict here? */
2944 }
2945 tm->message.u.m_set_transaction_id = message->u.m_set_transaction_id;
2946 break;
2947 default:
2948 log_error("Unsupported message type %d.", (int) message->type);
2949 return 0;
2950 }
2951
2952 tm->message.type = message->type;
2953 dm_list_add(&seg->thin_messages, &tm->list);
2954
2955 return 1;
2956 }
2957
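/*
 * Usage sketch (illustrative only): queue creation of thin device 1 so
 * the message is sent later by _node_send_messages() during preload:
 *
 *	struct dm_thin_message msg = {
 *		.type = DM_THIN_MESSAGE_CREATE_THIN,
 *		.u.m_create_thin = { .device_id = 1 },
 *	};
 *	if (!dm_tree_node_add_thin_pool_message(pool_node, &msg))
 *		stack;
 */
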
2958 int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2959 uint64_t size,
2960 const char *thin_pool_uuid,
2961 uint32_t device_id)
2962 {
2963 struct load_segment *seg;
2964
2965 if (!_thin_validate_device_id(device_id))
2966 return_0;
2967
2968 if (!(seg = _add_segment(node, SEG_THIN, size)))
2969 return_0;
2970
2971 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2972 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2973 return 0;
2974 }
2975
2976 if (!_link_tree_nodes(node, seg->pool))
2977 return_0;
2978
2979 seg->device_id = device_id;
2980
2981 return 1;
2982 }
2983
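/*
 * The emitted thin table line is "<start> <size> thin <pool_dev>
 * <device_id>", e.g. with hypothetical numbers: "0 2097152 thin 253:5 1".
 */
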
2984 static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
2985 {
2986 struct seg_area *area;
2987
2988 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
2989 log_error("Failed to allocate target segment area.");
2990 return 0;
2991 }
2992
2993 area->dev_node = dev_node;
2994 area->offset = offset;
2995
2996 dm_list_add(&seg->areas, &area->list);
2997 seg->area_count++;
2998
2999 return 1;
3000 }
3001
3002 int dm_tree_node_add_target_area(struct dm_tree_node *node,
3003 const char *dev_name,
3004 const char *uuid,
3005 uint64_t offset)
3006 {
3007 struct load_segment *seg;
3008 struct stat info;
3009 struct dm_tree_node *dev_node;
3010
3011 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
3012 log_error("dm_tree_node_add_target_area called without device");
3013 return 0;
3014 }
3015
3016 if (uuid) {
3017 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
3018 log_error("Couldn't find area uuid %s.", uuid);
3019 return 0;
3020 }
3021 if (!_link_tree_nodes(node, dev_node))
3022 return_0;
3023 } else {
3024 if (stat(dev_name, &info) < 0) {
3025 log_error("Device %s not found.", dev_name);
3026 return 0;
3027 }
3028
3029 if (!S_ISBLK(info.st_mode)) {
3030 log_error("Device %s is not a block device.", dev_name);
3031 return 0;
3032 }
3033
3034 /* FIXME Check correct macro use */
3035 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3036 MINOR(info.st_rdev), 0)))
3037 return_0;
3038 }
3039
3040 if (!node->props.segment_count) {
3041 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
3042 return 0;
3043 }
3044
3045 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3046
3047 if (!_add_area(node, seg, dev_node, offset))
3048 return_0;
3049
3050 return 1;
3051 }
3052
3053 int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3054 {
3055 struct load_segment *seg;
3056
3057 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3058
3059 switch (seg->type) {
3060 case SEG_RAID1:
3061 case SEG_RAID4:
3062 case SEG_RAID5_LA:
3063 case SEG_RAID5_RA:
3064 case SEG_RAID5_LS:
3065 case SEG_RAID5_RS:
3066 case SEG_RAID6_ZR:
3067 case SEG_RAID6_NR:
3068 case SEG_RAID6_NC:
3069 break;
3070 default:
3071 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3072 return 0;
3073 }
3074
3075 if (!_add_area(node, seg, NULL, offset))
3076 return_0;
3077
3078 return 1;
3079 }
3080
3081 void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
3082 {
3083 node->dtree->cookie = cookie;
3084 }
3085
3086 uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
3087 {
3088 return node->dtree->cookie;
3089 }