1 /*
2 * Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15 #include "dmlib.h"
16 #include "libdm-targets.h"
17 #include "libdm-common.h"
18 #include "kdev_t.h"
19 #include "dm-ioctl.h"
20
21 #include <stdarg.h>
22 #include <sys/param.h>
23 #include <sys/utsname.h>
24
25 #define MAX_TARGET_PARAMSIZE 500000
26
27 /* FIXME Fix interface so this is used only by LVM */
28 #define UUID_PREFIX "LVM-"
29
30 #define REPLICATOR_LOCAL_SITE 0
31
32 /* Supported segment types */
33 enum {
34 SEG_CRYPT,
35 SEG_ERROR,
36 SEG_LINEAR,
37 SEG_MIRRORED,
38 SEG_REPLICATOR,
39 SEG_REPLICATOR_DEV,
40 SEG_SNAPSHOT,
41 SEG_SNAPSHOT_ORIGIN,
42 SEG_SNAPSHOT_MERGE,
43 SEG_STRIPED,
44 SEG_ZERO,
45 };
46
47 /* FIXME Add crypt and multipath support */
48
49 struct {
50 unsigned type;
51 const char *target;
52 } dm_segtypes[] = {
53 { SEG_CRYPT, "crypt" },
54 { SEG_ERROR, "error" },
55 { SEG_LINEAR, "linear" },
56 { SEG_MIRRORED, "mirror" },
57 { SEG_REPLICATOR, "replicator" },
58 { SEG_REPLICATOR_DEV, "replicator-dev" },
59 { SEG_SNAPSHOT, "snapshot" },
60 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
61 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
62 { SEG_STRIPED, "striped" },
63 { SEG_ZERO, "zero"},
64 };
65
66 /* Some segment types have a list of areas of other devices attached */
67 struct seg_area {
68 struct dm_list list;
69
70 struct dm_tree_node *dev_node;
71
72 uint64_t offset;
73
74 unsigned rsite_index; /* Replicator site index */
75 struct dm_tree_node *slog; /* Replicator sync log node */
76 uint64_t region_size; /* Replicator sync log size */
77 uint32_t flags; /* Replicator sync log flags */
78 };
79
80 /* Replicator-log has a list of sites */
81 /* FIXME: maybe move to seg_area too? */
82 struct replicator_site {
83 struct dm_list list;
84
85 unsigned rsite_index;
86 dm_replicator_mode_t mode;
87 uint32_t async_timeout;
88 uint32_t fall_behind_ios;
89 uint64_t fall_behind_data;
90 };
91
92 /* Per-segment properties */
93 struct load_segment {
94 struct dm_list list;
95
96 unsigned type;
97
98 uint64_t size;
99
100 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
101 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
102
103 uint32_t stripe_size; /* Striped */
104
105 int persistent; /* Snapshot */
106 uint32_t chunk_size; /* Snapshot */
107 struct dm_tree_node *cow; /* Snapshot */
108 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
109 struct dm_tree_node *merge; /* Snapshot */
110
111 struct dm_tree_node *log; /* Mirror + Replicator */
112 uint32_t region_size; /* Mirror */
113 unsigned clustered; /* Mirror */
114 unsigned mirror_area_count; /* Mirror */
115 uint32_t flags; /* Mirror log */
116 char *uuid; /* Clustered mirror log */
117
118 const char *cipher; /* Crypt */
119 const char *chainmode; /* Crypt */
120 const char *iv; /* Crypt */
121 uint64_t iv_offset; /* Crypt */
122 const char *key; /* Crypt */
123
124 const char *rlog_type; /* Replicator */
125 struct dm_list rsites; /* Replicator */
126 unsigned rsite_count; /* Replicator */
127 unsigned rdevice_count; /* Replicator */
128 struct dm_tree_node *replicator;/* Replicator-dev */
129 uint64_t rdevice_index; /* Replicator-dev */
130 };
131
132 /* Per-device properties */
133 struct load_properties {
134 int read_only;
135 uint32_t major;
136 uint32_t minor;
137
138 uint32_t read_ahead;
139 uint32_t read_ahead_flags;
140
141 unsigned segment_count;
142 unsigned size_changed;
143 struct dm_list segs;
144
145 const char *new_name;
146
147 /* If immediate_dev_node is set to 1, try to create the dev node
148 * as soon as possible (e.g. in preload stage even during traversal
149 * and processing of dm tree). This will also flush all stacked dev
150 * node operations, synchronizing with udev.
151 */
152 int immediate_dev_node;
153 };
154
155 /* Two of these are used to join two nodes with uses and used_by. */
156 struct dm_tree_link {
157 struct dm_list list;
158 struct dm_tree_node *node;
159 };
160
161 struct dm_tree_node {
162 struct dm_tree *dtree;
163
164 const char *name;
165 const char *uuid;
166 struct dm_info info;
167
168 struct dm_list uses; /* Nodes this node uses */
169 struct dm_list used_by; /* Nodes that use this node */
170
171 int activation_priority; /* 0 gets activated first */
172
173 uint16_t udev_flags; /* Udev control flags */
174
175 void *context; /* Externally supplied context */
176
177 struct load_properties props; /* For creation/table (re)load */
178
179 /*
180 * If presuspend of child node is needed
181 * Note: only direct child is allowed
182 */
183 struct dm_tree_node *presuspend_node;
184 };
185
186 struct dm_tree {
187 struct dm_pool *mem;
188 struct dm_hash_table *devs;
189 struct dm_hash_table *uuids;
190 struct dm_tree_node root;
191 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
192 int no_flush; /* 1 sets noflush (mirrors/multipath) */
193 uint32_t cookie;
194 };
195
196 struct dm_tree *dm_tree_create(void)
197 {
198 struct dm_tree *dtree;
199
200 if (!(dtree = dm_malloc(sizeof(*dtree)))) {
201 log_error("dm_tree_create malloc failed");
202 return NULL;
203 }
204
205 memset(dtree, 0, sizeof(*dtree));
206 dtree->root.dtree = dtree;
207 dm_list_init(&dtree->root.uses);
208 dm_list_init(&dtree->root.used_by);
209 dtree->skip_lockfs = 0;
210 dtree->no_flush = 0;
211
212 if (!(dtree->mem = dm_pool_create("dtree", 1024))) {
213 log_error("dtree pool creation failed");
214 dm_free(dtree);
215 return NULL;
216 }
217
218 if (!(dtree->devs = dm_hash_create(8))) {
219 log_error("dtree hash creation failed");
220 dm_pool_destroy(dtree->mem);
221 dm_free(dtree);
222 return NULL;
223 }
224
225 if (!(dtree->uuids = dm_hash_create(32))) {
226 log_error("dtree uuid hash creation failed");
227 dm_hash_destroy(dtree->devs);
228 dm_pool_destroy(dtree->mem);
229 dm_free(dtree);
230 return NULL;
231 }
232
233 return dtree;
234 }
235
236 void dm_tree_free(struct dm_tree *dtree)
237 {
238 if (!dtree)
239 return;
240
241 dm_hash_destroy(dtree->uuids);
242 dm_hash_destroy(dtree->devs);
243 dm_pool_destroy(dtree->mem);
244 dm_free(dtree);
245 }
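
/*
 * Illustrative lifecycle sketch (hypothetical caller code, error handling
 * trimmed; 'major'/'minor' stand for a real device number):
 *
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *	if (!dm_tree_add_dev(dtree, major, minor)) {
 *		dm_tree_free(dtree);
 *		return 0;
 *	}
 *	... walk, suspend or activate the tree ...
 *	dm_tree_free(dtree);
 */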
246
247 static int _nodes_are_linked(const struct dm_tree_node *parent,
248 const struct dm_tree_node *child)
249 {
250 struct dm_tree_link *dlink;
251
252 dm_list_iterate_items(dlink, &parent->uses)
253 if (dlink->node == child)
254 return 1;
255
256 return 0;
257 }
258
259 static int _link(struct dm_list *list, struct dm_tree_node *node)
260 {
261 struct dm_tree_link *dlink;
262
263 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
264 log_error("dtree link allocation failed");
265 return 0;
266 }
267
268 dlink->node = node;
269 dm_list_add(list, &dlink->list);
270
271 return 1;
272 }
273
274 static int _link_nodes(struct dm_tree_node *parent,
275 struct dm_tree_node *child)
276 {
277 if (_nodes_are_linked(parent, child))
278 return 1;
279
280 if (!_link(&parent->uses, child))
281 return 0;
282
283 if (!_link(&child->used_by, parent))
284 return 0;
285
286 return 1;
287 }
288
289 static void _unlink(struct dm_list *list, struct dm_tree_node *node)
290 {
291 struct dm_tree_link *dlink;
292
293 dm_list_iterate_items(dlink, list)
294 if (dlink->node == node) {
295 dm_list_del(&dlink->list);
296 break;
297 }
298 }
299
300 static void _unlink_nodes(struct dm_tree_node *parent,
301 struct dm_tree_node *child)
302 {
303 if (!_nodes_are_linked(parent, child))
304 return;
305
306 _unlink(&parent->uses, child);
307 _unlink(&child->used_by, parent);
308 }
309
310 static int _add_to_toplevel(struct dm_tree_node *node)
311 {
312 return _link_nodes(&node->dtree->root, node);
313 }
314
315 static void _remove_from_toplevel(struct dm_tree_node *node)
316 {
317 _unlink_nodes(&node->dtree->root, node);
318 }
319
320 static int _add_to_bottomlevel(struct dm_tree_node *node)
321 {
322 return _link_nodes(node, &node->dtree->root);
323 }
324
325 static void _remove_from_bottomlevel(struct dm_tree_node *node)
326 {
327 _unlink_nodes(node, &node->dtree->root);
328 }
329
330 static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
331 {
332 /* Don't link to root node if child already has a parent */
333 if (parent == &parent->dtree->root) {
334 if (dm_tree_node_num_children(child, 1))
335 return 1;
336 } else
337 _remove_from_toplevel(child);
338
339 if (child == &child->dtree->root) {
340 if (dm_tree_node_num_children(parent, 0))
341 return 1;
342 } else
343 _remove_from_bottomlevel(parent);
344
345 return _link_nodes(parent, child);
346 }
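
/*
 * Note: every parent/child edge is stored twice - one dm_tree_link on
 * parent->uses and one on child->used_by - and _link_nodes() /
 * _unlink_nodes() always update both lists together.  _link_tree_nodes()
 * additionally drops the child from the root's top-level list and the
 * parent from the bottom-level list, so the artificial root only ever
 * references genuinely top- or bottom-level devices.
 */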
347
348 static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
349 const char *name,
350 const char *uuid,
351 struct dm_info *info,
352 void *context,
353 uint16_t udev_flags)
354 {
355 struct dm_tree_node *node;
356 uint64_t dev;
357
358 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
359 log_error("_create_dm_tree_node alloc failed");
360 return NULL;
361 }
362
363 node->dtree = dtree;
364
365 node->name = name;
366 node->uuid = uuid;
367 node->info = *info;
368 node->context = context;
369 node->udev_flags = udev_flags;
370 node->activation_priority = 0;
371
372 dm_list_init(&node->uses);
373 dm_list_init(&node->used_by);
374 dm_list_init(&node->props.segs);
375
376 dev = MKDEV(info->major, info->minor);
377
378 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
379 sizeof(dev), node)) {
380 log_error("dtree node hash insertion failed");
381 dm_pool_free(dtree->mem, node);
382 return NULL;
383 }
384
385 if (uuid && *uuid &&
386 !dm_hash_insert(dtree->uuids, uuid, node)) {
387 log_error("dtree uuid hash insertion failed");
388 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
389 sizeof(dev));
390 dm_pool_free(dtree->mem, node);
391 return NULL;
392 }
393
394 return node;
395 }
396
397 static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
398 uint32_t major, uint32_t minor)
399 {
400 uint64_t dev = MKDEV(major, minor);
401
402 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
403 sizeof(dev));
404 }
405
406 static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
407 const char *uuid)
408 {
409 struct dm_tree_node *node;
410
411 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
412 return node;
413
414 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
415 return NULL;
416
417 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
418 }
419
420 static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
421 const char **name, const char **uuid,
422 struct dm_info *info, struct dm_deps **deps)
423 {
424 memset(info, 0, sizeof(*info));
425
426 if (!dm_is_dm_major(major)) {
427 *name = "";
428 *uuid = "";
429 *deps = NULL;
430 info->major = major;
431 info->minor = minor;
432 info->exists = 0;
433 info->live_table = 0;
434 info->inactive_table = 0;
435 info->read_only = 0;
436 return 1;
437 }
438
439 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
440 log_error("deps dm_task creation failed");
441 return 0;
442 }
443
444 if (!dm_task_set_major(*dmt, major)) {
445 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
446 major, minor);
447 goto failed;
448 }
449
450 if (!dm_task_set_minor(*dmt, minor)) {
451 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
452 major, minor);
453 goto failed;
454 }
455
456 if (!dm_task_run(*dmt)) {
457 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
458 major, minor);
459 goto failed;
460 }
461
462 if (!dm_task_get_info(*dmt, info)) {
463 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
464 major, minor);
465 goto failed;
466 }
467
468 if (!info->exists) {
469 *name = "";
470 *uuid = "";
471 *deps = NULL;
472 } else {
473 if (info->major != major) {
474 log_error("Inconsistent dtree major number: %u != %u",
475 major, info->major);
476 goto failed;
477 }
478 if (info->minor != minor) {
479 log_error("Inconsistent dtree minor number: %u != %u",
480 minor, info->minor);
481 goto failed;
482 }
483 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
484 log_error("name pool_strdup failed");
485 goto failed;
486 }
487 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
488 log_error("uuid pool_strdup failed");
489 goto failed;
490 }
491 *deps = dm_task_get_deps(*dmt);
492 }
493
494 return 1;
495
496 failed:
497 dm_task_destroy(*dmt);
498 return 0;
499 }
500
501 static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
502 struct dm_tree_node *parent,
503 uint32_t major, uint32_t minor,
504 uint16_t udev_flags)
505 {
506 struct dm_task *dmt = NULL;
507 struct dm_info info;
508 struct dm_deps *deps = NULL;
509 const char *name = NULL;
510 const char *uuid = NULL;
511 struct dm_tree_node *node = NULL;
512 uint32_t i;
513 int new = 0;
514
515 /* Already in tree? */
516 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
517 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
518 return_NULL;
519
520 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
521 NULL, udev_flags)))
522 goto_out;
523 new = 1;
524 }
525
526 if (!_link_tree_nodes(parent, node)) {
527 node = NULL;
528 goto_out;
529 }
530
531 /* If node was already in tree, no need to recurse. */
532 if (!new)
533 goto out;
534
535 /* Can't recurse if not a mapped device or there are no dependencies */
536 if (!node->info.exists || !deps->count) {
537 if (!_add_to_bottomlevel(node)) {
538 stack;
539 node = NULL;
540 }
541 goto out;
542 }
543
544 /* Add dependencies to tree */
545 for (i = 0; i < deps->count; i++)
546 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
547 MINOR(deps->device[i]), udev_flags)) {
548 node = NULL;
549 goto_out;
550 }
551
552 out:
553 if (dmt)
554 dm_task_destroy(dmt);
555
556 return node;
557 }
558
559 static int _node_clear_table(struct dm_tree_node *dnode)
560 {
561 struct dm_task *dmt;
562 struct dm_info *info;
563 const char *name;
564 int r;
565
566 if (!(info = &dnode->info)) {
567 log_error("_node_clear_table failed: missing info");
568 return 0;
569 }
570
571 if (!(name = dm_tree_node_get_name(dnode))) {
572 log_error("_node_clear_table failed: missing name");
573 return 0;
574 }
575
576 /* Is there a table? */
577 if (!info->exists || !info->inactive_table)
578 return 1;
579
580 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
581 name, info->major, info->minor);
582
583 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
585 log_error("Table clear dm_task creation failed for %s", name);
586 return 0;
587 }
588
589 if (!dm_task_set_major(dmt, info->major) ||
590 !dm_task_set_minor(dmt, info->minor)) {
591 log_error("Failed to set device number for %s table clear", name);
592 dm_task_destroy(dmt);
593 return 0;
594 }
595
596 r = dm_task_run(dmt);
597
598 if (!dm_task_get_info(dmt, info)) {
599 log_error("_node_clear_table failed: info missing after running task for %s", name);
600 r = 0;
601 }
602
603 dm_task_destroy(dmt);
604
605 return r;
606 }
607
608 struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
609 const char *name,
610 const char *uuid,
611 uint32_t major, uint32_t minor,
612 int read_only,
613 int clear_inactive,
614 void *context)
615 {
616 struct dm_tree_node *dnode;
617 struct dm_info info;
618 const char *name2;
619 const char *uuid2;
620
621 /* Do we need to add node to tree? */
622 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
623 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
624 log_error("name pool_strdup failed");
625 return NULL;
626 }
627 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
628 log_error("uuid pool_strdup failed");
629 return NULL;
630 }
631
632 info.major = 0;
633 info.minor = 0;
634 info.exists = 0;
635 info.live_table = 0;
636 info.inactive_table = 0;
637 info.read_only = 0;
638
639 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
640 context, 0)))
641 return_NULL;
642
643 /* Attach to root node until a table is supplied */
644 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
645 return_NULL;
646
647 dnode->props.major = major;
648 dnode->props.minor = minor;
649 dnode->props.new_name = NULL;
650 dnode->props.size_changed = 0;
651 } else if (strcmp(name, dnode->name)) {
652 /* Do we need to rename node? */
653 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
654 log_error("name pool_strdup failed");
655 return NULL;
656 }
657 }
658
659 dnode->props.read_only = read_only ? 1 : 0;
660 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
661 dnode->props.read_ahead_flags = 0;
662
663 if (clear_inactive && !_node_clear_table(dnode))
664 return_NULL;
665
666 dnode->context = context;
667 dnode->udev_flags = 0;
668
669 return dnode;
670 }
671
672 struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
673 const char *name,
674 const char *uuid,
675 uint32_t major,
676 uint32_t minor,
677 int read_only,
678 int clear_inactive,
679 void *context,
680 uint16_t udev_flags)
681 {
682 struct dm_tree_node *node;
683
684 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
685 clear_inactive, context)))
686 node->udev_flags = udev_flags;
687
688 return node;
689 }
690
691
692 void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
693 uint32_t read_ahead,
694 uint32_t read_ahead_flags)
695 {
696 dnode->props.read_ahead = read_ahead;
697 dnode->props.read_ahead_flags = read_ahead_flags;
698 }
699
700 void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
701 struct dm_tree_node *presuspend_node)
702 {
703 node->presuspend_node = presuspend_node;
704 }
705
706 int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
707 {
708 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
709 }
710
711 int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
712 uint32_t minor, uint16_t udev_flags)
713 {
714 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
715 }
716
717 const char *dm_tree_node_get_name(const struct dm_tree_node *node)
718 {
719 return node->info.exists ? node->name : "";
720 }
721
722 const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
723 {
724 return node->info.exists ? node->uuid : "";
725 }
726
727 const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
728 {
729 return &node->info;
730 }
731
732 void *dm_tree_node_get_context(const struct dm_tree_node *node)
733 {
734 return node->context;
735 }
736
737 int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
738 {
739 return dnode->props.size_changed;
740 }
741
742 int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
743 {
744 if (inverted) {
745 if (_nodes_are_linked(&node->dtree->root, node))
746 return 0;
747 return dm_list_size(&node->used_by);
748 }
749
750 if (_nodes_are_linked(node, &node->dtree->root))
751 return 0;
752
753 return dm_list_size(&node->uses);
754 }
755
756 /*
757 * Returns 1 if no prefix supplied
758 */
759 static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
760 {
761 if (!uuid_prefix)
762 return 1;
763
764 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
765 return 1;
766
767 /* Handle transition: active device uuids might be missing the prefix */
768 if (uuid_prefix_len <= 4)
769 return 0;
770
771 if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
772 return 0;
773
774 if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
775 return 0;
776
777 if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
778 return 1;
779
780 return 0;
781 }
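
/*
 * Illustrative results (assuming UUID_PREFIX "LVM-"):
 *
 *	_uuid_prefix_matches("LVM-abc123", "LVM-abc", 7) -> 1  plain prefix match
 *	_uuid_prefix_matches("abc123",     "LVM-abc", 7) -> 1  old unprefixed uuid,
 *	                                                        transition rule above
 *	_uuid_prefix_matches("LVM-xyz789", "LVM-abc", 7) -> 0  prefix differs
 *	_uuid_prefix_matches("anything",   NULL,      0) -> 1  no prefix supplied
 */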
782
783 /*
784 * Returns 1 if no children.
785 */
786 static int _children_suspended(struct dm_tree_node *node,
787 uint32_t inverted,
788 const char *uuid_prefix,
789 size_t uuid_prefix_len)
790 {
791 struct dm_list *list;
792 struct dm_tree_link *dlink;
793 const struct dm_info *dinfo;
794 const char *uuid;
795
796 if (inverted) {
797 if (_nodes_are_linked(&node->dtree->root, node))
798 return 1;
799 list = &node->used_by;
800 } else {
801 if (_nodes_are_linked(node, &node->dtree->root))
802 return 1;
803 list = &node->uses;
804 }
805
806 dm_list_iterate_items(dlink, list) {
807 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
808 stack;
809 continue;
810 }
811
812 /* Ignore if it doesn't belong to this VG */
813 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
814 continue;
815
816 /* Ignore if parent node wants to presuspend this node */
817 if (dlink->node->presuspend_node == node)
818 continue;
819
820 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
821 stack; /* FIXME Is this normal? */
822 return 0;
823 }
824
825 if (!dinfo->suspended)
826 return 0;
827 }
828
829 return 1;
830 }
831
832 /*
833 * Set major and minor to zero for root of tree.
834 */
835 struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
836 uint32_t major,
837 uint32_t minor)
838 {
839 if (!major && !minor)
840 return &dtree->root;
841
842 return _find_dm_tree_node(dtree, major, minor);
843 }
844
845 /*
846 * Set uuid to NULL for root of tree.
847 */
848 struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
849 const char *uuid)
850 {
851 if (!uuid || !*uuid)
852 return &dtree->root;
853
854 return _find_dm_tree_node_by_uuid(dtree, uuid);
855 }
856
857 /*
858 * First time set *handle to NULL.
859 * Set inverted to invert the tree.
860 */
861 struct dm_tree_node *dm_tree_next_child(void **handle,
862 const struct dm_tree_node *parent,
863 uint32_t inverted)
864 {
865 struct dm_list **dlink = (struct dm_list **) handle;
866 const struct dm_list *use_list;
867
868 if (inverted)
869 use_list = &parent->used_by;
870 else
871 use_list = &parent->uses;
872
873 if (!*dlink)
874 *dlink = dm_list_first(use_list);
875 else
876 *dlink = dm_list_next(use_list, *dlink);
877
878 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
879 }
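
/*
 * Iteration sketch (hypothetical caller code; 'parent' is whichever node
 * is being walked).  The handle must start out NULL; pass inverted == 1
 * to walk used_by (parents) instead of uses (children).
 *
 *	void *handle = NULL;
 *	struct dm_tree_node *child;
 *
 *	while ((child = dm_tree_next_child(&handle, parent, 0)))
 *		log_debug("child: %s", dm_tree_node_get_name(child));
 */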
880
881 /*
882 * Deactivate a device with its dependencies if the uuid prefix matches.
883 */
884 static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
885 struct dm_info *info)
886 {
887 struct dm_task *dmt;
888 int r;
889
890 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
891 log_error("_info_by_dev: dm_task creation failed");
892 return 0;
893 }
894
895 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
896 log_error("_info_by_dev: Failed to set device number");
897 dm_task_destroy(dmt);
898 return 0;
899 }
900
901 if (!with_open_count && !dm_task_no_open_count(dmt))
902 log_error("Failed to disable open_count");
903
904 if ((r = dm_task_run(dmt)))
905 r = dm_task_get_info(dmt, info);
906
907 dm_task_destroy(dmt);
908
909 return r;
910 }
911
912 /* Check if all parent nodes of given node have open_count == 0 */
913 static int _node_has_closed_parents(struct dm_tree_node *node,
914 const char *uuid_prefix,
915 size_t uuid_prefix_len)
916 {
917 struct dm_tree_link *dlink;
918 const struct dm_info *dinfo;
919 struct dm_info info;
920 const char *uuid;
921
922 /* Iterate through parents of this node */
923 dm_list_iterate_items(dlink, &node->used_by) {
924 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
925 stack;
926 continue;
927 }
928
929 /* Ignore if it doesn't belong to this VG */
930 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
931 continue;
932
933 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
934 stack; /* FIXME Is this normal? */
935 return 0;
936 }
937
938 /* Refresh open_count */
939 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
940 !info.exists)
941 continue;
942
943 if (info.open_count)
944 return 0;
945 }
946
947 return 1;
948 }
949
950 static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
951 uint32_t *cookie, uint16_t udev_flags)
952 {
953 struct dm_task *dmt;
954 int r = 0;
955
956 log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
957
958 if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
959 log_error("Deactivation dm_task creation failed for %s", name);
960 return 0;
961 }
962
963 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
964 log_error("Failed to set device number for %s deactivation", name);
965 goto out;
966 }
967
968 if (!dm_task_no_open_count(dmt))
969 log_error("Failed to disable open_count");
970
971 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
972 goto out;
973
974 r = dm_task_run(dmt);
975
976 /* FIXME Until kernel returns actual name so dm-ioctl.c can handle it */
977 rm_dev_node(name, dmt->cookie_set &&
978 !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG));
979
980 /* FIXME Remove node from tree or mark invalid? */
981
982 out:
983 dm_task_destroy(dmt);
984
985 return r;
986 }
987
988 static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
989 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
990 {
991 struct dm_task *dmt;
992 int r = 0;
993
994 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
995
996 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
997 log_error("Rename dm_task creation failed for %s", old_name);
998 return 0;
999 }
1000
1001 if (!dm_task_set_name(dmt, old_name)) {
1002 log_error("Failed to set name for %s rename.", old_name);
1003 goto out;
1004 }
1005
1006 if (!dm_task_set_newname(dmt, new_name))
1007 goto_out;
1008
1009 if (!dm_task_no_open_count(dmt))
1010 log_error("Failed to disable open_count");
1011
1012 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1013 goto out;
1014
1015 r = dm_task_run(dmt);
1016
1017 out:
1018 dm_task_destroy(dmt);
1019
1020 return r;
1021 }
1022
1023 /* FIXME Merge with _suspend_node? */
1024 static int _resume_node(const char *name, uint32_t major, uint32_t minor,
1025 uint32_t read_ahead, uint32_t read_ahead_flags,
1026 struct dm_info *newinfo, uint32_t *cookie,
1027 uint16_t udev_flags)
1028 {
1029 struct dm_task *dmt;
1030 int r = 0;
1031
1032 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1033
1034 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
1035 log_error("Suspend dm_task creation failed for %s", name);
1036 return 0;
1037 }
1038
1039 /* FIXME Kernel should fill in name on return instead */
1040 if (!dm_task_set_name(dmt, name)) {
1041 log_error("Failed to set readahead device name for %s", name);
1042 goto out;
1043 }
1044
1045 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1046 log_error("Failed to set device number for %s resumption.", name);
1047 goto out;
1048 }
1049
1050 if (!dm_task_no_open_count(dmt))
1051 log_error("Failed to disable open_count");
1052
1053 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1054 log_error("Failed to set read ahead");
1055
1056 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1057 goto out;
1058
1059 if ((r = dm_task_run(dmt)))
1060 r = dm_task_get_info(dmt, newinfo);
1061
1062 out:
1063 dm_task_destroy(dmt);
1064
1065 return r;
1066 }
1067
1068 static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
1069 int skip_lockfs, int no_flush, struct dm_info *newinfo)
1070 {
1071 struct dm_task *dmt;
1072 int r;
1073
1074 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1075 name, major, minor,
1076 skip_lockfs ? "" : " with filesystem sync",
1077 no_flush ? "" : " with device flush");
1078
1079 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1080 log_error("Suspend dm_task creation failed for %s", name);
1081 return 0;
1082 }
1083
1084 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1085 log_error("Failed to set device number for %s suspension.", name);
1086 dm_task_destroy(dmt);
1087 return 0;
1088 }
1089
1090 if (!dm_task_no_open_count(dmt))
1091 log_error("Failed to disable open_count");
1092
1093 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1094 log_error("Failed to set skip_lockfs flag.");
1095
1096 if (no_flush && !dm_task_no_flush(dmt))
1097 log_error("Failed to set no_flush flag.");
1098
1099 if ((r = dm_task_run(dmt)))
1100 r = dm_task_get_info(dmt, newinfo);
1101
1102 dm_task_destroy(dmt);
1103
1104 return r;
1105 }
1106
1107 /*
1108 * FIXME Don't attempt to deactivate known internal dependencies.
1109 */
1110 static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1111 const char *uuid_prefix,
1112 size_t uuid_prefix_len,
1113 unsigned level)
1114 {
1115 int r = 1;
1116 void *handle = NULL;
1117 struct dm_tree_node *child = dnode;
1118 struct dm_info info;
1119 const struct dm_info *dinfo;
1120 const char *name;
1121 const char *uuid;
1122
1123 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1124 if (!(dinfo = dm_tree_node_get_info(child))) {
1125 stack;
1126 continue;
1127 }
1128
1129 if (!(name = dm_tree_node_get_name(child))) {
1130 stack;
1131 continue;
1132 }
1133
1134 if (!(uuid = dm_tree_node_get_uuid(child))) {
1135 stack;
1136 continue;
1137 }
1138
1139 /* Ignore if it doesn't belong to this VG */
1140 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1141 continue;
1142
1143 /* Refresh open_count */
1144 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1145 !info.exists)
1146 continue;
1147
1148 /* Also checking open_count in parent nodes of presuspend_node */
1149 if (info.open_count ||
1150 (child->presuspend_node &&
1151 !_node_has_closed_parents(child->presuspend_node,
1152 uuid_prefix, uuid_prefix_len))) {
1153 /* Only report error from (likely non-internal) dependency at top level */
1154 if (!level) {
1155 log_error("Unable to deactivate open %s (%" PRIu32
1156 ":%" PRIu32 ")", name, info.major,
1157 info.minor);
1158 r = 0;
1159 }
1160 continue;
1161 }
1162
1163 /* Suspend child node first if requested */
1164 if (child->presuspend_node &&
1165 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1166 continue;
1167
1168 if (!_deactivate_node(name, info.major, info.minor,
1169 &child->dtree->cookie, child->udev_flags)) {
1170 log_error("Unable to deactivate %s (%" PRIu32
1171 ":%" PRIu32 ")", name, info.major,
1172 info.minor);
1173 r = 0;
1174 continue;
1175 }
1176
1177 if (dm_tree_node_num_children(child, 0)) {
1178 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
1179 return_0;
1180 }
1181 }
1182
1183 return r;
1184 }
1185
1186 int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1187 const char *uuid_prefix,
1188 size_t uuid_prefix_len)
1189 {
1190 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1191 }
1192
1193 void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1194 {
1195 dnode->dtree->skip_lockfs = 1;
1196 }
1197
1198 void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1199 {
1200 dnode->dtree->no_flush = 1;
1201 }
1202
1203 int dm_tree_suspend_children(struct dm_tree_node *dnode,
1204 const char *uuid_prefix,
1205 size_t uuid_prefix_len)
1206 {
1207 int r = 1;
1208 void *handle = NULL;
1209 struct dm_tree_node *child = dnode;
1210 struct dm_info info, newinfo;
1211 const struct dm_info *dinfo;
1212 const char *name;
1213 const char *uuid;
1214
1215 /* Suspend nodes at this level of the tree */
1216 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1217 if (!(dinfo = dm_tree_node_get_info(child))) {
1218 stack;
1219 continue;
1220 }
1221
1222 if (!(name = dm_tree_node_get_name(child))) {
1223 stack;
1224 continue;
1225 }
1226
1227 if (!(uuid = dm_tree_node_get_uuid(child))) {
1228 stack;
1229 continue;
1230 }
1231
1232 /* Ignore if it doesn't belong to this VG */
1233 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1234 continue;
1235
1236 /* Ensure immediate parents are already suspended */
1237 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1238 continue;
1239
1240 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
1241 !info.exists || info.suspended)
1242 continue;
1243
1244 if (!_suspend_node(name, info.major, info.minor,
1245 child->dtree->skip_lockfs,
1246 child->dtree->no_flush, &newinfo)) {
1247 log_error("Unable to suspend %s (%" PRIu32
1248 ":%" PRIu32 ")", name, info.major,
1249 info.minor);
1250 r = 0;
1251 continue;
1252 }
1253
1254 /* Update cached info */
1255 child->info = newinfo;
1256 }
1257
1258 /* Then suspend any child nodes */
1259 handle = NULL;
1260
1261 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1262 if (!(uuid = dm_tree_node_get_uuid(child))) {
1263 stack;
1264 continue;
1265 }
1266
1267 /* Ignore if it doesn't belong to this VG */
1268 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1269 continue;
1270
1271 if (dm_tree_node_num_children(child, 0))
1272 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1273 return_0;
1274 }
1275
1276 return r;
1277 }
1278
1279 int dm_tree_activate_children(struct dm_tree_node *dnode,
1280 const char *uuid_prefix,
1281 size_t uuid_prefix_len)
1282 {
1283 int r = 1;
1284 void *handle = NULL;
1285 struct dm_tree_node *child = dnode;
1286 struct dm_info newinfo;
1287 const char *name;
1288 const char *uuid;
1289 int priority;
1290
1291 /* Activate children first */
1292 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1293 if (!(uuid = dm_tree_node_get_uuid(child))) {
1294 stack;
1295 continue;
1296 }
1297
1298 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1299 continue;
1300
1301 if (dm_tree_node_num_children(child, 0))
1302 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1303 return_0;
1304 }
1305
1306 handle = NULL;
1307
1308 for (priority = 0; priority < 3; priority++) {
1309 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1310 if (!(uuid = dm_tree_node_get_uuid(child))) {
1311 stack;
1312 continue;
1313 }
1314
1315 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1316 continue;
1317
1318 if (priority != child->activation_priority)
1319 continue;
1320
1321 if (!(name = dm_tree_node_get_name(child))) {
1322 stack;
1323 continue;
1324 }
1325
1326 /* Rename? */
1327 if (child->props.new_name) {
1328 if (!_rename_node(name, child->props.new_name, child->info.major,
1329 child->info.minor, &child->dtree->cookie,
1330 child->udev_flags)) {
1331 log_error("Failed to rename %s (%" PRIu32
1332 ":%" PRIu32 ") to %s", name, child->info.major,
1333 child->info.minor, child->props.new_name);
1334 return 0;
1335 }
1336 child->name = child->props.new_name;
1337 child->props.new_name = NULL;
1338 }
1339
1340 if (!child->info.inactive_table && !child->info.suspended)
1341 continue;
1342
1343 if (!_resume_node(child->name, child->info.major, child->info.minor,
1344 child->props.read_ahead, child->props.read_ahead_flags,
1345 &newinfo, &child->dtree->cookie, child->udev_flags)) {
1346 log_error("Unable to resume %s (%" PRIu32
1347 ":%" PRIu32 ")", child->name, child->info.major,
1348 child->info.minor);
1349 r = 0;
1350 continue;
1351 }
1352
1353 /* Update cached info */
1354 child->info = newinfo;
1355 }
1356 }
1357
1358 handle = NULL;
1359
1360 return r;
1361 }
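
/*
 * Note: activation runs in three passes keyed on activation_priority -
 * 0 for ordinary devices, 1 for snapshot origins, snapshot-merge devices
 * and local replicator nodes, 2 for a snapshot that is being merged - so
 * that origins are only resumed after the snapshots depending on them.
 */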
1362
1363 static int _create_node(struct dm_tree_node *dnode)
1364 {
1365 int r = 0;
1366 struct dm_task *dmt;
1367
1368 log_verbose("Creating %s", dnode->name);
1369
1370 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1371 log_error("Create dm_task creation failed for %s", dnode->name);
1372 return 0;
1373 }
1374
1375 if (!dm_task_set_name(dmt, dnode->name)) {
1376 log_error("Failed to set device name for %s", dnode->name);
1377 goto out;
1378 }
1379
1380 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1381 log_error("Failed to set uuid for %s", dnode->name);
1382 goto out;
1383 }
1384
1385 if (dnode->props.major &&
1386 (!dm_task_set_major(dmt, dnode->props.major) ||
1387 !dm_task_set_minor(dmt, dnode->props.minor))) {
1388 log_error("Failed to set device number for %s creation.", dnode->name);
1389 goto out;
1390 }
1391
1392 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1393 log_error("Failed to set read only flag for %s", dnode->name);
1394 goto out;
1395 }
1396
1397 if (!dm_task_no_open_count(dmt))
1398 log_error("Failed to disable open_count");
1399
1400 if ((r = dm_task_run(dmt)))
1401 r = dm_task_get_info(dmt, &dnode->info);
1402
1403 out:
1404 dm_task_destroy(dmt);
1405
1406 return r;
1407 }
1408
1409
1410 static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
1411 {
1412 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
1413 log_error("Failed to format %s device number for %s as dm "
1414 "target (%u,%u)",
1415 node->name, node->uuid, node->info.major, node->info.minor);
1416 return 0;
1417 }
1418
1419 return 1;
1420 }
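
/*
 * Illustrative result (assuming dm_format_dev() renders the usual
 * "major:minor" form): a node with device number 253:4 yields the string
 * "253:4", which is what the table-building code below embeds in target
 * parameter lines.
 */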
1421
1422 /* Simplify string emitting code */
1423 #define EMIT_PARAMS(p, str...)\
1424 do {\
1425 int w;\
1426 if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
1427 stack; /* Out of space */\
1428 return -1;\
1429 }\
1430 p += w;\
1431 } while (0)
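
/*
 * Usage sketch (hypothetical fragment from inside an emitter function,
 * where 'params', 'paramsize', 'seg', 'devbuf' and 'area' are in scope):
 *
 *	int pos = 0;
 *
 *	EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
 *	EMIT_PARAMS(pos, "%s %" PRIu64, devbuf, area->offset);
 *
 * A negative dm_snprintf() return (output truncated) makes the emitter
 * return -1, and _emit_segment() below retries with a doubled buffer.
 */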
1432
1433 /*
1434 * _emit_areas_line
1435 *
1436 * Returns: 1 on success, 0 on failure
1437 */
1438 static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
1439 struct load_segment *seg, char *params,
1440 size_t paramsize, int *pos)
1441 {
1442 struct seg_area *area;
1443 char devbuf[DM_FORMAT_DEV_BUFSIZE];
1444 unsigned first_time = 1;
1445 const char *logtype, *synctype;
1446 unsigned log_parm_count;
1447
1448 dm_list_iterate_items(area, &seg->areas) {
1449 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1450 return_0;
1451
1452 switch (seg->type) {
1453 case SEG_REPLICATOR_DEV:
1454 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1455 if (first_time)
1456 EMIT_PARAMS(*pos, " nolog 0");
1457 else {
1458 /* Remote devices */
1459 log_parm_count = (area->flags &
1460 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1461
1462 if (!area->slog) {
1463 devbuf[0] = 0; /* Only core log parameters */
1464 logtype = "core";
1465 } else {
1466 devbuf[0] = ' '; /* Extra space before device name */
1467 if (!_build_dev_string(devbuf + 1,
1468 sizeof(devbuf) - 1,
1469 area->slog))
1470 return_0;
1471 logtype = "disk";
1472 log_parm_count++; /* Extra sync log device name parameter */
1473 }
1474
1475 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1476 log_parm_count, devbuf, area->region_size);
1477
1478 synctype = (area->flags & DM_NOSYNC) ?
1479 " nosync" : (area->flags & DM_FORCESYNC) ?
1480 " sync" : NULL;
1481
1482 if (synctype)
1483 EMIT_PARAMS(*pos, "%s", synctype);
1484 }
1485 break;
1486 default:
1487 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1488 devbuf, area->offset);
1489 }
1490
1491 first_time = 0;
1492 }
1493
1494 return 1;
1495 }
1496
1497 static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
1498 size_t paramsize, int *pos)
1499 {
1500 const struct load_segment *rlog_seg;
1501 struct replicator_site *rsite;
1502 char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
1503 unsigned parm_count;
1504
1505 if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
1506 return_0;
1507
1508 rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
1509 struct load_segment);
1510
1511 EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
1512 seg->rlog_type, rlogbuf, rlog_seg->size);
1513
1514 dm_list_iterate_items(rsite, &seg->rsites) {
1515 parm_count = (rsite->fall_behind_data
1516 || rsite->fall_behind_ios
1517 || rsite->async_timeout) ? 4 : 2;
1518
1519 EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
1520 (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");
1521
1522 if (rsite->fall_behind_data)
1523 EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
1524 else if (rsite->fall_behind_ios)
1525 EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
1526 else if (rsite->async_timeout)
1527 EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
1528 }
1529
1530 return 1;
1531 }
1532
1533 /*
1534 * Returns: 1 on success, 0 on failure
1535 */
1536 static int _mirror_emit_segment_line(struct dm_task *dmt, uint32_t major,
1537 uint32_t minor, struct load_segment *seg,
1538 uint64_t *seg_start, char *params,
1539 size_t paramsize)
1540 {
1541 int r;
1542 int block_on_error = 0;
1543 int handle_errors = 0;
1544 int dm_log_userspace = 0;
1545 struct utsname uts;
1546 unsigned log_parm_count;
1547 int pos = 0;
1548 char logbuf[DM_FORMAT_DEV_BUFSIZE];
1549 const char *logtype;
1550 unsigned kmaj, kmin, krel;
1551
1552 if (uname(&uts) == -1 || sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel) != 3) {
1553 log_error("Cannot read kernel release version");
1554 return 0;
1555 }
1556
1557 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1558 /*
1559 * Originally, block_on_error was an argument to the log
1560 * portion of the mirror CTR table. It was renamed to
1561 * "handle_errors" and now resides in the 'features'
1562 * section of the mirror CTR table (i.e. at the end).
1563 *
1564 * We can identify whether to use "block_on_error" or
1565 * "handle_errors" by the dm-mirror module's version
1566 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1567 */
1568 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
1569 handle_errors = 1;
1570 else
1571 block_on_error = 1;
1572 }
1573
1574 if (seg->clustered) {
1575 /* Cluster mirrors require a UUID */
1576 if (!seg->uuid)
1577 return_0;
1578
1579 /*
1580 * Cluster mirrors used to have their own log
1581 * types. Now they are accessed through the
1582 * userspace log type.
1583 *
1584 * The dm-log-userspace module was added to the
1585 * 2.6.31 kernel.
1586 */
1587 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
1588 dm_log_userspace = 1;
1589 }
1590
1591 /* Region size */
1592 log_parm_count = 1;
1593
1594 /* [no]sync, block_on_error etc. */
1595 log_parm_count += hweight32(seg->flags);
1596
1597 /* "handle_errors" is a feature arg now */
1598 if (handle_errors)
1599 log_parm_count--;
1600
1601 /* DM_CORELOG does not count in the param list */
1602 if (seg->flags & DM_CORELOG)
1603 log_parm_count--;
1604
1605 if (seg->clustered) {
1606 log_parm_count++; /* For UUID */
1607
1608 if (!dm_log_userspace)
1609 EMIT_PARAMS(pos, "clustered-");
1610 else
1611 /* For clustered-* type field inserted later */
1612 log_parm_count++;
1613 }
1614
1615 if (!seg->log)
1616 logtype = "core";
1617 else {
1618 logtype = "disk";
1619 log_parm_count++;
1620 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1621 return_0;
1622 }
1623
1624 if (dm_log_userspace)
1625 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1626 log_parm_count, seg->uuid, logtype);
1627 else
1628 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
1629
1630 if (seg->log)
1631 EMIT_PARAMS(pos, " %s", logbuf);
1632
1633 EMIT_PARAMS(pos, " %u", seg->region_size);
1634
1635 if (seg->clustered && !dm_log_userspace)
1636 EMIT_PARAMS(pos, " %s", seg->uuid);
1637
1638 if ((seg->flags & DM_NOSYNC))
1639 EMIT_PARAMS(pos, " nosync");
1640 else if ((seg->flags & DM_FORCESYNC))
1641 EMIT_PARAMS(pos, " sync");
1642
1643 if (block_on_error)
1644 EMIT_PARAMS(pos, " block_on_error");
1645
1646 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1647
1648 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0)
1649 return_0;
1650
1651 if (handle_errors)
1652 EMIT_PARAMS(pos, " 1 handle_errors");
1653
1654 return 1;
1655 }
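
/*
 * Example output (illustrative values only): two mirror legs with a core
 * log, 1024-sector regions and no optional flags emit
 *
 *	"core 1 1024 2 253:5 0 253:6 0"
 *
 * i.e. log type, log argument count, region size, mirror leg count and a
 * (device offset) pair per leg; " 1 handle_errors" is appended on kernels
 * >= 2.6.22 when DM_BLOCK_ON_ERROR is set.
 */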
1656
1657 static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
1658 uint32_t minor, struct load_segment *seg,
1659 uint64_t *seg_start, char *params,
1660 size_t paramsize)
1661 {
1662 int pos = 0;
1663 int r;
1664 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
1665
1666 switch(seg->type) {
1667 case SEG_ERROR:
1668 case SEG_ZERO:
1669 case SEG_LINEAR:
1670 break;
1671 case SEG_MIRRORED:
1672 /* Mirrors are pretty complicated - now in separate function */
1673 r = _mirror_emit_segment_line(dmt, major, minor, seg, seg_start,
1674 params, paramsize);
1675 if (!r)
1676 return_0;
1677 break;
1678 case SEG_REPLICATOR:
1679 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
1680 &pos)) <= 0) {
1681 stack;
1682 return r;
1683 }
1684 break;
1685 case SEG_REPLICATOR_DEV:
1686 if (!seg->replicator || !_build_dev_string(originbuf,
1687 sizeof(originbuf),
1688 seg->replicator))
1689 return_0;
1690
1691 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
1692 break;
1693 case SEG_SNAPSHOT:
1694 case SEG_SNAPSHOT_MERGE:
1695 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1696 return_0;
1697 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
1698 return_0;
1699 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
1700 seg->persistent ? 'P' : 'N', seg->chunk_size);
1701 break;
1702 case SEG_SNAPSHOT_ORIGIN:
1703 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1704 return_0;
1705 EMIT_PARAMS(pos, "%s", originbuf);
1706 break;
1707 case SEG_STRIPED:
1708 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
1709 break;
1710 case SEG_CRYPT:
1711 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
1712 seg->chainmode ? "-" : "", seg->chainmode ?: "",
1713 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
1714 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
1715 seg->iv_offset : *seg_start);
1716 break;
1717 }
1718
1719 switch(seg->type) {
1720 case SEG_ERROR:
1721 case SEG_REPLICATOR:
1722 case SEG_SNAPSHOT:
1723 case SEG_SNAPSHOT_ORIGIN:
1724 case SEG_SNAPSHOT_MERGE:
1725 case SEG_ZERO:
1726 break;
1727 case SEG_CRYPT:
1728 case SEG_LINEAR:
1729 case SEG_REPLICATOR_DEV:
1730 case SEG_STRIPED:
1731 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
1732 stack;
1733 return r;
1734 }
1735 break;
1736 }
1737
1738 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
1739 " %" PRIu64 " %s %s", major, minor,
1740 *seg_start, seg->size, dm_segtypes[seg->type].target, params);
1741
1742 if (!dm_task_add_target(dmt, *seg_start, seg->size, dm_segtypes[seg->type].target, params))
1743 return_0;
1744
1745 *seg_start += seg->size;
1746
1747 return 1;
1748 }
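
/*
 * Example (illustrative values only): a snapshot segment produces a
 * parameter string such as
 *
 *	"253:3 253:4 P 16"
 *
 * (origin, COW device, persistent flag, chunk size in sectors), and the
 * resulting table line passed to dm_task_add_target() has the usual
 * "<start> <size> snapshot <params>" form.
 */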
1749
1750 #undef EMIT_PARAMS
1751
1752 static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
1753 struct load_segment *seg, uint64_t *seg_start)
1754 {
1755 char *params;
1756 size_t paramsize = 4096;
1757 int ret;
1758
1759 do {
1760 if (!(params = dm_malloc(paramsize))) {
1761 log_error("Insufficient space for target parameters.");
1762 return 0;
1763 }
1764
1765 params[0] = '\0';
1766 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
1767 params, paramsize);
1768 dm_free(params);
1769
1770 if (!ret)
1771 stack;
1772
1773 if (ret >= 0)
1774 return ret;
1775
1776 log_debug("Insufficient space in params[%" PRIsize_t
1777 "] for target parameters.", paramsize);
1778
1779 paramsize *= 2;
1780 } while (paramsize < MAX_TARGET_PARAMSIZE);
1781
1782 log_error("Target parameter size too big. Aborting.");
1783 return 0;
1784 }
1785
1786 static int _load_node(struct dm_tree_node *dnode)
1787 {
1788 int r = 0;
1789 struct dm_task *dmt;
1790 struct load_segment *seg;
1791 uint64_t seg_start = 0;
1792
1793 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
1794 dnode->info.major, dnode->info.minor);
1795
1796 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
1797 log_error("Reload dm_task creation failed for %s", dnode->name);
1798 return 0;
1799 }
1800
1801 if (!dm_task_set_major(dmt, dnode->info.major) ||
1802 !dm_task_set_minor(dmt, dnode->info.minor)) {
1803 log_error("Failed to set device number for %s reload.", dnode->name);
1804 goto out;
1805 }
1806
1807 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1808 log_error("Failed to set read only flag for %s", dnode->name);
1809 goto out;
1810 }
1811
1812 if (!dm_task_no_open_count(dmt))
1813 log_error("Failed to disable open_count");
1814
1815 dm_list_iterate_items(seg, &dnode->props.segs)
1816 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
1817 seg, &seg_start))
1818 goto_out;
1819
1820 if (!dm_task_suppress_identical_reload(dmt))
1821 log_error("Failed to suppress reload of identical tables.");
1822
1823 if ((r = dm_task_run(dmt))) {
1824 r = dm_task_get_info(dmt, &dnode->info);
1825 if (r && !dnode->info.inactive_table)
1826 log_verbose("Suppressed %s identical table reload.",
1827 dnode->name);
1828
1829 if ((dnode->props.size_changed =
1830 (dm_task_get_existing_table_size(dmt) == seg_start) ? 0 : 1))
1831 log_debug("Table size changed from %" PRIu64 " to %"
1832 PRIu64 " for %s",
1833 dm_task_get_existing_table_size(dmt),
1834 seg_start, dnode->name);
1835 }
1836
1837 dnode->props.segment_count = 0;
1838
1839 out:
1840 dm_task_destroy(dmt);
1841
1842 return r;
1843 }
1844
1845 int dm_tree_preload_children(struct dm_tree_node *dnode,
1846 const char *uuid_prefix,
1847 size_t uuid_prefix_len)
1848 {
1849 int r = 1;
1850 void *handle = NULL;
1851 struct dm_tree_node *child;
1852 struct dm_info newinfo;
1853 int update_devs_flag = 0;
1854
1855 /* Preload children first */
1856 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1857 /* Skip existing non-device-mapper devices */
1858 if (!child->info.exists && child->info.major)
1859 continue;
1860
1861 /* Ignore if it doesn't belong to this VG */
1862 if (child->info.exists &&
1863 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
1864 continue;
1865
1866 if (dm_tree_node_num_children(child, 0))
1867 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
1868 return_0;
1869
1870 /* FIXME Cope if name exists with no uuid? */
1871 if (!child->info.exists) {
1872 if (!_create_node(child)) {
1873 stack;
1874 return 0;
1875 }
1876 }
1877
1878 if (!child->info.inactive_table && child->props.segment_count) {
1879 if (!_load_node(child)) {
1880 stack;
1881 return 0;
1882 }
1883 }
1884
1885 /* Propagate device size change */
1886 if (child->props.size_changed)
1887 dnode->props.size_changed = 1;
1888
1889 /* Resume device immediately if it has parents and its size changed */
1890 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
1891 continue;
1892
1893 if (!child->info.inactive_table && !child->info.suspended)
1894 continue;
1895
1896 if (!_resume_node(child->name, child->info.major, child->info.minor,
1897 child->props.read_ahead, child->props.read_ahead_flags,
1898 &newinfo, &child->dtree->cookie, child->udev_flags)) {
1899 log_error("Unable to resume %s (%" PRIu32
1900 ":%" PRIu32 ")", child->name, child->info.major,
1901 child->info.minor);
1902 r = 0;
1903 continue;
1904 }
1905
1906 /* Update cached info */
1907 child->info = newinfo;
1908
1909 /*
1910 * Prepare for immediate synchronization with udev and flush all stacked
1911 * dev node operations if requested by immediate_dev_node property. But
1912 * finish processing current level in the tree first.
1913 */
1914 if (child->props.immediate_dev_node)
1915 update_devs_flag = 1;
1916
1917 }
1918
1919 handle = NULL;
1920
1921 if (update_devs_flag) {
1922 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
1923 stack;
1924 dm_tree_set_cookie(dnode, 0);
1925 dm_task_update_nodes();
1926 }
1927
1928 return r;
1929 }
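
/*
 * Caller sketch (hypothetical code; 'root' stands for the tree's root
 * node): the usual activation sequence preloads tables, resumes the
 * devices and then synchronizes with udev, roughly:
 *
 *	if (!dm_tree_preload_children(root, uuid_prefix, prefix_len))
 *		return 0;
 *	if (!dm_tree_activate_children(root, uuid_prefix, prefix_len))
 *		return 0;
 *	if (!dm_udev_wait(dm_tree_get_cookie(root)))
 *		stack;
 *	dm_tree_set_cookie(root, 0);
 */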
1930
1931 /*
1932 * Returns 1 if unsure.
1933 */
1934 int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
1935 const char *uuid_prefix,
1936 size_t uuid_prefix_len)
1937 {
1938 void *handle = NULL;
1939 struct dm_tree_node *child = dnode;
1940 const char *uuid;
1941
1942 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1943 if (!(uuid = dm_tree_node_get_uuid(child))) {
1944 log_error("Failed to get uuid for dtree node.");
1945 return 1;
1946 }
1947
1948 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1949 return 1;
1950
1951 if (dm_tree_node_num_children(child, 0))
1952 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
1953 }
1954
1955 return 0;
1956 }
1957
1958 /*
1959 * Target functions
1960 */
1961 static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
1962 {
1963 struct load_segment *seg;
1964
1965 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
1966 log_error("dtree node segment allocation failed");
1967 return NULL;
1968 }
1969
1970 seg->type = type;
1971 seg->size = size;
1972 seg->area_count = 0;
1973 dm_list_init(&seg->areas);
1974 seg->stripe_size = 0;
1975 seg->persistent = 0;
1976 seg->chunk_size = 0;
1977 seg->cow = NULL;
1978 seg->origin = NULL;
1979 seg->merge = NULL;
1980
1981 dm_list_add(&dnode->props.segs, &seg->list);
1982 dnode->props.segment_count++;
1983
1984 return seg;
1985 }
1986
1987 int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
1988 uint64_t size,
1989 const char *origin_uuid)
1990 {
1991 struct load_segment *seg;
1992 struct dm_tree_node *origin_node;
1993
1994 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
1995 return_0;
1996
1997 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
1998 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
1999 return 0;
2000 }
2001
2002 seg->origin = origin_node;
2003 if (!_link_tree_nodes(dnode, origin_node))
2004 return_0;
2005
2006 /* Resume snapshot origins after new snapshots */
2007 dnode->activation_priority = 1;
2008
2009 return 1;
2010 }
2011
2012 static int _add_snapshot_target(struct dm_tree_node *node,
2013 uint64_t size,
2014 const char *origin_uuid,
2015 const char *cow_uuid,
2016 const char *merge_uuid,
2017 int persistent,
2018 uint32_t chunk_size)
2019 {
2020 struct load_segment *seg;
2021 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2022 unsigned seg_type;
2023
2024 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
2025
2026 if (!(seg = _add_segment(node, seg_type, size)))
2027 return_0;
2028
2029 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
2030 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2031 return 0;
2032 }
2033
2034 seg->origin = origin_node;
2035 if (!_link_tree_nodes(node, origin_node))
2036 return_0;
2037
2038 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
2039 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
2040 return 0;
2041 }
2042
2043 seg->cow = cow_node;
2044 if (!_link_tree_nodes(node, cow_node))
2045 return_0;
2046
2047 seg->persistent = persistent ? 1 : 0;
2048 seg->chunk_size = chunk_size;
2049
2050 if (merge_uuid) {
2051 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2052 /* not a pure error, merging snapshot may have been deactivated */
2053 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2054 } else {
2055 seg->merge = merge_node;
2056 /* must not link merging snapshot, would undermine activation_priority below */
2057 }
2058
2059 /* Resume snapshot-merge (acting origin) after other snapshots */
2060 node->activation_priority = 1;
2061 if (seg->merge) {
2062 /* Resume merging snapshot after snapshot-merge */
2063 seg->merge->activation_priority = 2;
2064 }
2065 }
2066
2067 return 1;
2068 }
2069
2070
2071 int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2072 uint64_t size,
2073 const char *origin_uuid,
2074 const char *cow_uuid,
2075 int persistent,
2076 uint32_t chunk_size)
2077 {
2078 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2079 NULL, persistent, chunk_size);
2080 }
2081
2082 int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2083 uint64_t size,
2084 const char *origin_uuid,
2085 const char *cow_uuid,
2086 const char *merge_uuid,
2087 uint32_t chunk_size)
2088 {
2089 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2090 merge_uuid, 1, chunk_size);
2091 }
2092
2093 int dm_tree_node_add_error_target(struct dm_tree_node *node,
2094 uint64_t size)
2095 {
2096 if (!_add_segment(node, SEG_ERROR, size))
2097 return_0;
2098
2099 return 1;
2100 }
2101
2102 int dm_tree_node_add_zero_target(struct dm_tree_node *node,
2103 uint64_t size)
2104 {
2105 if (!_add_segment(node, SEG_ZERO, size))
2106 return_0;
2107
2108 return 1;
2109 }
2110
2111 int dm_tree_node_add_linear_target(struct dm_tree_node *node,
2112 uint64_t size)
2113 {
2114 if (!_add_segment(node, SEG_LINEAR, size))
2115 return_0;
2116
2117 return 1;
2118 }
2119
2120 int dm_tree_node_add_striped_target(struct dm_tree_node *node,
2121 uint64_t size,
2122 uint32_t stripe_size)
2123 {
2124 struct load_segment *seg;
2125
2126 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2127 return_0;
2128
2129 seg->stripe_size = stripe_size;
2130
2131 return 1;
2132 }
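
/*
 * Illustrative usage sketch only (not built): a two-way stripe takes the
 * stripe size here plus one target area per stripe leg added afterwards.
 * "node", the leg uuids and the numbers are invented placeholders;
 * stripe_size and offsets are in 512-byte sectors.
 */
#if 0
        /* 128-sector (64KiB) stripe chunk across two legs */
        if (!dm_tree_node_add_striped_target(node, 2097152, 128) ||
            !dm_tree_node_add_target_area(node, NULL, "LVM-pvid-leg0", 384) ||
            !dm_tree_node_add_target_area(node, NULL, "LVM-pvid-leg1", 384))
                return 0;
#endif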
2133
2134 int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2135 uint64_t size,
2136 const char *cipher,
2137 const char *chainmode,
2138 const char *iv,
2139 uint64_t iv_offset,
2140 const char *key)
2141 {
2142 struct load_segment *seg;
2143
2144 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2145 return_0;
2146
2147 seg->cipher = cipher;
2148 seg->chainmode = chainmode;
2149 seg->iv = iv;
2150 seg->iv_offset = iv_offset;
2151 seg->key = key;
2152
2153 return 1;
2154 }
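
/*
 * Illustrative usage sketch only (not built): the crypt segment stores the
 * cipher specification and key verbatim and still needs one target area for
 * the underlying data device.  "node", the key, the uuid and the size are
 * invented placeholders.
 */
#if 0
        /* Equivalent of an "aes-cbc-essiv:sha256" table line with a 128-bit key */
        if (!dm_tree_node_add_crypt_target(node, 2097152, "aes", "cbc",
                                           "essiv:sha256", 0,
                                           "000102030405060708090a0b0c0d0e0f") ||
            !dm_tree_node_add_target_area(node, NULL, "CRYPT-data-uuid", 0))
                return 0;
#endif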
2155
2156 int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
2157 uint32_t region_size,
2158 unsigned clustered,
2159 const char *log_uuid,
2160 unsigned area_count,
2161 uint32_t flags)
2162 {
2163 struct dm_tree_node *log_node = NULL;
2164 struct load_segment *seg;
2165
2166 if (!node->props.segment_count) {
2167 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2168 return 0;
2169 }
2170
2171 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2172
2173 if (log_uuid) {
2174 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2175 log_error("log uuid pool_strdup failed");
2176 return 0;
2177 }
2178 if (!(flags & DM_CORELOG)) {
2179 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2180 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2181 return 0;
2182 }
2183
2184 if (clustered)
2185 log_node->props.immediate_dev_node = 1;
2186
2187 if (!_link_tree_nodes(node, log_node))
2188 return_0;
2189 }
2190 }
2191
2192 seg->log = log_node;
2193 seg->region_size = region_size;
2194 seg->clustered = clustered;
2195 seg->mirror_area_count = area_count;
2196 seg->flags = flags;
2197
2198 return 1;
2199 }
2200
2201 int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
2202 uint64_t size)
2203 {
2204 struct load_segment *seg;
2205
2206 if (!(seg = _add_segment(node, SEG_MIRRORED, size)))
2207 return_0;
2208
2209 return 1;
2210 }
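
/*
 * Illustrative usage sketch only (not built): a two-image mirror is built by
 * adding the mirror segment first, then describing its log, then one target
 * area per mirror image.  "node", the image uuids, the region size and the
 * offsets are invented placeholders; DM_CORELOG selects an in-memory log, so
 * no log device uuid is looked up here.
 */
#if 0
        if (!dm_tree_node_add_mirror_target(node, 2097152) ||
            !dm_tree_node_add_mirror_target_log(node, 1024 /* region_size */,
                                                0 /* not clustered */,
                                                NULL, 2, DM_CORELOG) ||
            !dm_tree_node_add_target_area(node, NULL, "LVM-pvid-mimage0", 384) ||
            !dm_tree_node_add_target_area(node, NULL, "LVM-pvid-mimage1", 384))
                return 0;
#endif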
2211
2212 int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2213 uint64_t size,
2214 const char *rlog_uuid,
2215 const char *rlog_type,
2216 unsigned rsite_index,
2217 dm_replicator_mode_t mode,
2218 uint32_t async_timeout,
2219 uint64_t fall_behind_data,
2220 uint32_t fall_behind_ios)
2221 {
2222 struct load_segment *rseg;
2223 struct replicator_site *rsite;
2224
2225 /* Local site0 - adds replicator segment and links rlog device */
2226 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2227 if (node->props.segment_count) {
2228 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2229 return 0;
2230 }
2231
2232 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2233 return_0;
2234
2235 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2236 log_error("Missing replicator log uuid %s.", rlog_uuid);
2237 return 0;
2238 }
2239
2240 if (!_link_tree_nodes(node, rseg->log))
2241 return_0;
2242
2243 if (strcmp(rlog_type, "ringbuffer") != 0) {
2244 log_error("Unsupported replicator log type %s.", rlog_type);
2245 return 0;
2246 }
2247
2248 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2249 return_0;
2250
2251 dm_list_init(&rseg->rsites);
2252 rseg->rdevice_count = 0;
2253 node->activation_priority = 1;
2254 }
2255
2256 /* Add site to segment */
2257 if (mode == DM_REPLICATOR_SYNC
2258 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2259 log_error("Async parameters passed for synchronnous replicator.");
2260 return 0;
2261 }
2262
2263 if (node->props.segment_count != 1) {
2264 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2265 return 0;
2266 }
2267
2268 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2269 if (rseg->type != SEG_REPLICATOR) {
2270 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment %s.",
2271 dm_segtypes[rseg->type].target);
2272 return 0;
2273 }
2274
2275 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2276 log_error("Failed to allocate remote site segment.");
2277 return 0;
2278 }
2279
2280 dm_list_add(&rseg->rsites, &rsite->list);
2281 rseg->rsite_count++;
2282
2283 rsite->mode = mode;
2284 rsite->async_timeout = async_timeout;
2285 rsite->fall_behind_data = fall_behind_data;
2286 rsite->fall_behind_ios = fall_behind_ios;
2287 rsite->rsite_index = rsite_index;
2288
2289 return 1;
2290 }
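
/*
 * Illustrative usage sketch only (not built): a local-site (site 0)
 * replicator control node requires its ring-buffer log device to be present
 * in the tree already; remote sites would be added by further calls with a
 * non-zero site index.  "rep_node", the uuid and the size are invented
 * placeholders.
 */
#if 0
        /* Synchronous local site: all async parameters must be zero. */
        if (!dm_tree_node_add_replicator_target(rep_node, 8192,
                                                "LVM-rlog-uuid", "ringbuffer",
                                                0 /* REPLICATOR_LOCAL_SITE */,
                                                DM_REPLICATOR_SYNC, 0, 0, 0))
                return 0;
#endif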
2291
2292 /* Appends a device node to the replicator */
2293 int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2294 uint64_t size,
2295 const char *replicator_uuid,
2296 uint64_t rdevice_index,
2297 const char *rdev_uuid,
2298 unsigned rsite_index,
2299 const char *slog_uuid,
2300 uint32_t slog_flags,
2301 uint32_t slog_region_size)
2302 {
2303 struct seg_area *area;
2304 struct load_segment *rseg;
2305 struct load_segment *rep_seg;
2306
2307 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2308 /* Site index for local target */
2309 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2310 return_0;
2311
2312 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2313 log_error("Missing replicator uuid %s.", replicator_uuid);
2314 return 0;
2315 }
2316
2317 /* Local slink0 for the replicator must always be initialized first */
2318 if (rseg->replicator->props.segment_count != 1) {
2319 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment.");
2320 return 0;
2321 }
2322
2323 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2324 if (rep_seg->type != SEG_REPLICATOR) {
2325 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment %s.",
2326 dm_segtypes[rep_seg->type].target);
2327 return 0;
2328 }
2329 rep_seg->rdevice_count++;
2330
2331 if (!_link_tree_nodes(node, rseg->replicator))
2332 return_0;
2333
2334 rseg->rdevice_index = rdevice_index;
2335 } else {
2336 /* Local slink0 for the replicator must always be initialized first */
2337 if (node->props.segment_count != 1) {
2338 log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment.");
2339 return 0;
2340 }
2341
2342 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2343 if (rseg->type != SEG_REPLICATOR_DEV) {
2344 log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment %s.",
2345 dm_segtypes[rseg->type].target);
2346 return 0;
2347 }
2348 }
2349
2350 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2351 log_error("Unspecified sync log uuid.");
2352 return 0;
2353 }
2354
2355 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2356 return_0;
2357
2358 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2359
2360 if (!(slog_flags & DM_CORELOG)) {
2361 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2362 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2363 return 0;
2364 }
2365
2366 if (!_link_tree_nodes(node, area->slog))
2367 return_0;
2368 }
2369
2370 area->flags = slog_flags;
2371 area->region_size = slog_region_size;
2372 area->rsite_index = rsite_index;
2373
2374 return 1;
2375 }
2376
2377 static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
2378 {
2379 struct seg_area *area;
2380
2381 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof(*area)))) {
2382 log_error("Failed to allocate target segment area.");
2383 return 0;
2384 }
2385
2386 area->dev_node = dev_node;
2387 area->offset = offset;
2388
2389 dm_list_add(&seg->areas, &area->list);
2390 seg->area_count++;
2391
2392 return 1;
2393 }
2394
2395 int dm_tree_node_add_target_area(struct dm_tree_node *node,
2396 const char *dev_name,
2397 const char *uuid,
2398 uint64_t offset)
2399 {
2400 struct load_segment *seg;
2401 struct stat info;
2402 struct dm_tree_node *dev_node;
2403
2404 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
2405 log_error("dm_tree_node_add_target_area called without device");
2406 return 0;
2407 }
2408
2409 if (uuid) {
2410 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
2411 log_error("Couldn't find area uuid %s.", uuid);
2412 return 0;
2413 }
2414 if (!_link_tree_nodes(node, dev_node))
2415 return_0;
2416 } else {
2417 if (stat(dev_name, &info) < 0) {
2418 log_error("Device %s not found.", dev_name);
2419 return 0;
2420 }
2421
2422 if (!S_ISBLK(info.st_mode)) {
2423 log_error("Device %s is not a block device.", dev_name);
2424 return 0;
2425 }
2426
2427 /* FIXME Check correct macro use */
2428 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
2429 MINOR(info.st_rdev), 0)))
2430 return_0;
2431 }
2432
2433 if (!node->props.segment_count) {
2434 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2435 return 0;
2436 }
2437
2438 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2439
2440 if (!_add_area(node, seg, dev_node, offset))
2441 return_0;
2442
2443 return 1;
2444 }
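
/*
 * Illustrative usage sketch only (not built): an area can reference either
 * another node of the tree by uuid, which also records the dependency, or an
 * existing block device by path, which is stat()ed and added to the tree by
 * its major:minor.  "node", "node2", the uuid, the path and the offsets are
 * invented placeholders; offsets are in 512-byte sectors.
 */
#if 0
        /* By uuid: the referenced device must already be a node in the tree. */
        if (!dm_tree_node_add_target_area(node, NULL, "LVM-pvid-lvid", 0))
                return 0;

        /* By path: suitable for pre-existing devices outside the tree. */
        if (!dm_tree_node_add_target_area(node2, "/dev/sdb1", NULL, 2048))
                return 0;
#endif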
2445
2446 void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
2447 {
2448 node->dtree->cookie = cookie;
2449 }
2450
2451 uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
2452 {
2453 return node->dtree->cookie;
2454 }
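
/*
 * Illustrative usage sketch only (not built): the cookie stored on the tree
 * is the udev synchronisation cookie shared by the activation ioctls issued
 * for this tree, so a caller typically reads it back after activating and
 * waits on it.  "root" and the uuid prefix are placeholders.
 */
#if 0
        uint32_t cookie;

        if (!dm_tree_activate_children(root, "LVM-", strlen("LVM-")))
                return 0;

        if ((cookie = dm_tree_get_cookie(root)))
                (void) dm_udev_wait(cookie);    /* waits and cleans up the cookie */
#endif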