sourceware.org Git - lvm2.git/blob - libdm/libdm-deptree.c
Add debug message for open_count failure
1 /*
2 * Copyright (C) 2005-2010 Red Hat, Inc. All rights reserved.
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15 #include "dmlib.h"
16 #include "libdm-targets.h"
17 #include "libdm-common.h"
18 #include "kdev_t.h"
19 #include "dm-ioctl.h"
20
21 #include <stdarg.h>
22 #include <sys/param.h>
23 #include <sys/utsname.h>
24
25 #define MAX_TARGET_PARAMSIZE 500000
26
27 /* FIXME Fix interface so this is used only by LVM */
28 #define UUID_PREFIX "LVM-"
29
30 #define REPLICATOR_LOCAL_SITE 0
31
32 /* Supported segment types */
33 enum {
34 SEG_CRYPT,
35 SEG_ERROR,
36 SEG_LINEAR,
37 SEG_MIRRORED,
38 SEG_REPLICATOR,
39 SEG_REPLICATOR_DEV,
40 SEG_SNAPSHOT,
41 SEG_SNAPSHOT_ORIGIN,
42 SEG_SNAPSHOT_MERGE,
43 SEG_STRIPED,
44 SEG_ZERO,
45 };
46
47 /* FIXME Add crypt and multipath support */
48
49 struct {
50 unsigned type;
51 const char *target;
52 } dm_segtypes[] = {
53 { SEG_CRYPT, "crypt" },
54 { SEG_ERROR, "error" },
55 { SEG_LINEAR, "linear" },
56 { SEG_MIRRORED, "mirror" },
57 { SEG_REPLICATOR, "replicator" },
58 { SEG_REPLICATOR_DEV, "replicator-dev" },
59 { SEG_SNAPSHOT, "snapshot" },
60 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
61 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
62 { SEG_STRIPED, "striped" },
63 { SEG_ZERO, "zero"},
64 };
65
66 /* Some segment types have a list of areas of other devices attached */
67 struct seg_area {
68 struct dm_list list;
69
70 struct dm_tree_node *dev_node;
71
72 uint64_t offset;
73
74 unsigned rsite_index; /* Replicator site index */
75 struct dm_tree_node *slog; /* Replicator sync log node */
76 uint64_t region_size; /* Replicator sync log size */
77 uint32_t flags; /* Replicator sync log flags */
78 };
79
80 /* Replicator-log has a list of sites */
81 /* FIXME: maybe move to seg_area too? */
82 struct replicator_site {
83 struct dm_list list;
84
85 unsigned rsite_index;
86 dm_replicator_mode_t mode;
87 uint32_t async_timeout;
88 uint32_t fall_behind_ios;
89 uint64_t fall_behind_data;
90 };
91
92 /* Per-segment properties */
93 struct load_segment {
94 struct dm_list list;
95
96 unsigned type;
97
98 uint64_t size;
99
100 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
101 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
102
103 uint32_t stripe_size; /* Striped */
104
105 int persistent; /* Snapshot */
106 uint32_t chunk_size; /* Snapshot */
107 struct dm_tree_node *cow; /* Snapshot */
108 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
109 struct dm_tree_node *merge; /* Snapshot */
110
111 struct dm_tree_node *log; /* Mirror + Replicator */
112 uint32_t region_size; /* Mirror */
113 unsigned clustered; /* Mirror */
114 unsigned mirror_area_count; /* Mirror */
115 uint32_t flags; /* Mirror log */
116 char *uuid; /* Clustered mirror log */
117
118 const char *cipher; /* Crypt */
119 const char *chainmode; /* Crypt */
120 const char *iv; /* Crypt */
121 uint64_t iv_offset; /* Crypt */
122 const char *key; /* Crypt */
123
124 const char *rlog_type; /* Replicator */
125 struct dm_list rsites; /* Replicator */
126 unsigned rsite_count; /* Replicator */
127 unsigned rdevice_count; /* Replicator */
128 struct dm_tree_node *replicator;/* Replicator-dev */
129 uint64_t rdevice_index; /* Replicator-dev */
130 };
131
132 /* Per-device properties */
133 struct load_properties {
134 int read_only;
135 uint32_t major;
136 uint32_t minor;
137
138 uint32_t read_ahead;
139 uint32_t read_ahead_flags;
140
141 unsigned segment_count;
142 unsigned size_changed;
143 struct dm_list segs;
144
145 const char *new_name;
146
147 /* If immediate_dev_node is set to 1, try to create the dev node
148 * as soon as possible (e.g. in preload stage even during traversal
149 * and processing of dm tree). This will also flush all stacked dev
150 * node operations, synchronizing with udev.
151 */
152 int immediate_dev_node;
153 };
154
155 /* Two of these are used to join two nodes with uses and used_by. */
156 struct dm_tree_link {
157 struct dm_list list;
158 struct dm_tree_node *node;
159 };
160
161 struct dm_tree_node {
162 struct dm_tree *dtree;
163
164 const char *name;
165 const char *uuid;
166 struct dm_info info;
167
168 struct dm_list uses; /* Nodes this node uses */
169 struct dm_list used_by; /* Nodes that use this node */
170
171 int activation_priority; /* 0 gets activated first */
172
173 uint16_t udev_flags; /* Udev control flags */
174
175 	void *context;			/* Externally supplied context */
176
177 struct load_properties props; /* For creation/table (re)load */
178
179 /*
180 * If presuspend of child node is needed
181 * Note: only direct child is allowed
182 */
183 struct dm_tree_node *presuspend_node;
184 };
185
186 struct dm_tree {
187 struct dm_pool *mem;
188 struct dm_hash_table *devs;
189 struct dm_hash_table *uuids;
190 struct dm_tree_node root;
191 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
192 int no_flush; /* 1 sets noflush (mirrors/multipath) */
193 uint32_t cookie;
194 };
195
196 struct dm_tree *dm_tree_create(void)
197 {
198 struct dm_tree *dtree;
199
200 if (!(dtree = dm_zalloc(sizeof(*dtree)))) {
201 log_error("dm_tree_create malloc failed");
202 return NULL;
203 }
204
205 dtree->root.dtree = dtree;
206 dm_list_init(&dtree->root.uses);
207 dm_list_init(&dtree->root.used_by);
208 dtree->skip_lockfs = 0;
209 dtree->no_flush = 0;
210
211 if (!(dtree->mem = dm_pool_create("dtree", 1024))) {
212 log_error("dtree pool creation failed");
213 dm_free(dtree);
214 return NULL;
215 }
216
217 if (!(dtree->devs = dm_hash_create(8))) {
218 log_error("dtree hash creation failed");
219 dm_pool_destroy(dtree->mem);
220 dm_free(dtree);
221 return NULL;
222 }
223
224 if (!(dtree->uuids = dm_hash_create(32))) {
225 log_error("dtree uuid hash creation failed");
226 dm_hash_destroy(dtree->devs);
227 dm_pool_destroy(dtree->mem);
228 dm_free(dtree);
229 return NULL;
230 }
231
232 return dtree;
233 }
234
235 void dm_tree_free(struct dm_tree *dtree)
236 {
237 if (!dtree)
238 return;
239
240 dm_hash_destroy(dtree->uuids);
241 dm_hash_destroy(dtree->devs);
242 dm_pool_destroy(dtree->mem);
243 dm_free(dtree);
244 }
245
246 static int _nodes_are_linked(const struct dm_tree_node *parent,
247 const struct dm_tree_node *child)
248 {
249 struct dm_tree_link *dlink;
250
251 dm_list_iterate_items(dlink, &parent->uses)
252 if (dlink->node == child)
253 return 1;
254
255 return 0;
256 }
257
258 static int _link(struct dm_list *list, struct dm_tree_node *node)
259 {
260 struct dm_tree_link *dlink;
261
262 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
263 log_error("dtree link allocation failed");
264 return 0;
265 }
266
267 dlink->node = node;
268 dm_list_add(list, &dlink->list);
269
270 return 1;
271 }
272
273 static int _link_nodes(struct dm_tree_node *parent,
274 struct dm_tree_node *child)
275 {
276 if (_nodes_are_linked(parent, child))
277 return 1;
278
279 if (!_link(&parent->uses, child))
280 return 0;
281
282 if (!_link(&child->used_by, parent))
283 return 0;
284
285 return 1;
286 }
287
288 static void _unlink(struct dm_list *list, struct dm_tree_node *node)
289 {
290 struct dm_tree_link *dlink;
291
292 dm_list_iterate_items(dlink, list)
293 if (dlink->node == node) {
294 dm_list_del(&dlink->list);
295 break;
296 }
297 }
298
299 static void _unlink_nodes(struct dm_tree_node *parent,
300 struct dm_tree_node *child)
301 {
302 if (!_nodes_are_linked(parent, child))
303 return;
304
305 _unlink(&parent->uses, child);
306 _unlink(&child->used_by, parent);
307 }
308
309 static int _add_to_toplevel(struct dm_tree_node *node)
310 {
311 return _link_nodes(&node->dtree->root, node);
312 }
313
314 static void _remove_from_toplevel(struct dm_tree_node *node)
315 {
316 _unlink_nodes(&node->dtree->root, node);
317 }
318
319 static int _add_to_bottomlevel(struct dm_tree_node *node)
320 {
321 return _link_nodes(node, &node->dtree->root);
322 }
323
324 static void _remove_from_bottomlevel(struct dm_tree_node *node)
325 {
326 _unlink_nodes(node, &node->dtree->root);
327 }
328
329 static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
330 {
331 /* Don't link to root node if child already has a parent */
332 if ((parent == &parent->dtree->root)) {
333 if (dm_tree_node_num_children(child, 1))
334 return 1;
335 } else
336 _remove_from_toplevel(child);
337
338 if ((child == &child->dtree->root)) {
339 if (dm_tree_node_num_children(parent, 0))
340 return 1;
341 } else
342 _remove_from_bottomlevel(parent);
343
344 return _link_nodes(parent, child);
345 }
346
347 static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
348 const char *name,
349 const char *uuid,
350 struct dm_info *info,
351 void *context,
352 uint16_t udev_flags)
353 {
354 struct dm_tree_node *node;
355 uint64_t dev;
356
357 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
358 log_error("_create_dm_tree_node alloc failed");
359 return NULL;
360 }
361
362 node->dtree = dtree;
363
364 node->name = name;
365 node->uuid = uuid;
366 node->info = *info;
367 node->context = context;
368 node->udev_flags = udev_flags;
369 node->activation_priority = 0;
370
371 dm_list_init(&node->uses);
372 dm_list_init(&node->used_by);
373 dm_list_init(&node->props.segs);
374
375 dev = MKDEV(info->major, info->minor);
376
377 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
378 sizeof(dev), node)) {
379 log_error("dtree node hash insertion failed");
380 dm_pool_free(dtree->mem, node);
381 return NULL;
382 }
383
384 if (uuid && *uuid &&
385 !dm_hash_insert(dtree->uuids, uuid, node)) {
386 log_error("dtree uuid hash insertion failed");
387 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
388 sizeof(dev));
389 dm_pool_free(dtree->mem, node);
390 return NULL;
391 }
392
393 return node;
394 }
395
396 static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
397 uint32_t major, uint32_t minor)
398 {
399 uint64_t dev = MKDEV(major, minor);
400
401 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
402 sizeof(dev));
403 }
404
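/*
 * Look up a node by uuid.  If no exact match is found and the uuid begins
 * with UUID_PREFIX ("LVM-"), retry the lookup with the prefix stripped.
 */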
405 static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
406 const char *uuid)
407 {
408 struct dm_tree_node *node;
409
410 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
411 return node;
412
413 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
414 return NULL;
415
416 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
417 }
418
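/*
 * Fetch name, uuid, info and dependency list for major:minor using
 * DM_DEVICE_DEPS.  Devices outside the device-mapper major are returned
 * with empty name/uuid, no deps and info->exists set to 0.
 */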
419 static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
420 const char **name, const char **uuid,
421 struct dm_info *info, struct dm_deps **deps)
422 {
423 memset(info, 0, sizeof(*info));
424
425 if (!dm_is_dm_major(major)) {
426 *name = "";
427 *uuid = "";
428 *deps = NULL;
429 info->major = major;
430 info->minor = minor;
431 info->exists = 0;
432 info->live_table = 0;
433 info->inactive_table = 0;
434 info->read_only = 0;
435 return 1;
436 }
437
438 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
439 log_error("deps dm_task creation failed");
440 return 0;
441 }
442
443 if (!dm_task_set_major(*dmt, major)) {
444 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
445 major, minor);
446 goto failed;
447 }
448
449 if (!dm_task_set_minor(*dmt, minor)) {
450 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
451 major, minor);
452 goto failed;
453 }
454
455 if (!dm_task_run(*dmt)) {
456 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
457 major, minor);
458 goto failed;
459 }
460
461 if (!dm_task_get_info(*dmt, info)) {
462 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
463 major, minor);
464 goto failed;
465 }
466
467 if (!info->exists) {
468 *name = "";
469 *uuid = "";
470 *deps = NULL;
471 } else {
472 if (info->major != major) {
473 log_error("Inconsistent dtree major number: %u != %u",
474 major, info->major);
475 goto failed;
476 }
477 if (info->minor != minor) {
478 log_error("Inconsistent dtree minor number: %u != %u",
479 minor, info->minor);
480 goto failed;
481 }
482 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
483 log_error("name pool_strdup failed");
484 goto failed;
485 }
486 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
487 log_error("uuid pool_strdup failed");
488 goto failed;
489 }
490 *deps = dm_task_get_deps(*dmt);
491 }
492
493 return 1;
494
495 failed:
496 dm_task_destroy(*dmt);
497 return 0;
498 }
499
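/*
 * Add a device to the tree if not already present, link it beneath
 * 'parent' and recurse into its dependencies.  Devices that are not
 * mapped devices or have no dependencies are attached to the bottom level.
 */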
500 static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
501 struct dm_tree_node *parent,
502 uint32_t major, uint32_t minor,
503 uint16_t udev_flags)
504 {
505 struct dm_task *dmt = NULL;
506 struct dm_info info;
507 struct dm_deps *deps = NULL;
508 const char *name = NULL;
509 const char *uuid = NULL;
510 struct dm_tree_node *node = NULL;
511 uint32_t i;
512 int new = 0;
513
514 /* Already in tree? */
515 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
516 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
517 return_NULL;
518
519 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
520 NULL, udev_flags)))
521 goto_out;
522 new = 1;
523 }
524
525 if (!_link_tree_nodes(parent, node)) {
526 node = NULL;
527 goto_out;
528 }
529
530 /* If node was already in tree, no need to recurse. */
531 if (!new)
532 goto out;
533
534 /* Can't recurse if not a mapped device or there are no dependencies */
535 if (!node->info.exists || !deps->count) {
536 if (!_add_to_bottomlevel(node)) {
537 stack;
538 node = NULL;
539 }
540 goto out;
541 }
542
543 /* Add dependencies to tree */
544 for (i = 0; i < deps->count; i++)
545 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
546 MINOR(deps->device[i]), udev_flags)) {
547 node = NULL;
548 goto_out;
549 }
550
551 out:
552 if (dmt)
553 dm_task_destroy(dmt);
554
555 return node;
556 }
557
558 static int _node_clear_table(struct dm_tree_node *dnode)
559 {
560 struct dm_task *dmt;
561 struct dm_info *info;
562 const char *name;
563 int r;
564
565 if (!(info = &dnode->info)) {
566 log_error("_node_clear_table failed: missing info");
567 return 0;
568 }
569
570 if (!(name = dm_tree_node_get_name(dnode))) {
571 log_error("_node_clear_table failed: missing name");
572 return 0;
573 }
574
575 /* Is there a table? */
576 if (!info->exists || !info->inactive_table)
577 return 1;
578
579 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
580 name, info->major, info->minor);
581
582 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
583 log_error("Table clear dm_task creation failed for %s", name);
584 return 0;
585 }
586
587 if (!dm_task_set_major(dmt, info->major) ||
588 !dm_task_set_minor(dmt, info->minor)) {
589 log_error("Failed to set device number for %s table clear", name);
590 dm_task_destroy(dmt);
591 return 0;
592 }
593
594 r = dm_task_run(dmt);
595
596 if (!dm_task_get_info(dmt, info)) {
597 log_error("_node_clear_table failed: info missing after running task for %s", name);
598 r = 0;
599 }
600
601 dm_task_destroy(dmt);
602
603 return r;
604 }
605
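/*
 * Find the node for uuid or create a new one.  A newly created node is
 * attached to both the top and bottom of the tree until a table is
 * supplied; an existing node whose name differs from 'name' is scheduled
 * for rename.  With clear_inactive set, any inactive table is cleared.
 */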
606 struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
607 const char *name,
608 const char *uuid,
609 uint32_t major, uint32_t minor,
610 int read_only,
611 int clear_inactive,
612 void *context)
613 {
614 struct dm_tree_node *dnode;
615 struct dm_info info;
616 const char *name2;
617 const char *uuid2;
618
619 /* Do we need to add node to tree? */
620 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
621 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
622 log_error("name pool_strdup failed");
623 return NULL;
624 }
625 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
626 log_error("uuid pool_strdup failed");
627 return NULL;
628 }
629
630 info.major = 0;
631 info.minor = 0;
632 info.exists = 0;
633 info.live_table = 0;
634 info.inactive_table = 0;
635 info.read_only = 0;
636
637 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
638 context, 0)))
639 return_NULL;
640
641 /* Attach to root node until a table is supplied */
642 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
643 return_NULL;
644
645 dnode->props.major = major;
646 dnode->props.minor = minor;
647 dnode->props.new_name = NULL;
648 dnode->props.size_changed = 0;
649 } else if (strcmp(name, dnode->name)) {
650 /* Do we need to rename node? */
651 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
652 log_error("name pool_strdup failed");
653 			return NULL;
654 }
655 }
656
657 dnode->props.read_only = read_only ? 1 : 0;
658 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
659 dnode->props.read_ahead_flags = 0;
660
661 if (clear_inactive && !_node_clear_table(dnode))
662 return_NULL;
663
664 dnode->context = context;
665 dnode->udev_flags = 0;
666
667 return dnode;
668 }
669
670 struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
671 const char *name,
672 const char *uuid,
673 uint32_t major,
674 uint32_t minor,
675 int read_only,
676 int clear_inactive,
677 void *context,
678 uint16_t udev_flags)
679 {
680 struct dm_tree_node *node;
681
682 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
683 clear_inactive, context)))
684 node->udev_flags = udev_flags;
685
686 return node;
687 }
688
689
690 void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
691 uint32_t read_ahead,
692 uint32_t read_ahead_flags)
693 {
694 dnode->props.read_ahead = read_ahead;
695 dnode->props.read_ahead_flags = read_ahead_flags;
696 }
697
698 void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
699 struct dm_tree_node *presuspend_node)
700 {
701 node->presuspend_node = presuspend_node;
702 }
703
704 int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
705 {
706 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
707 }
708
709 int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
710 uint32_t minor, uint16_t udev_flags)
711 {
712 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
713 }
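/*
 * Minimal usage sketch (illustrative only; assumes the caller provides
 * 'major', 'minor' and 'uuid_prefix', and omits error handling):
 *
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *	if (dm_tree_add_dev(dtree, major, minor))
 *		dm_tree_deactivate_children(dm_tree_find_node(dtree, major, minor),
 *					    uuid_prefix, strlen(uuid_prefix));
 *	dm_tree_free(dtree);
 */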
714
715 const char *dm_tree_node_get_name(const struct dm_tree_node *node)
716 {
717 return node->info.exists ? node->name : "";
718 }
719
720 const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
721 {
722 return node->info.exists ? node->uuid : "";
723 }
724
725 const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
726 {
727 return &node->info;
728 }
729
730 void *dm_tree_node_get_context(const struct dm_tree_node *node)
731 {
732 return node->context;
733 }
734
735 int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
736 {
737 return dnode->props.size_changed;
738 }
739
740 int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
741 {
742 if (inverted) {
743 if (_nodes_are_linked(&node->dtree->root, node))
744 return 0;
745 return dm_list_size(&node->used_by);
746 }
747
748 if (_nodes_are_linked(node, &node->dtree->root))
749 return 0;
750
751 return dm_list_size(&node->uses);
752 }
753
754 /*
755 * Returns 1 if no prefix supplied
756 */
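/*
 * Transition example with illustrative values: an already-active device
 * may carry uuid "vg1-lv1-..." without the "LVM-" prefix while the caller
 * passes uuid_prefix "LVM-vg1-lv1-..."; the final comparison below then
 * retries with the "LVM-" prefix stripped from uuid_prefix.
 */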
757 static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
758 {
759 if (!uuid_prefix)
760 return 1;
761
762 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
763 return 1;
764
765 /* Handle transition: active device uuids might be missing the prefix */
766 if (uuid_prefix_len <= 4)
767 return 0;
768
769 if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
770 return 0;
771
772 if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
773 return 0;
774
775 if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
776 return 1;
777
778 return 0;
779 }
780
781 /*
782 * Returns 1 if no children.
783 */
784 static int _children_suspended(struct dm_tree_node *node,
785 uint32_t inverted,
786 const char *uuid_prefix,
787 size_t uuid_prefix_len)
788 {
789 struct dm_list *list;
790 struct dm_tree_link *dlink;
791 const struct dm_info *dinfo;
792 const char *uuid;
793
794 if (inverted) {
795 if (_nodes_are_linked(&node->dtree->root, node))
796 return 1;
797 list = &node->used_by;
798 } else {
799 if (_nodes_are_linked(node, &node->dtree->root))
800 return 1;
801 list = &node->uses;
802 }
803
804 dm_list_iterate_items(dlink, list) {
805 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
806 stack;
807 continue;
808 }
809
810 /* Ignore if it doesn't belong to this VG */
811 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
812 continue;
813
814 /* Ignore if parent node wants to presuspend this node */
815 if (dlink->node->presuspend_node == node)
816 continue;
817
818 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
819 stack; /* FIXME Is this normal? */
820 return 0;
821 }
822
823 if (!dinfo->suspended)
824 return 0;
825 }
826
827 return 1;
828 }
829
830 /*
831 * Set major and minor to zero for root of tree.
832 */
833 struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
834 uint32_t major,
835 uint32_t minor)
836 {
837 if (!major && !minor)
838 return &dtree->root;
839
840 return _find_dm_tree_node(dtree, major, minor);
841 }
842
843 /*
844 * Set uuid to NULL for root of tree.
845 */
846 struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
847 const char *uuid)
848 {
849 if (!uuid || !*uuid)
850 return &dtree->root;
851
852 return _find_dm_tree_node_by_uuid(dtree, uuid);
853 }
854
855 /*
856 * First time set *handle to NULL.
857 * Set inverted to invert the tree.
858 */
859 struct dm_tree_node *dm_tree_next_child(void **handle,
860 const struct dm_tree_node *parent,
861 uint32_t inverted)
862 {
863 struct dm_list **dlink = (struct dm_list **) handle;
864 const struct dm_list *use_list;
865
866 if (inverted)
867 use_list = &parent->used_by;
868 else
869 use_list = &parent->uses;
870
871 if (!*dlink)
872 *dlink = dm_list_first(use_list);
873 else
874 *dlink = dm_list_next(use_list, *dlink);
875
876 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
877 }
878
879 /*
880 * Deactivate a device with its dependencies if the uuid prefix matches.
881 */
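/*
 * Fetch dm_info for major:minor via DM_DEVICE_INFO, optionally including
 * the open_count when with_open_count is set.
 */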
882 static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
883 struct dm_info *info)
884 {
885 struct dm_task *dmt;
886 int r;
887
888 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
889 log_error("_info_by_dev: dm_task creation failed");
890 return 0;
891 }
892
893 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
894 log_error("_info_by_dev: Failed to set device number");
895 dm_task_destroy(dmt);
896 return 0;
897 }
898
899 if (!with_open_count && !dm_task_no_open_count(dmt))
900 log_error("Failed to disable open_count");
901
902 if ((r = dm_task_run(dmt)))
903 r = dm_task_get_info(dmt, info);
904
905 dm_task_destroy(dmt);
906
907 return r;
908 }
909
910 /* Check if all parent nodes of given node have open_count == 0 */
911 static int _node_has_closed_parents(struct dm_tree_node *node,
912 const char *uuid_prefix,
913 size_t uuid_prefix_len)
914 {
915 struct dm_tree_link *dlink;
916 const struct dm_info *dinfo;
917 struct dm_info info;
918 const char *uuid;
919
920 /* Iterate through parents of this node */
921 dm_list_iterate_items(dlink, &node->used_by) {
922 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
923 stack;
924 continue;
925 }
926
927 /* Ignore if it doesn't belong to this VG */
928 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
929 continue;
930
931 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
932 stack; /* FIXME Is this normal? */
933 return 0;
934 }
935
936 /* Refresh open_count */
937 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
938 !info.exists)
939 continue;
940
941 if (info.open_count) {
942 log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
943 dinfo->major, dinfo->minor, info.open_count);
944 return 0;
945 }
946 }
947
948 return 1;
949 }
950
951 static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
952 uint32_t *cookie, uint16_t udev_flags)
953 {
954 struct dm_task *dmt;
955 int r = 0;
956
957 log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
958
959 if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
960 log_error("Deactivation dm_task creation failed for %s", name);
961 return 0;
962 }
963
964 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
965 log_error("Failed to set device number for %s deactivation", name);
966 goto out;
967 }
968
969 if (!dm_task_no_open_count(dmt))
970 log_error("Failed to disable open_count");
971
972 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
973 goto out;
974
975 r = dm_task_run(dmt);
976
977 /* FIXME Until kernel returns actual name so dm-ioctl.c can handle it */
978 rm_dev_node(name, dmt->cookie_set &&
979 !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG));
980
981 /* FIXME Remove node from tree or mark invalid? */
982
983 out:
984 dm_task_destroy(dmt);
985
986 return r;
987 }
988
989 static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
990 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
991 {
992 struct dm_task *dmt;
993 int r = 0;
994
995 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
996
997 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
998 log_error("Rename dm_task creation failed for %s", old_name);
999 return 0;
1000 }
1001
1002 if (!dm_task_set_name(dmt, old_name)) {
1003 log_error("Failed to set name for %s rename.", old_name);
1004 goto out;
1005 }
1006
1007 if (!dm_task_set_newname(dmt, new_name))
1008 goto_out;
1009
1010 if (!dm_task_no_open_count(dmt))
1011 log_error("Failed to disable open_count");
1012
1013 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1014 goto out;
1015
1016 r = dm_task_run(dmt);
1017
1018 out:
1019 dm_task_destroy(dmt);
1020
1021 return r;
1022 }
1023
1024 /* FIXME Merge with _suspend_node? */
1025 static int _resume_node(const char *name, uint32_t major, uint32_t minor,
1026 uint32_t read_ahead, uint32_t read_ahead_flags,
1027 struct dm_info *newinfo, uint32_t *cookie,
1028 uint16_t udev_flags)
1029 {
1030 struct dm_task *dmt;
1031 int r = 0;
1032
1033 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1034
1035 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
1036 log_error("Suspend dm_task creation failed for %s", name);
1037 return 0;
1038 }
1039
1040 /* FIXME Kernel should fill in name on return instead */
1041 if (!dm_task_set_name(dmt, name)) {
1042 log_error("Failed to set readahead device name for %s", name);
1043 goto out;
1044 }
1045
1046 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1047 log_error("Failed to set device number for %s resumption.", name);
1048 goto out;
1049 }
1050
1051 if (!dm_task_no_open_count(dmt))
1052 log_error("Failed to disable open_count");
1053
1054 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1055 log_error("Failed to set read ahead");
1056
1057 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1058 goto out;
1059
1060 if ((r = dm_task_run(dmt)))
1061 r = dm_task_get_info(dmt, newinfo);
1062
1063 out:
1064 dm_task_destroy(dmt);
1065
1066 return r;
1067 }
1068
1069 static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
1070 int skip_lockfs, int no_flush, struct dm_info *newinfo)
1071 {
1072 struct dm_task *dmt;
1073 int r;
1074
1075 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1076 name, major, minor,
1077 skip_lockfs ? "" : " with filesystem sync",
1078 no_flush ? "" : " with device flush");
1079
1080 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1081 log_error("Suspend dm_task creation failed for %s", name);
1082 return 0;
1083 }
1084
1085 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1086 log_error("Failed to set device number for %s suspension.", name);
1087 dm_task_destroy(dmt);
1088 return 0;
1089 }
1090
1091 if (!dm_task_no_open_count(dmt))
1092 log_error("Failed to disable open_count");
1093
1094 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1095 log_error("Failed to set skip_lockfs flag.");
1096
1097 if (no_flush && !dm_task_no_flush(dmt))
1098 log_error("Failed to set no_flush flag.");
1099
1100 if ((r = dm_task_run(dmt)))
1101 r = dm_task_get_info(dmt, newinfo);
1102
1103 dm_task_destroy(dmt);
1104
1105 return r;
1106 }
1107
1108 /*
1109 * FIXME Don't attempt to deactivate known internal dependencies.
1110 */
1111 static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1112 const char *uuid_prefix,
1113 size_t uuid_prefix_len,
1114 unsigned level)
1115 {
1116 int r = 1;
1117 void *handle = NULL;
1118 struct dm_tree_node *child = dnode;
1119 struct dm_info info;
1120 const struct dm_info *dinfo;
1121 const char *name;
1122 const char *uuid;
1123
1124 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1125 if (!(dinfo = dm_tree_node_get_info(child))) {
1126 stack;
1127 continue;
1128 }
1129
1130 if (!(name = dm_tree_node_get_name(child))) {
1131 stack;
1132 continue;
1133 }
1134
1135 if (!(uuid = dm_tree_node_get_uuid(child))) {
1136 stack;
1137 continue;
1138 }
1139
1140 /* Ignore if it doesn't belong to this VG */
1141 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1142 continue;
1143
1144 /* Refresh open_count */
1145 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1146 !info.exists)
1147 continue;
1148
1149 /* Also checking open_count in parent nodes of presuspend_node */
1150 if (info.open_count ||
1151 (child->presuspend_node &&
1152 !_node_has_closed_parents(child->presuspend_node,
1153 uuid_prefix, uuid_prefix_len))) {
1154 /* Only report error from (likely non-internal) dependency at top level */
1155 if (!level) {
1156 log_error("Unable to deactivate open %s (%" PRIu32
1157 ":%" PRIu32 ")", name, info.major,
1158 info.minor);
1159 r = 0;
1160 }
1161 continue;
1162 }
1163
1164 /* Suspend child node first if requested */
1165 if (child->presuspend_node &&
1166 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1167 continue;
1168
1169 if (!_deactivate_node(name, info.major, info.minor,
1170 &child->dtree->cookie, child->udev_flags)) {
1171 log_error("Unable to deactivate %s (%" PRIu32
1172 ":%" PRIu32 ")", name, info.major,
1173 info.minor);
1174 r = 0;
1175 continue;
1176 }
1177
1178 if (dm_tree_node_num_children(child, 0)) {
1179 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
1180 return_0;
1181 }
1182 }
1183
1184 return r;
1185 }
1186
1187 int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1188 const char *uuid_prefix,
1189 size_t uuid_prefix_len)
1190 {
1191 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1192 }
1193
1194 void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1195 {
1196 dnode->dtree->skip_lockfs = 1;
1197 }
1198
1199 void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1200 {
1201 dnode->dtree->no_flush = 1;
1202 }
1203
1204 int dm_tree_suspend_children(struct dm_tree_node *dnode,
1205 const char *uuid_prefix,
1206 size_t uuid_prefix_len)
1207 {
1208 int r = 1;
1209 void *handle = NULL;
1210 struct dm_tree_node *child = dnode;
1211 struct dm_info info, newinfo;
1212 const struct dm_info *dinfo;
1213 const char *name;
1214 const char *uuid;
1215
1216 /* Suspend nodes at this level of the tree */
1217 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1218 if (!(dinfo = dm_tree_node_get_info(child))) {
1219 stack;
1220 continue;
1221 }
1222
1223 if (!(name = dm_tree_node_get_name(child))) {
1224 stack;
1225 continue;
1226 }
1227
1228 if (!(uuid = dm_tree_node_get_uuid(child))) {
1229 stack;
1230 continue;
1231 }
1232
1233 /* Ignore if it doesn't belong to this VG */
1234 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1235 continue;
1236
1237 /* Ensure immediate parents are already suspended */
1238 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1239 continue;
1240
1241 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
1242 !info.exists || info.suspended)
1243 continue;
1244
1245 if (!_suspend_node(name, info.major, info.minor,
1246 child->dtree->skip_lockfs,
1247 child->dtree->no_flush, &newinfo)) {
1248 log_error("Unable to suspend %s (%" PRIu32
1249 ":%" PRIu32 ")", name, info.major,
1250 info.minor);
1251 r = 0;
1252 continue;
1253 }
1254
1255 /* Update cached info */
1256 child->info = newinfo;
1257 }
1258
1259 /* Then suspend any child nodes */
1260 handle = NULL;
1261
1262 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1263 if (!(uuid = dm_tree_node_get_uuid(child))) {
1264 stack;
1265 continue;
1266 }
1267
1268 /* Ignore if it doesn't belong to this VG */
1269 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1270 continue;
1271
1272 if (dm_tree_node_num_children(child, 0))
1273 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1274 return_0;
1275 }
1276
1277 return r;
1278 }
1279
1280 int dm_tree_activate_children(struct dm_tree_node *dnode,
1281 const char *uuid_prefix,
1282 size_t uuid_prefix_len)
1283 {
1284 int r = 1;
1285 void *handle = NULL;
1286 struct dm_tree_node *child = dnode;
1287 struct dm_info newinfo;
1288 const char *name;
1289 const char *uuid;
1290 int priority;
1291
1292 /* Activate children first */
1293 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1294 if (!(uuid = dm_tree_node_get_uuid(child))) {
1295 stack;
1296 continue;
1297 }
1298
1299 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1300 continue;
1301
1302 if (dm_tree_node_num_children(child, 0))
1303 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1304 return_0;
1305 }
1306
1307 handle = NULL;
1308
1309 for (priority = 0; priority < 3; priority++) {
1310 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1311 if (!(uuid = dm_tree_node_get_uuid(child))) {
1312 stack;
1313 continue;
1314 }
1315
1316 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1317 continue;
1318
1319 if (priority != child->activation_priority)
1320 continue;
1321
1322 if (!(name = dm_tree_node_get_name(child))) {
1323 stack;
1324 continue;
1325 }
1326
1327 /* Rename? */
1328 if (child->props.new_name) {
1329 if (!_rename_node(name, child->props.new_name, child->info.major,
1330 child->info.minor, &child->dtree->cookie,
1331 child->udev_flags)) {
1332 log_error("Failed to rename %s (%" PRIu32
1333 ":%" PRIu32 ") to %s", name, child->info.major,
1334 child->info.minor, child->props.new_name);
1335 return 0;
1336 }
1337 child->name = child->props.new_name;
1338 child->props.new_name = NULL;
1339 }
1340
1341 if (!child->info.inactive_table && !child->info.suspended)
1342 continue;
1343
1344 if (!_resume_node(child->name, child->info.major, child->info.minor,
1345 child->props.read_ahead, child->props.read_ahead_flags,
1346 &newinfo, &child->dtree->cookie, child->udev_flags)) {
1347 log_error("Unable to resume %s (%" PRIu32
1348 ":%" PRIu32 ")", child->name, child->info.major,
1349 child->info.minor);
1350 r = 0;
1351 continue;
1352 }
1353
1354 /* Update cached info */
1355 child->info = newinfo;
1356 }
1357 }
1358
1359 handle = NULL;
1360
1361 return r;
1362 }
1363
1364 static int _create_node(struct dm_tree_node *dnode)
1365 {
1366 int r = 0;
1367 struct dm_task *dmt;
1368
1369 log_verbose("Creating %s", dnode->name);
1370
1371 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1372 log_error("Create dm_task creation failed for %s", dnode->name);
1373 return 0;
1374 }
1375
1376 if (!dm_task_set_name(dmt, dnode->name)) {
1377 log_error("Failed to set device name for %s", dnode->name);
1378 goto out;
1379 }
1380
1381 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1382 log_error("Failed to set uuid for %s", dnode->name);
1383 goto out;
1384 }
1385
1386 if (dnode->props.major &&
1387 (!dm_task_set_major(dmt, dnode->props.major) ||
1388 !dm_task_set_minor(dmt, dnode->props.minor))) {
1389 log_error("Failed to set device number for %s creation.", dnode->name);
1390 goto out;
1391 }
1392
1393 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1394 log_error("Failed to set read only flag for %s", dnode->name);
1395 goto out;
1396 }
1397
1398 if (!dm_task_no_open_count(dmt))
1399 log_error("Failed to disable open_count");
1400
1401 if ((r = dm_task_run(dmt)))
1402 r = dm_task_get_info(dmt, &dnode->info);
1403
1404 out:
1405 dm_task_destroy(dmt);
1406
1407 return r;
1408 }
1409
1410
1411 static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
1412 {
1413 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
1414 log_error("Failed to format %s device number for %s as dm "
1415 "target (%u,%u)",
1416 node->name, node->uuid, node->info.major, node->info.minor);
1417 return 0;
1418 }
1419
1420 return 1;
1421 }
1422
1423 /* Simplify string emitting code */
1424 #define EMIT_PARAMS(p, str...)\
1425 do {\
1426 int w;\
1427 if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
1428 stack; /* Out of space */\
1429 return -1;\
1430 }\
1431 p += w;\
1432 } while (0)
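/*
 * EMIT_PARAMS appends printf-style text to 'params' at offset 'p' and
 * advances 'p'.  On overflow it returns -1 from the calling function,
 * which lets _emit_segment() retry the whole line with a larger buffer.
 */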
1433
1434 /*
1435 * _emit_areas_line
1436 *
1437 * Returns: 1 on success, 0 on failure
1438 */
1439 static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
1440 struct load_segment *seg, char *params,
1441 size_t paramsize, int *pos)
1442 {
1443 struct seg_area *area;
1444 char devbuf[DM_FORMAT_DEV_BUFSIZE];
1445 unsigned first_time = 1;
1446 const char *logtype, *synctype;
1447 unsigned log_parm_count;
1448
1449 dm_list_iterate_items(area, &seg->areas) {
1450 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1451 return_0;
1452
1453 switch (seg->type) {
1454 case SEG_REPLICATOR_DEV:
1455 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1456 if (first_time)
1457 EMIT_PARAMS(*pos, " nolog 0");
1458 else {
1459 /* Remote devices */
1460 log_parm_count = (area->flags &
1461 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1462
1463 if (!area->slog) {
1464 devbuf[0] = 0; /* Only core log parameters */
1465 logtype = "core";
1466 } else {
1467 devbuf[0] = ' '; /* Extra space before device name */
1468 if (!_build_dev_string(devbuf + 1,
1469 sizeof(devbuf) - 1,
1470 area->slog))
1471 return_0;
1472 logtype = "disk";
1473 log_parm_count++; /* Extra sync log device name parameter */
1474 }
1475
1476 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1477 log_parm_count, devbuf, area->region_size);
1478
1479 synctype = (area->flags & DM_NOSYNC) ?
1480 " nosync" : (area->flags & DM_FORCESYNC) ?
1481 " sync" : NULL;
1482
1483 if (synctype)
1484 EMIT_PARAMS(*pos, "%s", synctype);
1485 }
1486 break;
1487 default:
1488 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1489 devbuf, area->offset);
1490 }
1491
1492 first_time = 0;
1493 }
1494
1495 return 1;
1496 }
1497
1498 static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
1499 size_t paramsize, int *pos)
1500 {
1501 const struct load_segment *rlog_seg;
1502 struct replicator_site *rsite;
1503 char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
1504 unsigned parm_count;
1505
1506 if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
1507 return_0;
1508
1509 rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
1510 struct load_segment);
1511
1512 EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
1513 seg->rlog_type, rlogbuf, rlog_seg->size);
1514
1515 dm_list_iterate_items(rsite, &seg->rsites) {
1516 parm_count = (rsite->fall_behind_data
1517 || rsite->fall_behind_ios
1518 || rsite->async_timeout) ? 4 : 2;
1519
1520 EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
1521 (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");
1522
1523 if (rsite->fall_behind_data)
1524 EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
1525 else if (rsite->fall_behind_ios)
1526 EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
1527 else if (rsite->async_timeout)
1528 EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
1529 }
1530
1531 return 1;
1532 }
1533
1534 /*
1535 * Returns: 1 on success, 0 on failure
1536 */
1537 static int _mirror_emit_segment_line(struct dm_task *dmt, uint32_t major,
1538 uint32_t minor, struct load_segment *seg,
1539 uint64_t *seg_start, char *params,
1540 size_t paramsize)
1541 {
1542 int block_on_error = 0;
1543 int handle_errors = 0;
1544 int dm_log_userspace = 0;
1545 struct utsname uts;
1546 unsigned log_parm_count;
1547 int pos = 0;
1548 char logbuf[DM_FORMAT_DEV_BUFSIZE];
1549 const char *logtype;
1550 unsigned kmaj, kmin, krel;
1551
1552 if (uname(&uts) == -1 || sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel) != 3) {
1553 log_error("Cannot read kernel release version");
1554 return 0;
1555 }
1556
1557 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1558 /*
1559 * Originally, block_on_error was an argument to the log
1560 * portion of the mirror CTR table. It was renamed to
1561 * "handle_errors" and now resides in the 'features'
1562 * section of the mirror CTR table (i.e. at the end).
1563 *
1564 * We can identify whether to use "block_on_error" or
1565 * "handle_errors" by the dm-mirror module's version
1566 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1567 */
1568 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
1569 handle_errors = 1;
1570 else
1571 block_on_error = 1;
1572 }
1573
1574 if (seg->clustered) {
1575 /* Cluster mirrors require a UUID */
1576 if (!seg->uuid)
1577 return_0;
1578
1579 /*
1580 * Cluster mirrors used to have their own log
1581 * types. Now they are accessed through the
1582 * userspace log type.
1583 *
1584 * The dm-log-userspace module was added to the
1585 * 2.6.31 kernel.
1586 */
1587 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
1588 dm_log_userspace = 1;
1589 }
1590
1591 /* Region size */
1592 log_parm_count = 1;
1593
1594 /* [no]sync, block_on_error etc. */
1595 log_parm_count += hweight32(seg->flags);
1596
1597 /* "handle_errors" is a feature arg now */
1598 if (handle_errors)
1599 log_parm_count--;
1600
1601 /* DM_CORELOG does not count in the param list */
1602 if (seg->flags & DM_CORELOG)
1603 log_parm_count--;
1604
1605 if (seg->clustered) {
1606 log_parm_count++; /* For UUID */
1607
1608 if (!dm_log_userspace)
1609 EMIT_PARAMS(pos, "clustered-");
1610 else
1611 /* For clustered-* type field inserted later */
1612 log_parm_count++;
1613 }
1614
1615 if (!seg->log)
1616 logtype = "core";
1617 else {
1618 logtype = "disk";
1619 log_parm_count++;
1620 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1621 return_0;
1622 }
1623
1624 if (dm_log_userspace)
1625 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1626 log_parm_count, seg->uuid, logtype);
1627 else
1628 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
1629
1630 if (seg->log)
1631 EMIT_PARAMS(pos, " %s", logbuf);
1632
1633 EMIT_PARAMS(pos, " %u", seg->region_size);
1634
1635 if (seg->clustered && !dm_log_userspace)
1636 EMIT_PARAMS(pos, " %s", seg->uuid);
1637
1638 if ((seg->flags & DM_NOSYNC))
1639 EMIT_PARAMS(pos, " nosync");
1640 else if ((seg->flags & DM_FORCESYNC))
1641 EMIT_PARAMS(pos, " sync");
1642
1643 if (block_on_error)
1644 EMIT_PARAMS(pos, " block_on_error");
1645
1646 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1647
1648 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
1649 return_0;
1650
1651 if (handle_errors)
1652 EMIT_PARAMS(pos, " 1 handle_errors");
1653
1654 return 1;
1655 }
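/*
 * Summary of the parameter string built above:
 *   <log_type> <#log_args> [<log_dev>] <region_size> [<uuid>] [nosync|sync]
 *   [block_on_error] <#mirrors> <dev> <offset> ... [1 handle_errors]
 * Clustered mirrors on recent kernels go through the "userspace" log type
 * with a "clustered-<log_type>" argument instead.
 */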
1656
1657 static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
1658 uint32_t minor, struct load_segment *seg,
1659 uint64_t *seg_start, char *params,
1660 size_t paramsize)
1661 {
1662 int pos = 0;
1663 int r;
1664 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
1665
1666 switch(seg->type) {
1667 case SEG_ERROR:
1668 case SEG_ZERO:
1669 case SEG_LINEAR:
1670 break;
1671 case SEG_MIRRORED:
1672 /* Mirrors are pretty complicated - now in separate function */
1673 r = _mirror_emit_segment_line(dmt, major, minor, seg, seg_start,
1674 params, paramsize);
1675 if (!r)
1676 return_0;
1677 break;
1678 case SEG_REPLICATOR:
1679 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
1680 &pos)) <= 0) {
1681 stack;
1682 return r;
1683 }
1684 break;
1685 case SEG_REPLICATOR_DEV:
1686 if (!seg->replicator || !_build_dev_string(originbuf,
1687 sizeof(originbuf),
1688 seg->replicator))
1689 return_0;
1690
1691 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
1692 break;
1693 case SEG_SNAPSHOT:
1694 case SEG_SNAPSHOT_MERGE:
1695 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1696 return_0;
1697 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
1698 return_0;
1699 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
1700 seg->persistent ? 'P' : 'N', seg->chunk_size);
1701 break;
1702 case SEG_SNAPSHOT_ORIGIN:
1703 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1704 return_0;
1705 EMIT_PARAMS(pos, "%s", originbuf);
1706 break;
1707 case SEG_STRIPED:
1708 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
1709 break;
1710 case SEG_CRYPT:
1711 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
1712 seg->chainmode ? "-" : "", seg->chainmode ?: "",
1713 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
1714 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
1715 seg->iv_offset : *seg_start);
1716 break;
1717 }
1718
1719 switch(seg->type) {
1720 case SEG_ERROR:
1721 case SEG_REPLICATOR:
1722 case SEG_SNAPSHOT:
1723 case SEG_SNAPSHOT_ORIGIN:
1724 case SEG_SNAPSHOT_MERGE:
1725 case SEG_ZERO:
1726 break;
1727 case SEG_CRYPT:
1728 case SEG_LINEAR:
1729 case SEG_REPLICATOR_DEV:
1730 case SEG_STRIPED:
1731 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
1732 stack;
1733 return r;
1734 }
1735 break;
1736 }
1737
1738 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
1739 " %" PRIu64 " %s %s", major, minor,
1740 *seg_start, seg->size, dm_segtypes[seg->type].target, params);
1741
1742 if (!dm_task_add_target(dmt, *seg_start, seg->size, dm_segtypes[seg->type].target, params))
1743 return_0;
1744
1745 *seg_start += seg->size;
1746
1747 return 1;
1748 }
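/*
 * Illustrative examples of emitted parameter strings: a linear segment
 * produces "<dev> <offset>", e.g. "253:2 384"; a striped segment produces
 * "<#stripes> <stripe_size> <dev> <offset> ...", e.g. "2 128 253:2 0 253:3 0".
 */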
1749
1750 #undef EMIT_PARAMS
1751
1752 static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
1753 struct load_segment *seg, uint64_t *seg_start)
1754 {
1755 char *params;
1756 size_t paramsize = 4096;
1757 int ret;
1758
1759 do {
1760 if (!(params = dm_malloc(paramsize))) {
1761 log_error("Insufficient space for target parameters.");
1762 return 0;
1763 }
1764
1765 params[0] = '\0';
1766 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
1767 params, paramsize);
1768 dm_free(params);
1769
1770 if (!ret)
1771 stack;
1772
1773 if (ret >= 0)
1774 return ret;
1775
1776 log_debug("Insufficient space in params[%" PRIsize_t
1777 "] for target parameters.", paramsize);
1778
1779 paramsize *= 2;
1780 } while (paramsize < MAX_TARGET_PARAMSIZE);
1781
1782 log_error("Target parameter size too big. Aborting.");
1783 return 0;
1784 }
1785
1786 static int _load_node(struct dm_tree_node *dnode)
1787 {
1788 int r = 0;
1789 struct dm_task *dmt;
1790 struct load_segment *seg;
1791 uint64_t seg_start = 0;
1792
1793 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
1794 dnode->info.major, dnode->info.minor);
1795
1796 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
1797 log_error("Reload dm_task creation failed for %s", dnode->name);
1798 return 0;
1799 }
1800
1801 if (!dm_task_set_major(dmt, dnode->info.major) ||
1802 !dm_task_set_minor(dmt, dnode->info.minor)) {
1803 log_error("Failed to set device number for %s reload.", dnode->name);
1804 goto out;
1805 }
1806
1807 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1808 log_error("Failed to set read only flag for %s", dnode->name);
1809 goto out;
1810 }
1811
1812 if (!dm_task_no_open_count(dmt))
1813 log_error("Failed to disable open_count");
1814
1815 dm_list_iterate_items(seg, &dnode->props.segs)
1816 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
1817 seg, &seg_start))
1818 goto_out;
1819
1820 if (!dm_task_suppress_identical_reload(dmt))
1821 log_error("Failed to suppress reload of identical tables.");
1822
1823 if ((r = dm_task_run(dmt))) {
1824 r = dm_task_get_info(dmt, &dnode->info);
1825 if (r && !dnode->info.inactive_table)
1826 log_verbose("Suppressed %s identical table reload.",
1827 dnode->name);
1828
1829 if ((dnode->props.size_changed =
1830 (dm_task_get_existing_table_size(dmt) == seg_start) ? 0 : 1))
1831 log_debug("Table size changed from %" PRIu64 " to %"
1832 PRIu64 " for %s",
1833 dm_task_get_existing_table_size(dmt),
1834 seg_start, dnode->name);
1835 }
1836
1837 dnode->props.segment_count = 0;
1838
1839 out:
1840 dm_task_destroy(dmt);
1841
1842 return r;
1843 }
1844
1845 int dm_tree_preload_children(struct dm_tree_node *dnode,
1846 const char *uuid_prefix,
1847 size_t uuid_prefix_len)
1848 {
1849 int r = 1;
1850 void *handle = NULL;
1851 struct dm_tree_node *child;
1852 struct dm_info newinfo;
1853 int update_devs_flag = 0;
1854
1855 /* Preload children first */
1856 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1857 /* Skip existing non-device-mapper devices */
1858 if (!child->info.exists && child->info.major)
1859 continue;
1860
1861 /* Ignore if it doesn't belong to this VG */
1862 if (child->info.exists &&
1863 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
1864 continue;
1865
1866 if (dm_tree_node_num_children(child, 0))
1867 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
1868 return_0;
1869
1870 /* FIXME Cope if name exists with no uuid? */
1871 if (!child->info.exists) {
1872 if (!_create_node(child)) {
1873 stack;
1874 return 0;
1875 }
1876 }
1877
1878 if (!child->info.inactive_table && child->props.segment_count) {
1879 if (!_load_node(child)) {
1880 stack;
1881 return 0;
1882 }
1883 }
1884
1885 		/* Propagate device size change */
1886 if (child->props.size_changed)
1887 dnode->props.size_changed = 1;
1888
1889 /* Resume device immediately if it has parents and its size changed */
1890 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
1891 continue;
1892
1893 if (!child->info.inactive_table && !child->info.suspended)
1894 continue;
1895
1896 if (!_resume_node(child->name, child->info.major, child->info.minor,
1897 child->props.read_ahead, child->props.read_ahead_flags,
1898 &newinfo, &child->dtree->cookie, child->udev_flags)) {
1899 log_error("Unable to resume %s (%" PRIu32
1900 ":%" PRIu32 ")", child->name, child->info.major,
1901 child->info.minor);
1902 r = 0;
1903 continue;
1904 }
1905
1906 /* Update cached info */
1907 child->info = newinfo;
1908
1909 /*
1910 * Prepare for immediate synchronization with udev and flush all stacked
1911 * dev node operations if requested by immediate_dev_node property. But
1912 * finish processing current level in the tree first.
1913 */
1914 if (child->props.immediate_dev_node)
1915 update_devs_flag = 1;
1916
1917 }
1918
1919 handle = NULL;
1920
1921 if (update_devs_flag) {
1922 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
1923 stack;
1924 dm_tree_set_cookie(dnode, 0);
1925 dm_task_update_nodes();
1926 }
1927
1928 return r;
1929 }
1930
1931 /*
1932 * Returns 1 if unsure.
1933 */
1934 int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
1935 const char *uuid_prefix,
1936 size_t uuid_prefix_len)
1937 {
1938 void *handle = NULL;
1939 struct dm_tree_node *child = dnode;
1940 const char *uuid;
1941
1942 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1943 if (!(uuid = dm_tree_node_get_uuid(child))) {
1944 log_error("Failed to get uuid for dtree node.");
1945 return 1;
1946 }
1947
1948 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1949 return 1;
1950
1951 		if (dm_tree_node_num_children(child, 0) &&
1952 		    dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len)) return 1;
1953 }
1954
1955 return 0;
1956 }
1957
1958 /*
1959 * Target functions
1960 */
1961 static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
1962 {
1963 struct load_segment *seg;
1964
1965 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
1966 log_error("dtree node segment allocation failed");
1967 return NULL;
1968 }
1969
1970 seg->type = type;
1971 seg->size = size;
1972 seg->area_count = 0;
1973 dm_list_init(&seg->areas);
1974 seg->stripe_size = 0;
1975 seg->persistent = 0;
1976 seg->chunk_size = 0;
1977 seg->cow = NULL;
1978 seg->origin = NULL;
1979 seg->merge = NULL;
1980
1981 dm_list_add(&dnode->props.segs, &seg->list);
1982 dnode->props.segment_count++;
1983
1984 return seg;
1985 }
1986
1987 int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
1988 uint64_t size,
1989 const char *origin_uuid)
1990 {
1991 struct load_segment *seg;
1992 struct dm_tree_node *origin_node;
1993
1994 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
1995 return_0;
1996
1997 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
1998 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
1999 return 0;
2000 }
2001
2002 seg->origin = origin_node;
2003 if (!_link_tree_nodes(dnode, origin_node))
2004 return_0;
2005
2006 /* Resume snapshot origins after new snapshots */
2007 dnode->activation_priority = 1;
2008
2009 return 1;
2010 }
2011
2012 static int _add_snapshot_target(struct dm_tree_node *node,
2013 uint64_t size,
2014 const char *origin_uuid,
2015 const char *cow_uuid,
2016 const char *merge_uuid,
2017 int persistent,
2018 uint32_t chunk_size)
2019 {
2020 struct load_segment *seg;
2021 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2022 unsigned seg_type;
2023
2024 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
2025
2026 if (!(seg = _add_segment(node, seg_type, size)))
2027 return_0;
2028
2029 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
2030 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2031 return 0;
2032 }
2033
2034 seg->origin = origin_node;
2035 if (!_link_tree_nodes(node, origin_node))
2036 return_0;
2037
2038 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
2039 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
2040 return 0;
2041 }
2042
2043 seg->cow = cow_node;
2044 if (!_link_tree_nodes(node, cow_node))
2045 return_0;
2046
2047 seg->persistent = persistent ? 1 : 0;
2048 seg->chunk_size = chunk_size;
2049
2050 if (merge_uuid) {
2051 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2052 /* not a pure error, merging snapshot may have been deactivated */
2053 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2054 } else {
2055 seg->merge = merge_node;
2056 /* must not link merging snapshot, would undermine activation_priority below */
2057 }
2058
2059 /* Resume snapshot-merge (acting origin) after other snapshots */
2060 node->activation_priority = 1;
2061 if (seg->merge) {
2062 /* Resume merging snapshot after snapshot-merge */
2063 seg->merge->activation_priority = 2;
2064 }
2065 }
2066
2067 return 1;
2068 }
2069
2070
2071 int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2072 uint64_t size,
2073 const char *origin_uuid,
2074 const char *cow_uuid,
2075 int persistent,
2076 uint32_t chunk_size)
2077 {
2078 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2079 NULL, persistent, chunk_size);
2080 }
2081
2082 int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2083 uint64_t size,
2084 const char *origin_uuid,
2085 const char *cow_uuid,
2086 const char *merge_uuid,
2087 uint32_t chunk_size)
2088 {
2089 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2090 merge_uuid, 1, chunk_size);
2091 }
2092
2093 int dm_tree_node_add_error_target(struct dm_tree_node *node,
2094 uint64_t size)
2095 {
2096 if (!_add_segment(node, SEG_ERROR, size))
2097 return_0;
2098
2099 return 1;
2100 }
2101
2102 int dm_tree_node_add_zero_target(struct dm_tree_node *node,
2103 uint64_t size)
2104 {
2105 if (!_add_segment(node, SEG_ZERO, size))
2106 return_0;
2107
2108 return 1;
2109 }
2110
2111 int dm_tree_node_add_linear_target(struct dm_tree_node *node,
2112 uint64_t size)
2113 {
2114 if (!_add_segment(node, SEG_LINEAR, size))
2115 return_0;
2116
2117 return 1;
2118 }
2119
2120 int dm_tree_node_add_striped_target(struct dm_tree_node *node,
2121 uint64_t size,
2122 uint32_t stripe_size)
2123 {
2124 struct load_segment *seg;
2125
2126 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2127 return_0;
2128
2129 seg->stripe_size = stripe_size;
2130
2131 return 1;
2132 }
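/*
 * Illustrative example (device paths are hypothetical): a two-way stripe
 * with a 64KiB stripe size (128 sectors) needs one area per stripe, e.g.
 *
 *	if (!dm_tree_node_add_striped_target(node, size, 128) ||
 *	    !dm_tree_node_add_target_area(node, "/dev/sdb", NULL, 0) ||
 *	    !dm_tree_node_add_target_area(node, "/dev/sdc", NULL, 0))
 *		return_0;
 */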
2133
2134 int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2135 uint64_t size,
2136 const char *cipher,
2137 const char *chainmode,
2138 const char *iv,
2139 uint64_t iv_offset,
2140 const char *key)
2141 {
2142 struct load_segment *seg;
2143
2144 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2145 return_0;
2146
2147 seg->cipher = cipher;
2148 seg->chainmode = chainmode;
2149 seg->iv = iv;
2150 seg->iv_offset = iv_offset;
2151 seg->key = key;
2152
2153 return 1;
2154 }
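/*
 * Illustrative example (key value is hypothetical): cipher, chainmode and iv
 * are later joined into a single "cipher-chainmode-iv" specification when the
 * table line is emitted, so
 *
 *	if (!dm_tree_node_add_crypt_target(node, size, "aes", "cbc",
 *					   "essiv:sha256", 0, hex_key))
 *		return_0;
 *
 * corresponds to the dm-crypt cipher string "aes-cbc-essiv:sha256".
 */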
2155
2156 int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
2157 uint32_t region_size,
2158 unsigned clustered,
2159 const char *log_uuid,
2160 unsigned area_count,
2161 uint32_t flags)
2162 {
2163 struct dm_tree_node *log_node = NULL;
2164 struct load_segment *seg;
2165
2166 if (!node->props.segment_count) {
2167 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2168 return 0;
2169 }
2170
2171 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2172
2173 if (log_uuid) {
2174 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2175 log_error("log uuid pool_strdup failed");
2176 return 0;
2177 }
2178 if (!(flags & DM_CORELOG)) {
2179 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2180 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2181 return 0;
2182 }
2183
2184 if (clustered)
2185 log_node->props.immediate_dev_node = 1;
2186
2187 if (!_link_tree_nodes(node, log_node))
2188 return_0;
2189 }
2190 }
2191
2192 seg->log = log_node;
2193 seg->region_size = region_size;
2194 seg->clustered = clustered;
2195 seg->mirror_area_count = area_count;
2196 seg->flags = flags;
2197
2198 return 1;
2199 }
2200
2201 int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
2202 uint64_t size)
2203 {
2204 if (!_add_segment(node, SEG_MIRRORED, size))
2205 return_0;
2206
2207 return 1;
2208 }
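/*
 * Illustrative example (uuids are hypothetical): the log is attached to the
 * most recently added segment, so the expected call order is mirror segment,
 * then log, then one area per mirror image, e.g.
 *
 *	if (!dm_tree_node_add_mirror_target(node, size) ||
 *	    !dm_tree_node_add_mirror_target_log(node, region_size, 0,
 *						log_uuid, 2, 0) ||
 *	    !dm_tree_node_add_target_area(node, NULL, img0_uuid, 0) ||
 *	    !dm_tree_node_add_target_area(node, NULL, img1_uuid, 0))
 *		return_0;
 */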
2209
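/*
 * Adds the replicator segment and links the replicator log when called for
 * the local site (rsite_index 0); every call, local or remote, then records
 * the replication parameters of the given site.
 */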
2210 int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2211 uint64_t size,
2212 const char *rlog_uuid,
2213 const char *rlog_type,
2214 unsigned rsite_index,
2215 dm_replicator_mode_t mode,
2216 uint32_t async_timeout,
2217 uint64_t fall_behind_data,
2218 uint32_t fall_behind_ios)
2219 {
2220 struct load_segment *rseg;
2221 struct replicator_site *rsite;
2222
2223 /* Local site0 - adds replicator segment and links rlog device */
2224 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2225 if (node->props.segment_count) {
2226 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2227 return 0;
2228 }
2229
2230 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2231 return_0;
2232
2233 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2234 log_error("Missing replicator log uuid %s.", rlog_uuid);
2235 return 0;
2236 }
2237
2238 if (!_link_tree_nodes(node, rseg->log))
2239 return_0;
2240
2241 if (strcmp(rlog_type, "ringbuffer") != 0) {
2242 log_error("Unsupported replicator log type %s.", rlog_type);
2243 return 0;
2244 }
2245
2246 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2247 return_0;
2248
2249 dm_list_init(&rseg->rsites);
2250 rseg->rdevice_count = 0;
2251 node->activation_priority = 1;
2252 }
2253
2254 /* Add site to segment */
2255 if (mode == DM_REPLICATOR_SYNC
2256 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2257 log_error("Async parameters passed for synchronnous replicator.");
2258 return 0;
2259 }
2260
2261 if (node->props.segment_count != 1) {
2262 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2263 return 0;
2264 }
2265
2266 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2267 if (rseg->type != SEG_REPLICATOR) {
2268 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment %s.",
2269 dm_segtypes[rseg->type].target);
2270 return 0;
2271 }
2272
2273 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2274 log_error("Failed to allocate remote site segment.");
2275 return 0;
2276 }
2277
2278 dm_list_add(&rseg->rsites, &rsite->list);
2279 rseg->rsite_count++;
2280
2281 rsite->mode = mode;
2282 rsite->async_timeout = async_timeout;
2283 rsite->fall_behind_data = fall_behind_data;
2284 rsite->fall_behind_ios = fall_behind_ios;
2285 rsite->rsite_index = rsite_index;
2286
2287 return 1;
2288 }
2289
2290 /* Appends device node to Replicator */
2291 int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2292 uint64_t size,
2293 const char *replicator_uuid,
2294 uint64_t rdevice_index,
2295 const char *rdev_uuid,
2296 unsigned rsite_index,
2297 const char *slog_uuid,
2298 uint32_t slog_flags,
2299 uint32_t slog_region_size)
2300 {
2301 struct seg_area *area;
2302 struct load_segment *rseg;
2303 struct load_segment *rep_seg;
2304
2305 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2306 /* Site index for local target */
2307 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2308 return_0;
2309
2310 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2311 log_error("Missing replicator uuid %s.", replicator_uuid);
2312 return 0;
2313 }
2314
2315 /* Local slink0 for replicator must always be initialized first */
2316 if (rseg->replicator->props.segment_count != 1) {
2317 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment.");
2318 return 0;
2319 }
2320
2321 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2322 if (rep_seg->type != SEG_REPLICATOR) {
2323 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment %s.",
2324 dm_segtypes[rep_seg->type].target);
2325 return 0;
2326 }
2327 rep_seg->rdevice_count++;
2328
2329 if (!_link_tree_nodes(node, rseg->replicator))
2330 return_0;
2331
2332 rseg->rdevice_index = rdevice_index;
2333 } else {
2334 /* Local slink0 for replicator must always be initialized first */
2335 if (node->props.segment_count != 1) {
2336 log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment.");
2337 return 0;
2338 }
2339
2340 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2341 if (rseg->type != SEG_REPLICATOR_DEV) {
2342 log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment %s.",
2343 dm_segtypes[rseg->type].target);
2344 return 0;
2345 }
2346 }
2347
2348 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2349 log_error("Unspecified sync log uuid.");
2350 return 0;
2351 }
2352
2353 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2354 return_0;
2355
2356 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2357
2358 if (!(slog_flags & DM_CORELOG)) {
2359 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2360 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2361 return 0;
2362 }
2363
2364 if (!_link_tree_nodes(node, area->slog))
2365 return_0;
2366 }
2367
2368 area->flags = slog_flags;
2369 area->region_size = slog_region_size;
2370 area->rsite_index = rsite_index;
2371
2372 return 1;
2373 }
2374
2375 static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
2376 {
2377 struct seg_area *area;
2378
2379 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
2380 log_error("Failed to allocate target segment area.");
2381 return 0;
2382 }
2383
2384 area->dev_node = dev_node;
2385 area->offset = offset;
2386
2387 dm_list_add(&seg->areas, &area->list);
2388 seg->area_count++;
2389
2390 return 1;
2391 }
2392
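/*
 * Adds one area to the last segment of the node.  The underlying device is
 * given either by uuid (an existing node in the tree) or by dev_name (a
 * block device path that is stat()ed and added to the tree).
 */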
2393 int dm_tree_node_add_target_area(struct dm_tree_node *node,
2394 const char *dev_name,
2395 const char *uuid,
2396 uint64_t offset)
2397 {
2398 struct load_segment *seg;
2399 struct stat info;
2400 struct dm_tree_node *dev_node;
2401
2402 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
2403 log_error("dm_tree_node_add_target_area called without device");
2404 return 0;
2405 }
2406
2407 if (uuid) {
2408 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
2409 log_error("Couldn't find area uuid %s.", uuid);
2410 return 0;
2411 }
2412 if (!_link_tree_nodes(node, dev_node))
2413 return_0;
2414 } else {
2415 if (stat(dev_name, &info) < 0) {
2416 log_error("Device %s not found.", dev_name);
2417 return 0;
2418 }
2419
2420 if (!S_ISBLK(info.st_mode)) {
2421 log_error("Device %s is not a block device.", dev_name);
2422 return 0;
2423 }
2424
2425 /* FIXME Check correct macro use */
2426 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
2427 MINOR(info.st_rdev), 0)))
2428 return_0;
2429 }
2430
2431 if (!node->props.segment_count) {
2432 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2433 return 0;
2434 }
2435
2436 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2437
2438 if (!_add_area(node, seg, dev_node, offset))
2439 return_0;
2440
2441 return 1;
2442 }
2443
2444 void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
2445 {
2446 node->dtree->cookie = cookie;
2447 }
2448
2449 uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
2450 {
2451 return node->dtree->cookie;
2452 }
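/*
 * Illustrative example (uuid prefix is hypothetical): builds with udev
 * synchronisation typically seed the cookie before activation and wait on it
 * afterwards, roughly
 *
 *	dm_tree_set_cookie(root, 0);
 *	if (!dm_tree_activate_children(root, uuid_prefix, prefix_len))
 *		return_0;
 *	if (!dm_udev_wait(dm_tree_get_cookie(root)))
 *		return_0;
 */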