/* libdm/libdm-deptree.c */
1 /*
2 * Copyright (C) 2005-2007 Red Hat, Inc. All rights reserved.
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
15 #include "dmlib.h"
16 #include "libdm-targets.h"
17 #include "libdm-common.h"
18 #include "kdev_t.h"
19 #include "dm-ioctl.h"
20
21 #include <stdarg.h>
22 #include <sys/param.h>
23 #include <sys/utsname.h>
24
25 #define MAX_TARGET_PARAMSIZE 500000
26
27 /* FIXME Fix interface so this is used only by LVM */
28 #define UUID_PREFIX "LVM-"
29
30 #define REPLICATOR_LOCAL_SITE 0
31
/*
 * Supported segment types.
 * Note: values are matched against the dm_segtypes[] name table below,
 * which maps each type to its kernel target name — keep the two in step.
 */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
};
46
47 /* FIXME Add crypt and multipath support */
48
/* Maps each SEG_* type to the kernel target name used in table lines. */
struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
};
65
/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;		/* Linkage in load_segment.areas */

	struct dm_tree_node *dev_node;	/* Node for the underlying device */

	uint64_t offset;		/* NOTE(review): presumably start offset on dev_node — confirm against table emit code */

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};
79
/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;		/* Linkage in load_segment.rsites */

	unsigned rsite_index;		/* Site index within the replicator */
	dm_replicator_mode_t mode;	/* Sync/async replication mode */
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};
91
/*
 * Per-segment properties.
 * Only the fields relevant to the segment's type (see trailing comments)
 * are meaningful for any given instance.
 */
struct load_segment {
	struct dm_list list;		/* Linkage in load_properties.segs */

	unsigned type;			/* One of the SEG_* values */

	uint64_t size;			/* Segment length */

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */
};
131
/* Per-device properties used when creating/(re)loading a table */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	unsigned segment_count;		/* Number of entries in segs */
	unsigned size_changed;		/* Set when a reload alters device size */
	struct dm_list segs;		/* List of struct load_segment */

	const char *new_name;		/* Non-NULL queues a rename on activation */
};
147
/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;		/* Linkage in a node's uses or used_by list */
	struct dm_tree_node *node;	/* The node at the other end of the link */
};
153
/* One device in the dependency tree */
struct dm_tree_node {
	struct dm_tree *dtree;		/* Owning tree */

	const char *name;		/* Device-mapper name (pool-allocated) */
	const char *uuid;		/* Device-mapper uuid (pool-allocated) */
	struct dm_info info;		/* Cached status from the last ioctl */

	struct dm_list uses;       	/* Nodes this node uses */
	struct dm_list used_by;    	/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};
178
/* The whole dependency tree: nodes hashed by (major,minor) and by uuid */
struct dm_tree {
	struct dm_pool *mem;		/* Pool backing all node/link allocations */
	struct dm_hash_table *devs;	/* Lookup by 64-bit MKDEV(major,minor) key */
	struct dm_hash_table *uuids;	/* Lookup by uuid string */
	struct dm_tree_node root;	/* Sentinel: parent of orphans, child of leaves */
	int skip_lockfs;		/* 1 skips lockfs (for non-snapshots) */
	int no_flush;		/* 1 sets noflush (mirrors/multipath) */
	uint32_t cookie;		/* Udev cookie shared by operations on this tree */
};
188
189 struct dm_tree *dm_tree_create(void)
190 {
191 struct dm_tree *dtree;
192
193 if (!(dtree = dm_malloc(sizeof(*dtree)))) {
194 log_error("dm_tree_create malloc failed");
195 return NULL;
196 }
197
198 memset(dtree, 0, sizeof(*dtree));
199 dtree->root.dtree = dtree;
200 dm_list_init(&dtree->root.uses);
201 dm_list_init(&dtree->root.used_by);
202 dtree->skip_lockfs = 0;
203 dtree->no_flush = 0;
204
205 if (!(dtree->mem = dm_pool_create("dtree", 1024))) {
206 log_error("dtree pool creation failed");
207 dm_free(dtree);
208 return NULL;
209 }
210
211 if (!(dtree->devs = dm_hash_create(8))) {
212 log_error("dtree hash creation failed");
213 dm_pool_destroy(dtree->mem);
214 dm_free(dtree);
215 return NULL;
216 }
217
218 if (!(dtree->uuids = dm_hash_create(32))) {
219 log_error("dtree uuid hash creation failed");
220 dm_hash_destroy(dtree->devs);
221 dm_pool_destroy(dtree->mem);
222 dm_free(dtree);
223 return NULL;
224 }
225
226 return dtree;
227 }
228
229 void dm_tree_free(struct dm_tree *dtree)
230 {
231 if (!dtree)
232 return;
233
234 dm_hash_destroy(dtree->uuids);
235 dm_hash_destroy(dtree->devs);
236 dm_pool_destroy(dtree->mem);
237 dm_free(dtree);
238 }
239
240 static int _nodes_are_linked(const struct dm_tree_node *parent,
241 const struct dm_tree_node *child)
242 {
243 struct dm_tree_link *dlink;
244
245 dm_list_iterate_items(dlink, &parent->uses)
246 if (dlink->node == child)
247 return 1;
248
249 return 0;
250 }
251
252 static int _link(struct dm_list *list, struct dm_tree_node *node)
253 {
254 struct dm_tree_link *dlink;
255
256 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
257 log_error("dtree link allocation failed");
258 return 0;
259 }
260
261 dlink->node = node;
262 dm_list_add(list, &dlink->list);
263
264 return 1;
265 }
266
267 static int _link_nodes(struct dm_tree_node *parent,
268 struct dm_tree_node *child)
269 {
270 if (_nodes_are_linked(parent, child))
271 return 1;
272
273 if (!_link(&parent->uses, child))
274 return 0;
275
276 if (!_link(&child->used_by, parent))
277 return 0;
278
279 return 1;
280 }
281
282 static void _unlink(struct dm_list *list, struct dm_tree_node *node)
283 {
284 struct dm_tree_link *dlink;
285
286 dm_list_iterate_items(dlink, list)
287 if (dlink->node == node) {
288 dm_list_del(&dlink->list);
289 break;
290 }
291 }
292
293 static void _unlink_nodes(struct dm_tree_node *parent,
294 struct dm_tree_node *child)
295 {
296 if (!_nodes_are_linked(parent, child))
297 return;
298
299 _unlink(&parent->uses, child);
300 _unlink(&child->used_by, parent);
301 }
302
303 static int _add_to_toplevel(struct dm_tree_node *node)
304 {
305 return _link_nodes(&node->dtree->root, node);
306 }
307
308 static void _remove_from_toplevel(struct dm_tree_node *node)
309 {
310 _unlink_nodes(&node->dtree->root, node);
311 }
312
313 static int _add_to_bottomlevel(struct dm_tree_node *node)
314 {
315 return _link_nodes(node, &node->dtree->root);
316 }
317
318 static void _remove_from_bottomlevel(struct dm_tree_node *node)
319 {
320 _unlink_nodes(node, &node->dtree->root);
321 }
322
/*
 * Link parent -> child, first detaching whichever end is about to gain a
 * real relationship from its root-sentinel attachment.
 *
 * Linking TO the root is skipped (returning success) when the child
 * already has a real parent; linking FROM the root is skipped when the
 * parent already has a real child.  Returns 0 only if _link_nodes()
 * fails to allocate.
 */
static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if ((parent == &parent->dtree->root)) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	/* Symmetric case: don't link the root as a child of a non-leaf */
	if ((child == &child->dtree->root)) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}
340
/*
 * Allocate a node from the tree's pool and register it in the devs hash
 * (keyed by MKDEV(major,minor)) and, when a uuid is supplied, in the
 * uuids hash.  name/uuid pointers are stored as-is (not copied) — the
 * caller must pass pool-allocated or otherwise long-lived strings.
 * Returns NULL on failure after undoing any partial registration.
 */
static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;	/* Struct copy of the caller's dm_info */
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		/* Roll back the devs-hash registration before freeing */
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}
389
390 static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
391 uint32_t major, uint32_t minor)
392 {
393 uint64_t dev = MKDEV(major, minor);
394
395 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
396 sizeof(dev));
397 }
398
399 static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
400 const char *uuid)
401 {
402 struct dm_tree_node *node;
403
404 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
405 return node;
406
407 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
408 return NULL;
409
410 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
411 }
412
/*
 * Fetch name, uuid, info and dependency list for device (major,minor).
 *
 * Non-dm devices are reported as non-existent with empty name/uuid and
 * no deps (still returns success).  For dm devices a DM_DEVICE_DEPS
 * task is run; on success *dmt is left live and OWNED BY THE CALLER
 * (the returned *deps points into it), who must dm_task_destroy() it.
 * On failure *dmt is destroyed here and 0 is returned.
 */
static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
		 const char **name, const char **uuid,
		 struct dm_info *info, struct dm_deps **deps)
{
	memset(info, 0, sizeof(*info));

	if (!dm_is_dm_major(major)) {
		/* Not device-mapper: synthesise a "doesn't exist" answer */
		*name = "";
		*uuid = "";
		*deps = NULL;
		info->major = major;
		info->minor = minor;
		info->exists = 0;
		info->live_table = 0;
		info->inactive_table = 0;
		info->read_only = 0;
		return 1;
	}

	if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
		log_error("deps dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(*dmt, major)) {
		log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_set_minor(*dmt, minor)) {
		log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_run(*dmt)) {
		log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_get_info(*dmt, info)) {
		log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!info->exists) {
		*name = "";
		*uuid = "";
		*deps = NULL;
	} else {
		/* Sanity-check the kernel's answer against what we asked for */
		if (info->major != major) {
			log_error("Inconsistent dtree major number: %u != %u",
				  major, info->major);
			goto failed;
		}
		if (info->minor != minor) {
			log_error("Inconsistent dtree minor number: %u != %u",
				  minor, info->minor);
			goto failed;
		}
		/* Copy name/uuid into the pool: the task's copies die with it */
		if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
			log_error("name pool_strdup failed");
			goto failed;
		}
		if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
			log_error("uuid pool_strdup failed");
			goto failed;
		}
		*deps = dm_task_get_deps(*dmt);
	}

	return 1;

failed:
	dm_task_destroy(*dmt);
	return 0;
}
493
/*
 * Ensure a node for (major,minor) exists in the tree, linked under
 * parent, and recursively add all of its dependencies.  An existing
 * node is just (re)linked; a new one is populated via _deps().
 * Returns the node, or NULL on failure.
 */
static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		/* Mark as a leaf by linking the root sentinel below it */
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	/* _deps() handed us ownership of dmt (deps points into it) */
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}
551
552 static int _node_clear_table(struct dm_tree_node *dnode)
553 {
554 struct dm_task *dmt;
555 struct dm_info *info;
556 const char *name;
557 int r;
558
559 if (!(info = &dnode->info)) {
560 log_error("_node_clear_table failed: missing info");
561 return 0;
562 }
563
564 if (!(name = dm_tree_node_get_name(dnode))) {
565 log_error("_node_clear_table failed: missing name");
566 return 0;
567 }
568
569 /* Is there a table? */
570 if (!info->exists || !info->inactive_table)
571 return 1;
572
573 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
574 name, info->major, info->minor);
575
576 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
577 dm_task_destroy(dmt);
578 log_error("Table clear dm_task creation failed for %s", name);
579 return 0;
580 }
581
582 if (!dm_task_set_major(dmt, info->major) ||
583 !dm_task_set_minor(dmt, info->minor)) {
584 log_error("Failed to set device number for %s table clear", name);
585 dm_task_destroy(dmt);
586 return 0;
587 }
588
589 r = dm_task_run(dmt);
590
591 if (!dm_task_get_info(dmt, info)) {
592 log_error("_node_clear_table failed: info missing after running task for %s", name);
593 r = 0;
594 }
595
596 dm_task_destroy(dmt);
597
598 return r;
599 }
600
601 struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
602 const char *name,
603 const char *uuid,
604 uint32_t major, uint32_t minor,
605 int read_only,
606 int clear_inactive,
607 void *context)
608 {
609 struct dm_tree_node *dnode;
610 struct dm_info info;
611 const char *name2;
612 const char *uuid2;
613
614 /* Do we need to add node to tree? */
615 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
616 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
617 log_error("name pool_strdup failed");
618 return NULL;
619 }
620 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
621 log_error("uuid pool_strdup failed");
622 return NULL;
623 }
624
625 info.major = 0;
626 info.minor = 0;
627 info.exists = 0;
628 info.live_table = 0;
629 info.inactive_table = 0;
630 info.read_only = 0;
631
632 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
633 context, 0)))
634 return_NULL;
635
636 /* Attach to root node until a table is supplied */
637 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
638 return_NULL;
639
640 dnode->props.major = major;
641 dnode->props.minor = minor;
642 dnode->props.new_name = NULL;
643 dnode->props.size_changed = 0;
644 } else if (strcmp(name, dnode->name)) {
645 /* Do we need to rename node? */
646 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
647 log_error("name pool_strdup failed");
648 return 0;
649 }
650 }
651
652 dnode->props.read_only = read_only ? 1 : 0;
653 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
654 dnode->props.read_ahead_flags = 0;
655
656 if (clear_inactive && !_node_clear_table(dnode))
657 return_NULL;
658
659 dnode->context = context;
660 dnode->udev_flags = 0;
661
662 return dnode;
663 }
664
665 struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
666 const char *name,
667 const char *uuid,
668 uint32_t major,
669 uint32_t minor,
670 int read_only,
671 int clear_inactive,
672 void *context,
673 uint16_t udev_flags)
674 {
675 struct dm_tree_node *node;
676
677 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
678 clear_inactive, context)))
679 node->udev_flags = udev_flags;
680
681 return node;
682 }
683
684
/* Record the read-ahead settings to apply when the node is resumed. */
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}
692
/* Mark a direct child that must be suspended before this node is removed. */
void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}
698
699 int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
700 {
701 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
702 }
703
704 int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
705 uint32_t minor, uint16_t udev_flags)
706 {
707 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
708 }
709
710 const char *dm_tree_node_get_name(const struct dm_tree_node *node)
711 {
712 return node->info.exists ? node->name : "";
713 }
714
715 const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
716 {
717 return node->info.exists ? node->uuid : "";
718 }
719
/* Cached dm_info for the node (check info->exists before trusting it). */
const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}
724
/* Externally-supplied context pointer stored on the node. */
void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}
729
/* Nonzero if a table (re)load changed the device's size. */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}
734
735 int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
736 {
737 if (inverted) {
738 if (_nodes_are_linked(&node->dtree->root, node))
739 return 0;
740 return dm_list_size(&node->used_by);
741 }
742
743 if (_nodes_are_linked(node, &node->dtree->root))
744 return 0;
745
746 return dm_list_size(&node->uses);
747 }
748
/*
 * Returns 1 if no prefix supplied
 *
 * Otherwise returns 1 when uuid starts with uuid_prefix, with one
 * transition quirk handled: if uuid_prefix carries the "LVM-" prefix
 * but the active device's uuid was recorded without it, compare again
 * with "LVM-" stripped from uuid_prefix.
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)	/* too short to contain "LVM-" plus anything */
		return 0;

	/* uuid already has the prefix, so the plain mismatch above is final */
	if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	/* Only retry when uuid_prefix itself starts with "LVM-" */
	if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	/* Retry with the prefix stripped from uuid_prefix */
	if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
		return 1;

	return 0;
}
775
/*
 * Returns 1 if no children.
 *
 * Otherwise returns 1 only when every child of node (in the direction
 * selected by 'inverted') that belongs to the uuid_prefix group, and is
 * not this node's designated presuspend target, is currently suspended.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		/* Root-linked means no real parents: trivially satisfied */
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}
824
825 /*
826 * Set major and minor to zero for root of tree.
827 */
828 struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
829 uint32_t major,
830 uint32_t minor)
831 {
832 if (!major && !minor)
833 return &dtree->root;
834
835 return _find_dm_tree_node(dtree, major, minor);
836 }
837
838 /*
839 * Set uuid to NULL for root of tree.
840 */
841 struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
842 const char *uuid)
843 {
844 if (!uuid || !*uuid)
845 return &dtree->root;
846
847 return _find_dm_tree_node_by_uuid(dtree, uuid);
848 }
849
850 /*
851 * First time set *handle to NULL.
852 * Set inverted to invert the tree.
853 */
854 struct dm_tree_node *dm_tree_next_child(void **handle,
855 const struct dm_tree_node *parent,
856 uint32_t inverted)
857 {
858 struct dm_list **dlink = (struct dm_list **) handle;
859 const struct dm_list *use_list;
860
861 if (inverted)
862 use_list = &parent->used_by;
863 else
864 use_list = &parent->uses;
865
866 if (!*dlink)
867 *dlink = dm_list_first(use_list);
868 else
869 *dlink = dm_list_next(use_list, *dlink);
870
871 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
872 }
873
874 /*
875 * Deactivate a device with its dependencies if the uuid prefix matches.
876 */
877 static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
878 struct dm_info *info)
879 {
880 struct dm_task *dmt;
881 int r;
882
883 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
884 log_error("_info_by_dev: dm_task creation failed");
885 return 0;
886 }
887
888 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
889 log_error("_info_by_dev: Failed to set device number");
890 dm_task_destroy(dmt);
891 return 0;
892 }
893
894 if (!with_open_count && !dm_task_no_open_count(dmt))
895 log_error("Failed to disable open_count");
896
897 if ((r = dm_task_run(dmt)))
898 r = dm_task_get_info(dmt, info);
899
900 dm_task_destroy(dmt);
901
902 return r;
903 }
904
/*
 * Remove (deactivate) a single device, synchronised with udev via the
 * supplied cookie.  On task completion the /dev node is also removed
 * here — see the FIXME — unless udev rules are expected to handle it.
 * Returns 0 on failure.
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-ioctl.c can handle it */
	/* Note: reaches into dmt->cookie_set, a libdm-internal field */
	rm_dev_node(name, dmt->cookie_set &&
			  !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
942
/*
 * Rename a device, synchronised with udev via the supplied cookie.
 * Returns 0 on failure.
 */
static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}
977
978 /* FIXME Merge with _suspend_node? */
979 static int _resume_node(const char *name, uint32_t major, uint32_t minor,
980 uint32_t read_ahead, uint32_t read_ahead_flags,
981 struct dm_info *newinfo, uint32_t *cookie,
982 uint16_t udev_flags)
983 {
984 struct dm_task *dmt;
985 int r = 0;
986
987 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
988
989 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
990 log_error("Suspend dm_task creation failed for %s", name);
991 return 0;
992 }
993
994 /* FIXME Kernel should fill in name on return instead */
995 if (!dm_task_set_name(dmt, name)) {
996 log_error("Failed to set readahead device name for %s", name);
997 goto out;
998 }
999
1000 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1001 log_error("Failed to set device number for %s resumption.", name);
1002 goto out;
1003 }
1004
1005 if (!dm_task_no_open_count(dmt))
1006 log_error("Failed to disable open_count");
1007
1008 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1009 log_error("Failed to set read ahead");
1010
1011 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
1012 goto out;
1013
1014 if ((r = dm_task_run(dmt)))
1015 r = dm_task_get_info(dmt, newinfo);
1016
1017 out:
1018 dm_task_destroy(dmt);
1019
1020 return r;
1021 }
1022
/*
 * Suspend a device, optionally skipping the filesystem sync (lockfs)
 * and/or the device flush.  On success the post-suspend dm_info is
 * returned in *newinfo.  Returns 0 on failure.
 */
static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt)))
		r = dm_task_get_info(dmt, newinfo);

	dm_task_destroy(dmt);

	return r;
}
1061
/*
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
/*
 * Depth-first deactivation of all children of dnode matching
 * uuid_prefix.  'level' is 0 at the public entry point; open-device
 * errors are only reported (and only fail the call) at level 0,
 * since deeper levels are likely internal dependencies.
 * Returns 0 if any matching device could not be deactivated.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Recurse into grandchildren now this child is gone */
		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}
1136
/* Public entry point: deactivate matching children, starting at level 0. */
int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				   const char *uuid_prefix,
				   size_t uuid_prefix_len)
{
	return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
}
1143
/* Make subsequent suspends on this tree skip the filesystem sync. */
void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}
1148
/* Make subsequent suspends on this tree use noflush. */
void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}
1153
/*
 * Suspend every device under dnode that matches uuid_prefix, top-down:
 * first the direct children (only once their own parents are already
 * suspended), then recursively the deeper levels.  Cached node info is
 * refreshed after each successful suspend.  Returns 0 if any matching
 * device failed to suspend.
 */
int dm_tree_suspend_children(struct dm_tree_node *dnode,
				   const char *uuid_prefix,
				   size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		/* Skip devices that are gone or already suspended */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}
1229
1230 int dm_tree_activate_children(struct dm_tree_node *dnode,
1231 const char *uuid_prefix,
1232 size_t uuid_prefix_len)
1233 {
1234 int r = 1;
1235 void *handle = NULL;
1236 struct dm_tree_node *child = dnode;
1237 struct dm_info newinfo;
1238 const char *name;
1239 const char *uuid;
1240 int priority;
1241
1242 /* Activate children first */
1243 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1244 if (!(uuid = dm_tree_node_get_uuid(child))) {
1245 stack;
1246 continue;
1247 }
1248
1249 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1250 continue;
1251
1252 if (dm_tree_node_num_children(child, 0))
1253 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1254 return_0;
1255 }
1256
1257 handle = NULL;
1258
1259 for (priority = 0; priority < 3; priority++) {
1260 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1261 if (!(uuid = dm_tree_node_get_uuid(child))) {
1262 stack;
1263 continue;
1264 }
1265
1266 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1267 continue;
1268
1269 if (priority != child->activation_priority)
1270 continue;
1271
1272 if (!(name = dm_tree_node_get_name(child))) {
1273 stack;
1274 continue;
1275 }
1276
1277 /* Rename? */
1278 if (child->props.new_name) {
1279 if (!_rename_node(name, child->props.new_name, child->info.major,
1280 child->info.minor, &child->dtree->cookie,
1281 child->udev_flags)) {
1282 log_error("Failed to rename %s (%" PRIu32
1283 ":%" PRIu32 ") to %s", name, child->info.major,
1284 child->info.minor, child->props.new_name);
1285 return 0;
1286 }
1287 child->name = child->props.new_name;
1288 child->props.new_name = NULL;
1289 }
1290
1291 if (!child->info.inactive_table && !child->info.suspended)
1292 continue;
1293
1294 if (!_resume_node(child->name, child->info.major, child->info.minor,
1295 child->props.read_ahead, child->props.read_ahead_flags,
1296 &newinfo, &child->dtree->cookie, child->udev_flags)) {
1297 log_error("Unable to resume %s (%" PRIu32
1298 ":%" PRIu32 ")", child->name, child->info.major,
1299 child->info.minor);
1300 r = 0;
1301 continue;
1302 }
1303
1304 /* Update cached info */
1305 child->info = newinfo;
1306 }
1307 }
1308
1309 handle = NULL;
1310
1311 return r;
1312 }
1313
1314 static int _create_node(struct dm_tree_node *dnode)
1315 {
1316 int r = 0;
1317 struct dm_task *dmt;
1318
1319 log_verbose("Creating %s", dnode->name);
1320
1321 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1322 log_error("Create dm_task creation failed for %s", dnode->name);
1323 return 0;
1324 }
1325
1326 if (!dm_task_set_name(dmt, dnode->name)) {
1327 log_error("Failed to set device name for %s", dnode->name);
1328 goto out;
1329 }
1330
1331 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1332 log_error("Failed to set uuid for %s", dnode->name);
1333 goto out;
1334 }
1335
1336 if (dnode->props.major &&
1337 (!dm_task_set_major(dmt, dnode->props.major) ||
1338 !dm_task_set_minor(dmt, dnode->props.minor))) {
1339 log_error("Failed to set device number for %s creation.", dnode->name);
1340 goto out;
1341 }
1342
1343 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1344 log_error("Failed to set read only flag for %s", dnode->name);
1345 goto out;
1346 }
1347
1348 if (!dm_task_no_open_count(dmt))
1349 log_error("Failed to disable open_count");
1350
1351 if ((r = dm_task_run(dmt)))
1352 r = dm_task_get_info(dmt, &dnode->info);
1353
1354 out:
1355 dm_task_destroy(dmt);
1356
1357 return r;
1358 }
1359
1360
1361 static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
1362 {
1363 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
1364 log_error("Failed to format %s device number for %s as dm "
1365 "target (%u,%u)",
1366 node->name, node->uuid, node->info.major, node->info.minor);
1367 return 0;
1368 }
1369
1370 return 1;
1371 }
1372
/*
 * Simplify string emitting code: append a formatted string to the
 * local `params' buffer (of size `paramsize') at offset p, advancing p
 * past the appended text.  If the buffer is too small the enclosing
 * function returns -1 so its caller can retry with a larger buffer
 * (see _emit_segment).
 */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
1383
1384 /*
1385 * _emit_areas_line
1386 *
1387 * Returns: 1 on success, 0 on failure
1388 */
1389 static int _emit_areas_line(struct dm_task *dmt __attribute((unused)),
1390 struct load_segment *seg, char *params,
1391 size_t paramsize, int *pos)
1392 {
1393 struct seg_area *area;
1394 char devbuf[DM_FORMAT_DEV_BUFSIZE];
1395 unsigned first_time = 1;
1396 const char *logtype;
1397 unsigned log_parm_count;
1398
1399 dm_list_iterate_items(area, &seg->areas) {
1400 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1401 return_0;
1402
1403 switch (seg->type) {
1404 case SEG_REPLICATOR_DEV:
1405 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1406 if (first_time)
1407 EMIT_PARAMS(*pos, " nolog 0");
1408 else {
1409 /* Remote devices */
1410 log_parm_count = (area->flags &
1411 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1412
1413 if (!area->slog) {
1414 devbuf[0] = 0; /* Only core log parameters */
1415 logtype = "core";
1416 } else {
1417 devbuf[0] = ' '; /* Extra space before device name */
1418 if (!_build_dev_string(devbuf + 1,
1419 sizeof(devbuf) - 1,
1420 area->slog))
1421 return_0;
1422 logtype = "disk";
1423 log_parm_count++; /* Extra sync log device name parameter */
1424 }
1425
1426 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1427 log_parm_count, devbuf, area->region_size);
1428
1429 logtype = (area->flags & DM_NOSYNC) ?
1430 " nosync" : (area->flags & DM_FORCESYNC) ?
1431 " sync" : NULL;
1432
1433 if (logtype)
1434 EMIT_PARAMS(*pos, logtype);
1435 }
1436 break;
1437 default:
1438 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1439 devbuf, area->offset);
1440 }
1441
1442 first_time = 0;
1443 }
1444
1445 return 1;
1446 }
1447
/*
 * Emit the "replicator" target table line: the replicator log device
 * and its parameters, followed by one parameter group per remote site.
 * Returns 1 on success, 0 on failure (-1 on buffer overflow via
 * EMIT_PARAMS).
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	/* A replicator segment requires its log device */
	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* Log size is taken from the log device's last segment */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	/* "<rlog_type> 4 <rlog_dev> 0 auto <rlog_size>" */
	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 base parameters, plus a keyword/value pair for async limits */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		/* At most one fall-behind constraint is emitted per site */
		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1483
1484 /*
1485 * Returns: 1 on success, 0 on failure
1486 */
1487 static int _mirror_emit_segment_line(struct dm_task *dmt, uint32_t major,
1488 uint32_t minor, struct load_segment *seg,
1489 uint64_t *seg_start, char *params,
1490 size_t paramsize)
1491 {
1492 int r;
1493 int block_on_error = 0;
1494 int handle_errors = 0;
1495 int dm_log_userspace = 0;
1496 struct utsname uts;
1497 unsigned log_parm_count;
1498 int pos = 0;
1499 char logbuf[DM_FORMAT_DEV_BUFSIZE];
1500 const char *logtype;
1501
1502 r = uname(&uts);
1503 if (r)
1504 return_0;
1505
1506 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1507 /*
1508 * Originally, block_on_error was an argument to the log
1509 * portion of the mirror CTR table. It was renamed to
1510 * "handle_errors" and now resides in the 'features'
1511 * section of the mirror CTR table (i.e. at the end).
1512 *
1513 * We can identify whether to use "block_on_error" or
1514 * "handle_errors" by the dm-mirror module's version
1515 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1516 */
1517 if (strncmp(uts.release, "2.6.22", 6) >= 0)
1518 handle_errors = 1;
1519 else
1520 block_on_error = 1;
1521 }
1522
1523 if (seg->clustered) {
1524 /* Cluster mirrors require a UUID */
1525 if (!seg->uuid)
1526 return_0;
1527
1528 /*
1529 * Cluster mirrors used to have their own log
1530 * types. Now they are accessed through the
1531 * userspace log type.
1532 *
1533 * The dm-log-userspace module was added to the
1534 * 2.6.31 kernel.
1535 */
1536 if (strncmp(uts.release, "2.6.31", 6) >= 0)
1537 dm_log_userspace = 1;
1538 }
1539
1540 /* Region size */
1541 log_parm_count = 1;
1542
1543 /* [no]sync, block_on_error etc. */
1544 log_parm_count += hweight32(seg->flags);
1545
1546 /* "handle_errors" is a feature arg now */
1547 if (handle_errors)
1548 log_parm_count--;
1549
1550 /* DM_CORELOG does not count in the param list */
1551 if (seg->flags & DM_CORELOG)
1552 log_parm_count--;
1553
1554 if (seg->clustered) {
1555 log_parm_count++; /* For UUID */
1556
1557 if (!dm_log_userspace)
1558 EMIT_PARAMS(pos, "clustered-");
1559 else
1560 /* For clustered-* type field inserted later */
1561 log_parm_count++;
1562 }
1563
1564 if (!seg->log)
1565 logtype = "core";
1566 else {
1567 logtype = "disk";
1568 log_parm_count++;
1569 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1570 return_0;
1571 }
1572
1573 if (dm_log_userspace)
1574 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1575 log_parm_count, seg->uuid, logtype);
1576 else
1577 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
1578
1579 if (seg->log)
1580 EMIT_PARAMS(pos, " %s", logbuf);
1581
1582 EMIT_PARAMS(pos, " %u", seg->region_size);
1583
1584 if (seg->clustered && !dm_log_userspace)
1585 EMIT_PARAMS(pos, " %s", seg->uuid);
1586
1587 if ((seg->flags & DM_NOSYNC))
1588 EMIT_PARAMS(pos, " nosync");
1589 else if ((seg->flags & DM_FORCESYNC))
1590 EMIT_PARAMS(pos, " sync");
1591
1592 if (block_on_error)
1593 EMIT_PARAMS(pos, " block_on_error");
1594
1595 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1596
1597 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0)
1598 return_0;
1599
1600 if (handle_errors)
1601 EMIT_PARAMS(pos, " 1 handle_errors");
1602
1603 return 1;
1604 }
1605
1606 static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
1607 uint32_t minor, struct load_segment *seg,
1608 uint64_t *seg_start, char *params,
1609 size_t paramsize)
1610 {
1611 int pos = 0;
1612 int r;
1613 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
1614
1615 switch(seg->type) {
1616 case SEG_ERROR:
1617 case SEG_ZERO:
1618 case SEG_LINEAR:
1619 break;
1620 case SEG_MIRRORED:
1621 /* Mirrors are pretty complicated - now in separate function */
1622 r = _mirror_emit_segment_line(dmt, major, minor, seg, seg_start,
1623 params, paramsize);
1624 if (!r)
1625 return_0;
1626 break;
1627 case SEG_REPLICATOR:
1628 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
1629 &pos)) <= 0) {
1630 stack;
1631 return r;
1632 }
1633 break;
1634 case SEG_REPLICATOR_DEV:
1635 if (!seg->replicator || !_build_dev_string(originbuf,
1636 sizeof(originbuf),
1637 seg->replicator))
1638 return_0;
1639
1640 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
1641 break;
1642 case SEG_SNAPSHOT:
1643 case SEG_SNAPSHOT_MERGE:
1644 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1645 return_0;
1646 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
1647 return_0;
1648 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
1649 seg->persistent ? 'P' : 'N', seg->chunk_size);
1650 break;
1651 case SEG_SNAPSHOT_ORIGIN:
1652 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1653 return_0;
1654 EMIT_PARAMS(pos, "%s", originbuf);
1655 break;
1656 case SEG_STRIPED:
1657 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
1658 break;
1659 case SEG_CRYPT:
1660 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
1661 seg->chainmode ? "-" : "", seg->chainmode ?: "",
1662 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
1663 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
1664 seg->iv_offset : *seg_start);
1665 break;
1666 }
1667
1668 switch(seg->type) {
1669 case SEG_ERROR:
1670 case SEG_REPLICATOR:
1671 case SEG_SNAPSHOT:
1672 case SEG_SNAPSHOT_ORIGIN:
1673 case SEG_SNAPSHOT_MERGE:
1674 case SEG_ZERO:
1675 break;
1676 case SEG_CRYPT:
1677 case SEG_LINEAR:
1678 case SEG_REPLICATOR_DEV:
1679 case SEG_STRIPED:
1680 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
1681 stack;
1682 return r;
1683 }
1684 break;
1685 }
1686
1687 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
1688 " %" PRIu64 " %s %s", major, minor,
1689 *seg_start, seg->size, dm_segtypes[seg->type].target, params);
1690
1691 if (!dm_task_add_target(dmt, *seg_start, seg->size, dm_segtypes[seg->type].target, params))
1692 return_0;
1693
1694 *seg_start += seg->size;
1695
1696 return 1;
1697 }
1698
1699 #undef EMIT_PARAMS
1700
1701 static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
1702 struct load_segment *seg, uint64_t *seg_start)
1703 {
1704 char *params;
1705 size_t paramsize = 4096;
1706 int ret;
1707
1708 do {
1709 if (!(params = dm_malloc(paramsize))) {
1710 log_error("Insufficient space for target parameters.");
1711 return 0;
1712 }
1713
1714 params[0] = '\0';
1715 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
1716 params, paramsize);
1717 dm_free(params);
1718
1719 if (!ret)
1720 stack;
1721
1722 if (ret >= 0)
1723 return ret;
1724
1725 log_debug("Insufficient space in params[%" PRIsize_t
1726 "] for target parameters.", paramsize);
1727
1728 paramsize *= 2;
1729 } while (paramsize < MAX_TARGET_PARAMSIZE);
1730
1731 log_error("Target parameter size too big. Aborting.");
1732 return 0;
1733 }
1734
/*
 * Load (reload) the table for dnode from its queued segment list via
 * DM_DEVICE_RELOAD.  Identical-table reloads are suppressed; whether
 * the table size changed is recorded in props.size_changed so callers
 * can propagate it.  Returns 1 on success, 0 on failure.
 */
static int _load_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;
	struct load_segment *seg;
	uint64_t seg_start = 0;

	log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
		    dnode->info.major, dnode->info.minor);

	if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
		log_error("Reload dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set device number for %s reload.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	/* Emit one target line per queued segment */
	dm_list_iterate_items(seg, &dnode->props.segs)
		if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
				   seg, &seg_start))
			goto_out;

	if (!dm_task_suppress_identical_reload(dmt))
		log_error("Failed to suppress reload of identical tables.");

	if ((r = dm_task_run(dmt))) {
		r = dm_task_get_info(dmt, &dnode->info);
		/* No inactive table after the run => the reload was suppressed */
		if (r && !dnode->info.inactive_table)
			log_verbose("Suppressed %s identical table reload.",
				    dnode->name);

		/* Record whether the total table size changed */
		if ((dnode->props.size_changed =
		     (dm_task_get_existing_table_size(dmt) == seg_start) ? 0 : 1))
			log_debug("Table size changed from %" PRIu64 " to %"
				  PRIu64 " for %s",
				  dm_task_get_existing_table_size(dmt),
				  seg_start, dnode->name);
	}

	/* Segments are consumed - prevent a second reload of this node */
	dnode->props.segment_count = 0;

out:
	dm_task_destroy(dmt);

	return r;
}
1793
/*
 * Create missing devices and preload tables, depth-first, for every
 * node below dnode whose uuid matches uuid_prefix.  A child whose size
 * changed and that has parents is resumed immediately so the parents
 * see the new size.  Returns 1 on success, 0 if any resume failed.
 */
int dm_tree_preload_children(struct dm_tree_node *dnode,
				 const char *uuid_prefix,
				 size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child;
	struct dm_info newinfo;

	/* Preload children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		/* Skip existing non-device-mapper devices */
		if (!child->info.exists && child->info.major)
			continue;

		/* Ignore if it doesn't belong to this VG */
		if (child->info.exists &&
		    !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
				return_0;

		/* FIXME Cope if name exists with no uuid? */
		if (!child->info.exists) {
			if (!_create_node(child)) {
				stack;
				return 0;
			}
		}

		/* Only load a table if one was queued and none is inactive yet */
		if (!child->info.inactive_table && child->props.segment_count) {
			if (!_load_node(child)) {
				stack;
				return 0;
			}
		}

		/* Propagate device size change */
		if (child->props.size_changed)
			dnode->props.size_changed = 1;

		/* Resume device immediately if it has parents and its size changed */
		if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
			continue;

		if (!child->info.inactive_table && !child->info.suspended)
			continue;

		if (!_resume_node(child->name, child->info.major, child->info.minor,
				  child->props.read_ahead, child->props.read_ahead_flags,
				  &newinfo, &child->dtree->cookie, child->udev_flags)) {
			log_error("Unable to resume %s (%" PRIu32
				  ":%" PRIu32 ")", child->name, child->info.major,
				  child->info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	handle = NULL;

	return r;
}
1862
1863 /*
1864 * Returns 1 if unsure.
1865 */
1866 int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
1867 const char *uuid_prefix,
1868 size_t uuid_prefix_len)
1869 {
1870 void *handle = NULL;
1871 struct dm_tree_node *child = dnode;
1872 const char *uuid;
1873
1874 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1875 if (!(uuid = dm_tree_node_get_uuid(child))) {
1876 log_error("Failed to get uuid for dtree node.");
1877 return 1;
1878 }
1879
1880 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1881 return 1;
1882
1883 if (dm_tree_node_num_children(child, 0))
1884 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
1885 }
1886
1887 return 0;
1888 }
1889
1890 /*
1891 * Target functions
1892 */
1893 static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
1894 {
1895 struct load_segment *seg;
1896
1897 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
1898 log_error("dtree node segment allocation failed");
1899 return NULL;
1900 }
1901
1902 seg->type = type;
1903 seg->size = size;
1904 seg->area_count = 0;
1905 dm_list_init(&seg->areas);
1906 seg->stripe_size = 0;
1907 seg->persistent = 0;
1908 seg->chunk_size = 0;
1909 seg->cow = NULL;
1910 seg->origin = NULL;
1911 seg->merge = NULL;
1912
1913 dm_list_add(&dnode->props.segs, &seg->list);
1914 dnode->props.segment_count++;
1915
1916 return seg;
1917 }
1918
1919 int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
1920 uint64_t size,
1921 const char *origin_uuid)
1922 {
1923 struct load_segment *seg;
1924 struct dm_tree_node *origin_node;
1925
1926 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
1927 return_0;
1928
1929 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
1930 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
1931 return 0;
1932 }
1933
1934 seg->origin = origin_node;
1935 if (!_link_tree_nodes(dnode, origin_node))
1936 return_0;
1937
1938 /* Resume snapshot origins after new snapshots */
1939 dnode->activation_priority = 1;
1940
1941 return 1;
1942 }
1943
1944 static int _add_snapshot_target(struct dm_tree_node *node,
1945 uint64_t size,
1946 const char *origin_uuid,
1947 const char *cow_uuid,
1948 const char *merge_uuid,
1949 int persistent,
1950 uint32_t chunk_size)
1951 {
1952 struct load_segment *seg;
1953 struct dm_tree_node *origin_node, *cow_node, *merge_node;
1954 unsigned seg_type;
1955
1956 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
1957
1958 if (!(seg = _add_segment(node, seg_type, size)))
1959 return_0;
1960
1961 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
1962 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
1963 return 0;
1964 }
1965
1966 seg->origin = origin_node;
1967 if (!_link_tree_nodes(node, origin_node))
1968 return_0;
1969
1970 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
1971 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
1972 return 0;
1973 }
1974
1975 seg->cow = cow_node;
1976 if (!_link_tree_nodes(node, cow_node))
1977 return_0;
1978
1979 seg->persistent = persistent ? 1 : 0;
1980 seg->chunk_size = chunk_size;
1981
1982 if (merge_uuid) {
1983 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
1984 /* not a pure error, merging snapshot may have been deactivated */
1985 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
1986 } else {
1987 seg->merge = merge_node;
1988 /* must not link merging snapshot, would undermine activation_priority below */
1989 }
1990
1991 /* Resume snapshot-merge (acting origin) after other snapshots */
1992 node->activation_priority = 1;
1993 if (seg->merge) {
1994 /* Resume merging snapshot after snapshot-merge */
1995 seg->merge->activation_priority = 2;
1996 }
1997 }
1998
1999 return 1;
2000 }
2001
2002
/* Append a snapshot segment (no merge device) to node. */
int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
					 uint64_t size,
					 const char *origin_uuid,
					 const char *cow_uuid,
					 int persistent,
					 uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    NULL, persistent, chunk_size);
}
2013
/* Append a snapshot-merge segment (always persistent) to node. */
int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *origin_uuid,
					   const char *cow_uuid,
					   const char *merge_uuid,
					   uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    merge_uuid, 1, chunk_size);
}
2024
/* Append an "error" target segment of the given size to node.
 * Returns 1 on success, 0 on failure. */
int dm_tree_node_add_error_target(struct dm_tree_node *node,
				  uint64_t size)
{
	if (!_add_segment(node, SEG_ERROR, size))
		return_0;

	return 1;
}
2033
/* Append a "zero" target segment of the given size to node.
 * Returns 1 on success, 0 on failure. */
int dm_tree_node_add_zero_target(struct dm_tree_node *node,
				 uint64_t size)
{
	if (!_add_segment(node, SEG_ZERO, size))
		return_0;

	return 1;
}
2042
/* Append a "linear" target segment; the backing device is attached
 * afterwards via dm_tree_node_add_target_area.
 * Returns 1 on success, 0 on failure. */
int dm_tree_node_add_linear_target(struct dm_tree_node *node,
				   uint64_t size)
{
	if (!_add_segment(node, SEG_LINEAR, size))
		return_0;

	return 1;
}
2051
/* Append a "striped" target segment with the given stripe size; the
 * stripe devices are attached afterwards via
 * dm_tree_node_add_target_area.  Returns 1 on success, 0 on failure. */
int dm_tree_node_add_striped_target(struct dm_tree_node *node,
				    uint64_t size,
				    uint32_t stripe_size)
{
	struct load_segment *seg;

	if (!(seg = _add_segment(node, SEG_STRIPED, size)))
		return_0;

	seg->stripe_size = stripe_size;

	return 1;
}
2065
/*
 * Append a "crypt" target segment to node.
 *
 * cipher/chainmode/iv form the crypto spec emitted later as
 * "cipher[-chainmode][-iv]" (see _emit_segment_line); key is the key
 * string; iv_offset is used unless it equals DM_CRYPT_IV_DEFAULT, in
 * which case the segment start offset is emitted instead.  The string
 * pointers are stored as-is (not duplicated) - the caller must keep
 * them alive until the table is loaded.
 */
int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
				  uint64_t size,
				  const char *cipher,
				  const char *chainmode,
				  const char *iv,
				  uint64_t iv_offset,
				  const char *key)
{
	struct load_segment *seg;

	if (!(seg = _add_segment(node, SEG_CRYPT, size)))
		return_0;

	seg->cipher = cipher;
	seg->chainmode = chainmode;
	seg->iv = iv;
	seg->iv_offset = iv_offset;
	seg->key = key;

	return 1;
}
2087
/*
 * Fill in the mirror log details on node's last (mirror) segment.
 * With DM_CORELOG in flags no separate log device is linked and
 * seg->log stays NULL; otherwise log_uuid must name an existing tree
 * node which is linked in as the disk log.
 * Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
				       uint32_t region_size,
				       unsigned clustered,
				       const char *log_uuid,
				       unsigned area_count,
				       uint32_t flags)
{
	struct dm_tree_node *log_node = NULL;
	struct load_segment *seg;

	if (!node->props.segment_count) {
		log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
		return 0;
	}

	seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);

	if (log_uuid) {
		/* The uuid is kept even for a core log (used for cluster mirrors) */
		if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
			log_error("log uuid pool_strdup failed");
			return 0;
		}
		if (!(flags & DM_CORELOG)) {
			if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
				log_error("Couldn't find mirror log uuid %s.", log_uuid);
				return 0;
			}

			if (!_link_tree_nodes(node, log_node))
				return_0;
		}
	}

	seg->log = log_node;
	seg->region_size = region_size;
	seg->clustered = clustered;
	seg->mirror_area_count = area_count;
	seg->flags = flags;

	return 1;
}
2129
/* Append a "mirror" segment; log and image areas are filled in later
 * via dm_tree_node_add_mirror_target_log and
 * dm_tree_node_add_target_area.  Returns 1 on success, 0 on failure. */
int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
				   uint64_t size)
{
	struct load_segment *seg;

	if (!(seg = _add_segment(node, SEG_MIRRORED, size)))
		return_0;

	return 1;
}
2140
2141 int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2142 uint64_t size,
2143 const char *rlog_uuid,
2144 const char *rlog_type,
2145 unsigned rsite_index,
2146 dm_replicator_mode_t mode,
2147 uint32_t async_timeout,
2148 uint64_t fall_behind_data,
2149 uint32_t fall_behind_ios)
2150 {
2151 struct load_segment *rseg;
2152 struct replicator_site *rsite;
2153
2154 /* Local site0 - adds replicator segment and links rlog device */
2155 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2156 if (node->props.segment_count) {
2157 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2158 return 0;
2159 }
2160
2161 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2162 return_0;
2163
2164 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2165 log_error("Missing replicator log uuid %s.", rlog_uuid);
2166 return 0;
2167 }
2168
2169 if (!_link_tree_nodes(node, rseg->log))
2170 return_0;
2171
2172 if (strcmp(rlog_type, "ringbuffer") != 0) {
2173 log_error("Unsupported replicator log type %s.", rlog_type);
2174 return 0;
2175 }
2176
2177 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2178 return_0;
2179
2180 dm_list_init(&rseg->rsites);
2181 rseg->rdevice_count = 0;
2182 node->activation_priority = 1;
2183 }
2184
2185 /* Add site to segment */
2186 if (mode == DM_REPLICATOR_SYNC
2187 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2188 log_error("Async parameters passed for synchronnous replicator.");
2189 return 0;
2190 }
2191
2192 if (node->props.segment_count != 1) {
2193 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2194 return 0;
2195 }
2196
2197 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2198 if (rseg->type != SEG_REPLICATOR) {
2199 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2200 dm_segtypes[rseg->type].target);
2201 return 0;
2202 }
2203
2204 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2205 log_error("Failed to allocate remote site segment.");
2206 return 0;
2207 }
2208
2209 dm_list_add(&rseg->rsites, &rsite->list);
2210 rseg->rsite_count++;
2211
2212 rsite->mode = mode;
2213 rsite->async_timeout = async_timeout;
2214 rsite->fall_behind_data = fall_behind_data;
2215 rsite->fall_behind_ios = fall_behind_ios;
2216 rsite->rsite_index = rsite_index;
2217
2218 return 1;
2219 }
2220
/* Appends device node to Replicator */
/*
 * Attach a replicated device to a replicator.
 *
 * For rsite_index == REPLICATOR_LOCAL_SITE a new replicator-dev
 * segment is created on node and linked to the replicator control
 * device (replicator_uuid); for remote sites the existing
 * replicator-dev segment is reused.  In both cases the site device
 * (rdev_uuid) is added as a target area, optionally with a sync log
 * device (slog_uuid) unless DM_CORELOG is set in slog_flags.
 * Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *replicator_uuid,
					   uint64_t rdevice_index,
					   const char *rdev_uuid,
					   unsigned rsite_index,
					   const char *slog_uuid,
					   uint32_t slog_flags,
					   uint32_t slog_region_size)
{
	struct seg_area *area;
	struct load_segment *rseg;
	struct load_segment *rep_seg;

	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		/* Site index for local target */
		if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
			return_0;

		if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
			log_error("Missing replicator uuid %s.", replicator_uuid);
			return 0;
		}

		/* Local slink0 for replicator must be always initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
			return 0;
		}

		rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
		if (rep_seg->type != SEG_REPLICATOR) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
				  dm_segtypes[rep_seg->type].target);
			return 0;
		}
		rep_seg->rdevice_count++;

		if (!_link_tree_nodes(node, rseg->replicator))
			return_0;

		rseg->rdevice_index = rdevice_index;
	} else {
		/* Local slink0 for replicator must be always initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
			return 0;
		}

		rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
		if (rseg->type != SEG_REPLICATOR_DEV) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
				  dm_segtypes[rseg->type].target);
			return 0;
		}
	}

	/* A sync log device is mandatory unless a core log was requested */
	if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
		log_error("Unspecified sync log uuid.");
		return 0;
	}

	if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
		return_0;

	/* The area just added carries the per-site log settings */
	area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);

	if (!(slog_flags & DM_CORELOG)) {
		if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
			log_error("Couldn't find sync log uuid %s.", slog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, area->slog))
			return_0;
	}

	area->flags = slog_flags;
	area->region_size = slog_region_size;
	area->rsite_index = rsite_index;

	return 1;
}
2305
2306 static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
2307 {
2308 struct seg_area *area;
2309
2310 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
2311 log_error("Failed to allocate target segment area.");
2312 return 0;
2313 }
2314
2315 area->dev_node = dev_node;
2316 area->offset = offset;
2317
2318 dm_list_add(&seg->areas, &area->list);
2319 seg->area_count++;
2320
2321 return 1;
2322 }
2323
2324 int dm_tree_node_add_target_area(struct dm_tree_node *node,
2325 const char *dev_name,
2326 const char *uuid,
2327 uint64_t offset)
2328 {
2329 struct load_segment *seg;
2330 struct stat info;
2331 struct dm_tree_node *dev_node;
2332
2333 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
2334 log_error("dm_tree_node_add_target_area called without device");
2335 return 0;
2336 }
2337
2338 if (uuid) {
2339 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
2340 log_error("Couldn't find area uuid %s.", uuid);
2341 return 0;
2342 }
2343 if (!_link_tree_nodes(node, dev_node))
2344 return_0;
2345 } else {
2346 if (stat(dev_name, &info) < 0) {
2347 log_error("Device %s not found.", dev_name);
2348 return 0;
2349 }
2350
2351 if (!S_ISBLK(info.st_mode)) {
2352 log_error("Device %s is not a block device.", dev_name);
2353 return 0;
2354 }
2355
2356 /* FIXME Check correct macro use */
2357 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
2358 MINOR(info.st_rdev), 0)))
2359 return_0;
2360 }
2361
2362 if (!node->props.segment_count) {
2363 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
2364 return 0;
2365 }
2366
2367 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2368
2369 if (!_add_area(node, seg, dev_node, offset))
2370 return_0;
2371
2372 return 1;
2373 }
2374
/* Store the synchronization cookie on the tree owning node
 * (used by rename/resume operations - see _rename_node/_resume_node). */
void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
{
	node->dtree->cookie = cookie;
}
2379
/* Return the synchronization cookie stored on the tree owning node. */
uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
	return node->dtree->cookie;
}
This page took 0.153161 seconds and 6 git commands to generate.