/*
 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "dmlib.h"
#include "libdm-targets.h"
#include "libdm-common.h"
#include "kdev_t.h"
#include "dm-ioctl.h"

#include <stdarg.h>
#include <sys/param.h>
#include <sys/utsname.h>

#define MAX_TARGET_PARAMSIZE 500000

/* FIXME Fix interface so this is used only by LVM */
#define UUID_PREFIX "LVM-"

#define REPLICATOR_LOCAL_SITE 0

#define THIN_MIN_DATA_SIZE 128
#define THIN_MAX_DATA_SIZE 2097152
#define THIN_MAX_DEVICE_ID ((1 << 24) - 1)

#define QUOTE(x) #x

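/*
 * Informal note on the thin-pool limits above: THIN_MIN_DATA_SIZE and
 * THIN_MAX_DATA_SIZE appear to be data block sizes expressed in 512-byte
 * sectors, i.e. 64KiB and 1GiB respectively, and THIN_MAX_DEVICE_ID
 * corresponds to the thin target's 24-bit device id space.
 */
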
/* Supported segment types */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,
};

/* FIXME Add crypt and multipath support */

struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 * WARNING: Since the 'raid' target overloads this 1:1 mapping table
	 * for lookups, do not add new enum elements past these entries!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};

/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;

	uint64_t offset;

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};

/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;
	dm_replicator_mode_t mode;
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};

/* Per-segment properties */
struct load_segment {
	struct dm_list list;

	unsigned type;

	uint64_t size;

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	uint32_t data_block_size;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	unsigned skip_block_zeroeing;	/* Thin_pool */
	uint32_t device_id;		/* Thin */
};

/* Per-device properties */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	unsigned segment_count;
	unsigned size_changed;
	struct dm_list segs;

	const char *new_name;

	/* If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;
};

/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;
	struct dm_tree_node *node;
};

struct dm_tree_node {
	struct dm_tree *dtree;

	const char *name;
	const char *uuid;
	struct dm_info info;

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};

struct dm_tree {
	struct dm_pool *mem;
	struct dm_hash_table *devs;
	struct dm_hash_table *uuids;
	struct dm_tree_node root;
	int skip_lockfs;		/* 1 skips lockfs (for non-snapshots) */
	int no_flush;			/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;		/* 1 retries remove if not successful */
	uint32_t cookie;
};

struct dm_tree *dm_tree_create(void)
{
	struct dm_tree *dtree;

	if (!(dtree = dm_zalloc(sizeof(*dtree)))) {
		log_error("dm_tree_create malloc failed");
		return NULL;
	}

	dtree->root.dtree = dtree;
	dm_list_init(&dtree->root.uses);
	dm_list_init(&dtree->root.used_by);
	dtree->skip_lockfs = 0;
	dtree->no_flush = 0;

	if (!(dtree->mem = dm_pool_create("dtree", 1024))) {
		log_error("dtree pool creation failed");
		dm_free(dtree);
		return NULL;
	}

	if (!(dtree->devs = dm_hash_create(8))) {
		log_error("dtree hash creation failed");
		dm_pool_destroy(dtree->mem);
		dm_free(dtree);
		return NULL;
	}

	if (!(dtree->uuids = dm_hash_create(32))) {
		log_error("dtree uuid hash creation failed");
		dm_hash_destroy(dtree->devs);
		dm_pool_destroy(dtree->mem);
		dm_free(dtree);
		return NULL;
	}

	return dtree;
}

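/*
 * Illustrative usage sketch (hypothetical caller code, not part of this
 * library): a caller typically builds a tree from one or more existing
 * devices and then operates on it, e.g.
 *
 *	struct dm_tree *dtree;
 *
 *	if ((dtree = dm_tree_create())) {
 *		if (dm_tree_add_dev(dtree, major, minor))
 *			dm_tree_deactivate_children(dm_tree_find_node(dtree, major, minor),
 *						    uuid_prefix, uuid_prefix_len);
 *		dm_tree_free(dtree);
 *	}
 *
 * where 'major', 'minor', 'uuid_prefix' and 'uuid_prefix_len' stand for
 * caller-supplied values.
 */
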
b4f1578f 292void dm_tree_free(struct dm_tree *dtree)
3d0480ed 293{
b4f1578f 294 if (!dtree)
3d0480ed
AK
295 return;
296
b4f1578f
AK
297 dm_hash_destroy(dtree->uuids);
298 dm_hash_destroy(dtree->devs);
299 dm_pool_destroy(dtree->mem);
300 dm_free(dtree);
3d0480ed
AK
301}
302
04bde319
ZK
303static int _nodes_are_linked(const struct dm_tree_node *parent,
304 const struct dm_tree_node *child)
3d0480ed 305{
b4f1578f 306 struct dm_tree_link *dlink;
3d0480ed 307
2c44337b 308 dm_list_iterate_items(dlink, &parent->uses)
3d0480ed
AK
309 if (dlink->node == child)
310 return 1;
3d0480ed
AK
311
312 return 0;
313}
314
2c44337b 315static int _link(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 316{
b4f1578f 317 struct dm_tree_link *dlink;
3d0480ed 318
b4f1578f
AK
319 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
320 log_error("dtree link allocation failed");
3d0480ed
AK
321 return 0;
322 }
323
324 dlink->node = node;
2c44337b 325 dm_list_add(list, &dlink->list);
3d0480ed
AK
326
327 return 1;
328}
329
b4f1578f
AK
330static int _link_nodes(struct dm_tree_node *parent,
331 struct dm_tree_node *child)
3d0480ed
AK
332{
333 if (_nodes_are_linked(parent, child))
334 return 1;
335
336 if (!_link(&parent->uses, child))
337 return 0;
338
339 if (!_link(&child->used_by, parent))
340 return 0;
341
342 return 1;
343}
344
2c44337b 345static void _unlink(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 346{
b4f1578f 347 struct dm_tree_link *dlink;
3d0480ed 348
2c44337b 349 dm_list_iterate_items(dlink, list)
3d0480ed 350 if (dlink->node == node) {
2c44337b 351 dm_list_del(&dlink->list);
3d0480ed
AK
352 break;
353 }
3d0480ed
AK
354}
355
b4f1578f
AK
356static void _unlink_nodes(struct dm_tree_node *parent,
357 struct dm_tree_node *child)
3d0480ed
AK
358{
359 if (!_nodes_are_linked(parent, child))
360 return;
361
362 _unlink(&parent->uses, child);
363 _unlink(&child->used_by, parent);
364}
365
b4f1578f 366static int _add_to_toplevel(struct dm_tree_node *node)
165e4a11 367{
b4f1578f 368 return _link_nodes(&node->dtree->root, node);
165e4a11
AK
369}
370
b4f1578f 371static void _remove_from_toplevel(struct dm_tree_node *node)
3d0480ed 372{
b1ebf028 373 _unlink_nodes(&node->dtree->root, node);
3d0480ed
AK
374}
375
b4f1578f 376static int _add_to_bottomlevel(struct dm_tree_node *node)
3d0480ed 377{
b4f1578f 378 return _link_nodes(node, &node->dtree->root);
3d0480ed
AK
379}
380
b4f1578f 381static void _remove_from_bottomlevel(struct dm_tree_node *node)
165e4a11 382{
b1ebf028 383 _unlink_nodes(node, &node->dtree->root);
165e4a11
AK
384}
385
b4f1578f 386static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
165e4a11
AK
387{
388 /* Don't link to root node if child already has a parent */
f77736ca 389 if (parent == &parent->dtree->root) {
b4f1578f 390 if (dm_tree_node_num_children(child, 1))
165e4a11
AK
391 return 1;
392 } else
393 _remove_from_toplevel(child);
394
f77736ca 395 if (child == &child->dtree->root) {
b4f1578f 396 if (dm_tree_node_num_children(parent, 0))
165e4a11
AK
397 return 1;
398 } else
399 _remove_from_bottomlevel(parent);
400
401 return _link_nodes(parent, child);
402}
403
b4f1578f 404static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
405 const char *name,
406 const char *uuid,
165e4a11 407 struct dm_info *info,
f16aea9e
PR
408 void *context,
409 uint16_t udev_flags)
3d0480ed 410{
b4f1578f 411 struct dm_tree_node *node;
3d0480ed
AK
412 uint64_t dev;
413
b4f1578f
AK
414 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
415 log_error("_create_dm_tree_node alloc failed");
3d0480ed
AK
416 return NULL;
417 }
418
b4f1578f 419 node->dtree = dtree;
3d0480ed
AK
420
421 node->name = name;
422 node->uuid = uuid;
423 node->info = *info;
165e4a11 424 node->context = context;
f16aea9e 425 node->udev_flags = udev_flags;
56c28292 426 node->activation_priority = 0;
3d0480ed 427
2c44337b
AK
428 dm_list_init(&node->uses);
429 dm_list_init(&node->used_by);
430 dm_list_init(&node->props.segs);
3d0480ed
AK
431
432 dev = MKDEV(info->major, info->minor);
433
b4f1578f 434 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
3d0480ed 435 sizeof(dev), node)) {
b4f1578f
AK
436 log_error("dtree node hash insertion failed");
437 dm_pool_free(dtree->mem, node);
3d0480ed
AK
438 return NULL;
439 }
440
165e4a11 441 if (uuid && *uuid &&
b4f1578f
AK
442 !dm_hash_insert(dtree->uuids, uuid, node)) {
443 log_error("dtree uuid hash insertion failed");
444 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
165e4a11 445 sizeof(dev));
b4f1578f 446 dm_pool_free(dtree->mem, node);
165e4a11
AK
447 return NULL;
448 }
449
3d0480ed
AK
450 return node;
451}
452
b4f1578f 453static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
454 uint32_t major, uint32_t minor)
455{
456 uint64_t dev = MKDEV(major, minor);
457
b4f1578f 458 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
3d0480ed
AK
459 sizeof(dev));
460}
461
b4f1578f 462static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
463 const char *uuid)
464{
87f98002
AK
465 struct dm_tree_node *node;
466
467 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
468 return node;
469
470 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
471 return NULL;
472
473 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
165e4a11
AK
474}
475
a3f6b2ce 476static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
3d0480ed
AK
477 const char **name, const char **uuid,
478 struct dm_info *info, struct dm_deps **deps)
479{
480 memset(info, 0, sizeof(*info));
481
482 if (!dm_is_dm_major(major)) {
483 *name = "";
484 *uuid = "";
485 *deps = NULL;
486 info->major = major;
487 info->minor = minor;
488 info->exists = 0;
165e4a11
AK
489 info->live_table = 0;
490 info->inactive_table = 0;
491 info->read_only = 0;
3d0480ed
AK
492 return 1;
493 }
494
495 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
496 log_error("deps dm_task creation failed");
497 return 0;
498 }
499
b4f1578f
AK
500 if (!dm_task_set_major(*dmt, major)) {
501 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
502 major, minor);
3d0480ed 503 goto failed;
b4f1578f 504 }
3d0480ed 505
b4f1578f
AK
506 if (!dm_task_set_minor(*dmt, minor)) {
507 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
508 major, minor);
3d0480ed 509 goto failed;
b4f1578f 510 }
3d0480ed 511
b4f1578f
AK
512 if (!dm_task_run(*dmt)) {
513 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
514 major, minor);
3d0480ed 515 goto failed;
b4f1578f 516 }
3d0480ed 517
b4f1578f
AK
518 if (!dm_task_get_info(*dmt, info)) {
519 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
520 major, minor);
3d0480ed 521 goto failed;
b4f1578f 522 }
3d0480ed
AK
523
524 if (!info->exists) {
525 *name = "";
526 *uuid = "";
527 *deps = NULL;
528 } else {
529 if (info->major != major) {
b4f1578f 530 log_error("Inconsistent dtree major number: %u != %u",
3d0480ed
AK
531 major, info->major);
532 goto failed;
533 }
534 if (info->minor != minor) {
b4f1578f 535 log_error("Inconsistent dtree minor number: %u != %u",
3d0480ed
AK
536 minor, info->minor);
537 goto failed;
538 }
a3f6b2ce 539 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
3d0480ed
AK
540 log_error("name pool_strdup failed");
541 goto failed;
542 }
a3f6b2ce 543 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
3d0480ed
AK
544 log_error("uuid pool_strdup failed");
545 goto failed;
546 }
547 *deps = dm_task_get_deps(*dmt);
548 }
549
550 return 1;
551
552failed:
553 dm_task_destroy(*dmt);
554 return 0;
555}
556
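/*
 * _add_dev() looks up the node for (major, minor) or creates it from the
 * DM_DEVICE_DEPS data obtained via _deps(), links it under 'parent', and
 * then recurses over the reported dependencies so the whole stack below
 * the device ends up in the tree.
 */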
b4f1578f
AK
557static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
558 struct dm_tree_node *parent,
cda69e17
PR
559 uint32_t major, uint32_t minor,
560 uint16_t udev_flags)
3d0480ed
AK
561{
562 struct dm_task *dmt = NULL;
563 struct dm_info info;
564 struct dm_deps *deps = NULL;
565 const char *name = NULL;
566 const char *uuid = NULL;
b4f1578f 567 struct dm_tree_node *node = NULL;
3d0480ed 568 uint32_t i;
3d0480ed
AK
569 int new = 0;
570
571 /* Already in tree? */
b4f1578f
AK
572 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
573 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
574 return_NULL;
3d0480ed 575
f16aea9e 576 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
cda69e17 577 NULL, udev_flags)))
b4f1578f 578 goto_out;
3d0480ed
AK
579 new = 1;
580 }
581
165e4a11
AK
582 if (!_link_tree_nodes(parent, node)) {
583 node = NULL;
b4f1578f 584 goto_out;
165e4a11 585 }
3d0480ed
AK
586
587 /* If node was already in tree, no need to recurse. */
588 if (!new)
165e4a11 589 goto out;
3d0480ed
AK
590
591 /* Can't recurse if not a mapped device or there are no dependencies */
592 if (!node->info.exists || !deps->count) {
b4f1578f
AK
593 if (!_add_to_bottomlevel(node)) {
594 stack;
165e4a11 595 node = NULL;
b4f1578f 596 }
165e4a11 597 goto out;
3d0480ed
AK
598 }
599
600 /* Add dependencies to tree */
601 for (i = 0; i < deps->count; i++)
b4f1578f 602 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
cda69e17 603 MINOR(deps->device[i]), udev_flags)) {
165e4a11 604 node = NULL;
b4f1578f 605 goto_out;
165e4a11 606 }
3d0480ed 607
3d0480ed
AK
608out:
609 if (dmt)
610 dm_task_destroy(dmt);
611
165e4a11
AK
612 return node;
613}
614
b4f1578f 615static int _node_clear_table(struct dm_tree_node *dnode)
165e4a11
AK
616{
617 struct dm_task *dmt;
618 struct dm_info *info;
619 const char *name;
620 int r;
621
622 if (!(info = &dnode->info)) {
b4f1578f 623 log_error("_node_clear_table failed: missing info");
165e4a11
AK
624 return 0;
625 }
626
b4f1578f
AK
627 if (!(name = dm_tree_node_get_name(dnode))) {
628 log_error("_node_clear_table failed: missing name");
165e4a11
AK
629 return 0;
630 }
631
632 /* Is there a table? */
633 if (!info->exists || !info->inactive_table)
634 return 1;
635
10d0d9c7
AK
636// FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.
637
165e4a11
AK
638 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
639 name, info->major, info->minor);
640
641 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
165e4a11
AK
642 log_error("Table clear dm_task creation failed for %s", name);
643 return 0;
644 }
645
646 if (!dm_task_set_major(dmt, info->major) ||
647 !dm_task_set_minor(dmt, info->minor)) {
648 log_error("Failed to set device number for %s table clear", name);
649 dm_task_destroy(dmt);
650 return 0;
651 }
652
653 r = dm_task_run(dmt);
654
655 if (!dm_task_get_info(dmt, info)) {
b4f1578f 656 log_error("_node_clear_table failed: info missing after running task for %s", name);
165e4a11
AK
657 r = 0;
658 }
659
660 dm_task_destroy(dmt);
661
3d0480ed
AK
662 return r;
663}
664
b4f1578f 665struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
165e4a11
AK
666 const char *name,
667 const char *uuid,
668 uint32_t major, uint32_t minor,
669 int read_only,
670 int clear_inactive,
671 void *context)
672{
b4f1578f 673 struct dm_tree_node *dnode;
165e4a11
AK
674 struct dm_info info;
675 const char *name2;
676 const char *uuid2;
677
678 /* Do we need to add node to tree? */
b4f1578f
AK
679 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
680 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
681 log_error("name pool_strdup failed");
682 return NULL;
683 }
b4f1578f 684 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
165e4a11
AK
685 log_error("uuid pool_strdup failed");
686 return NULL;
687 }
688
689 info.major = 0;
690 info.minor = 0;
691 info.exists = 0;
692 info.live_table = 0;
693 info.inactive_table = 0;
694 info.read_only = 0;
695
f16aea9e
PR
696 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
697 context, 0)))
b4f1578f 698 return_NULL;
165e4a11
AK
699
700 /* Attach to root node until a table is supplied */
b4f1578f
AK
701 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
702 return_NULL;
165e4a11
AK
703
704 dnode->props.major = major;
705 dnode->props.minor = minor;
706 dnode->props.new_name = NULL;
bb875bb9 707 dnode->props.size_changed = 0;
165e4a11
AK
708 } else if (strcmp(name, dnode->name)) {
709 /* Do we need to rename node? */
b4f1578f 710 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
711 log_error("name pool_strdup failed");
712 return 0;
713 }
714 }
715
716 dnode->props.read_only = read_only ? 1 : 0;
52b84409
AK
717 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
718 dnode->props.read_ahead_flags = 0;
165e4a11 719
b4f1578f
AK
720 if (clear_inactive && !_node_clear_table(dnode))
721 return_NULL;
165e4a11
AK
722
723 dnode->context = context;
f16aea9e 724 dnode->udev_flags = 0;
165e4a11
AK
725
726 return dnode;
727}
728
f16aea9e
PR
729struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
730 const char *name,
731 const char *uuid,
732 uint32_t major,
733 uint32_t minor,
734 int read_only,
735 int clear_inactive,
736 void *context,
737 uint16_t udev_flags)
738{
739 struct dm_tree_node *node;
740
741 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
742 clear_inactive, context)))
743 node->udev_flags = udev_flags;
744
745 return node;
746}
747
748
52b84409
AK
749void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
750 uint32_t read_ahead,
751 uint32_t read_ahead_flags)
08e64ce5 752{
52b84409
AK
753 dnode->props.read_ahead = read_ahead;
754 dnode->props.read_ahead_flags = read_ahead_flags;
755}
756
76d1aec8
ZK
757void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
758 struct dm_tree_node *presuspend_node)
759{
760 node->presuspend_node = presuspend_node;
761}
762
b4f1578f 763int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
3d0480ed 764{
cda69e17
PR
765 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
766}
767
768int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
769 uint32_t minor, uint16_t udev_flags)
770{
771 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
3d0480ed
AK
772}
773
04bde319 774const char *dm_tree_node_get_name(const struct dm_tree_node *node)
3d0480ed
AK
775{
776 return node->info.exists ? node->name : "";
777}
778
04bde319 779const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
3d0480ed
AK
780{
781 return node->info.exists ? node->uuid : "";
782}
783
04bde319 784const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
3d0480ed
AK
785{
786 return &node->info;
787}
788
04bde319 789void *dm_tree_node_get_context(const struct dm_tree_node *node)
165e4a11
AK
790{
791 return node->context;
792}
793
04bde319 794int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
eb91c4ee
MB
795{
796 return dnode->props.size_changed;
797}
798
04bde319 799int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
3d0480ed
AK
800{
801 if (inverted) {
b4f1578f 802 if (_nodes_are_linked(&node->dtree->root, node))
3d0480ed 803 return 0;
2c44337b 804 return dm_list_size(&node->used_by);
3d0480ed
AK
805 }
806
b4f1578f 807 if (_nodes_are_linked(node, &node->dtree->root))
3d0480ed
AK
808 return 0;
809
2c44337b 810 return dm_list_size(&node->uses);
3d0480ed
AK
811}
812
/*
 * Returns 1 if no prefix supplied
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)
		return 0;

	if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
		return 1;

	return 0;
}
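
/*
 * Example of the transition handling above (illustrative values): with
 * uuid_prefix "LVM-abcdef", an active device whose uuid is "abcdef0123"
 * (i.e. one activated before the "LVM-" prefix was introduced) still
 * matches, because the prefix is compared again with its leading "LVM-"
 * stripped.
 */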
839
690a5da2
AK
840/*
841 * Returns 1 if no children.
842 */
b4f1578f 843static int _children_suspended(struct dm_tree_node *node,
690a5da2
AK
844 uint32_t inverted,
845 const char *uuid_prefix,
846 size_t uuid_prefix_len)
847{
2c44337b 848 struct dm_list *list;
b4f1578f 849 struct dm_tree_link *dlink;
690a5da2
AK
850 const struct dm_info *dinfo;
851 const char *uuid;
852
853 if (inverted) {
b4f1578f 854 if (_nodes_are_linked(&node->dtree->root, node))
690a5da2
AK
855 return 1;
856 list = &node->used_by;
857 } else {
b4f1578f 858 if (_nodes_are_linked(node, &node->dtree->root))
690a5da2
AK
859 return 1;
860 list = &node->uses;
861 }
862
2c44337b 863 dm_list_iterate_items(dlink, list) {
b4f1578f 864 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
690a5da2
AK
865 stack;
866 continue;
867 }
868
869 /* Ignore if it doesn't belong to this VG */
2b69db1f 870 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2
AK
871 continue;
872
76d1aec8
ZK
873 /* Ignore if parent node wants to presuspend this node */
874 if (dlink->node->presuspend_node == node)
875 continue;
876
b4f1578f
AK
877 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
878 stack; /* FIXME Is this normal? */
690a5da2
AK
879 return 0;
880 }
881
882 if (!dinfo->suspended)
883 return 0;
884 }
885
886 return 1;
887}
888
3d0480ed
AK
889/*
890 * Set major and minor to zero for root of tree.
891 */
b4f1578f 892struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
3d0480ed
AK
893 uint32_t major,
894 uint32_t minor)
895{
896 if (!major && !minor)
b4f1578f 897 return &dtree->root;
3d0480ed 898
b4f1578f 899 return _find_dm_tree_node(dtree, major, minor);
3d0480ed
AK
900}
901
165e4a11
AK
902/*
903 * Set uuid to NULL for root of tree.
904 */
b4f1578f 905struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
906 const char *uuid)
907{
908 if (!uuid || !*uuid)
b4f1578f 909 return &dtree->root;
165e4a11 910
b4f1578f 911 return _find_dm_tree_node_by_uuid(dtree, uuid);
165e4a11
AK
912}
913
3d0480ed
AK
914/*
915 * First time set *handle to NULL.
916 * Set inverted to invert the tree.
917 */
b4f1578f 918struct dm_tree_node *dm_tree_next_child(void **handle,
04bde319
ZK
919 const struct dm_tree_node *parent,
920 uint32_t inverted)
3d0480ed 921{
2c44337b 922 struct dm_list **dlink = (struct dm_list **) handle;
04bde319 923 const struct dm_list *use_list;
3d0480ed
AK
924
925 if (inverted)
926 use_list = &parent->used_by;
927 else
928 use_list = &parent->uses;
929
930 if (!*dlink)
2c44337b 931 *dlink = dm_list_first(use_list);
3d0480ed 932 else
2c44337b 933 *dlink = dm_list_next(use_list, *dlink);
3d0480ed 934
2c44337b 935 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
3d0480ed
AK
936}
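
/*
 * Typical iteration pattern, as used throughout this file:
 *
 *	void *handle = NULL;
 *	struct dm_tree_node *child;
 *
 *	while ((child = dm_tree_next_child(&handle, parent, 0)))
 *		... process child ...
 *
 * Passing inverted = 1 walks the used_by list instead of the uses list.
 */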
937
3e8c6b73 938/*
a6d97ede 939 * Deactivate a device with its dependencies if the uuid prefix matches.
3e8c6b73 940 */
db208f51
AK
941static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
942 struct dm_info *info)
3e8c6b73
AK
943{
944 struct dm_task *dmt;
945 int r;
946
947 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
948 log_error("_info_by_dev: dm_task creation failed");
949 return 0;
950 }
951
952 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
953 log_error("_info_by_dev: Failed to set device number");
954 dm_task_destroy(dmt);
955 return 0;
956 }
957
db208f51
AK
958 if (!with_open_count && !dm_task_no_open_count(dmt))
959 log_error("Failed to disable open_count");
960
3e8c6b73
AK
961 if ((r = dm_task_run(dmt)))
962 r = dm_task_get_info(dmt, info);
963
964 dm_task_destroy(dmt);
965
966 return r;
967}
968
125712be
PR
969static int _check_device_not_in_use(struct dm_info *info)
970{
971 if (!info->exists)
972 return 1;
973
974 /* If sysfs is not used, use open_count information only. */
c3e5b497
PR
975 if (!*dm_sysfs_dir()) {
976 if (info->open_count) {
977 log_error("Device %" PRIu32 ":%" PRIu32 " in use",
978 info->major, info->minor);
979 return 0;
980 }
981
982 return 1;
983 }
125712be
PR
984
985 if (dm_device_has_holders(info->major, info->minor)) {
986 log_error("Device %" PRIu32 ":%" PRIu32 " is used "
987 "by another device.", info->major, info->minor);
988 return 0;
989 }
990
991 if (dm_device_has_mounted_fs(info->major, info->minor)) {
992 log_error("Device %" PRIu32 ":%" PRIu32 " contains "
993 "a filesystem in use.", info->major, info->minor);
994 return 0;
995 }
996
997 return 1;
998}
999
f3ef15ef
ZK
1000/* Check if all parent nodes of given node have open_count == 0 */
1001static int _node_has_closed_parents(struct dm_tree_node *node,
1002 const char *uuid_prefix,
1003 size_t uuid_prefix_len)
1004{
1005 struct dm_tree_link *dlink;
1006 const struct dm_info *dinfo;
1007 struct dm_info info;
1008 const char *uuid;
1009
1010 /* Iterate through parents of this node */
1011 dm_list_iterate_items(dlink, &node->used_by) {
1012 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
1013 stack;
1014 continue;
1015 }
1016
1017 /* Ignore if it doesn't belong to this VG */
1018 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1019 continue;
1020
1021 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
1022 stack; /* FIXME Is this normal? */
1023 return 0;
1024 }
1025
1026 /* Refresh open_count */
1027 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1028 !info.exists)
1029 continue;
1030
eb418883
ZK
1031 if (info.open_count) {
1032 log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
1033 dinfo->major, dinfo->minor, info.open_count);
f3ef15ef 1034 return 0;
eb418883 1035 }
f3ef15ef
ZK
1036 }
1037
1038 return 1;
1039}
1040
f16aea9e 1041static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
787200ef 1042 uint32_t *cookie, uint16_t udev_flags, int retry)
3e8c6b73
AK
1043{
1044 struct dm_task *dmt;
bd90c6b2 1045 int r = 0;
3e8c6b73
AK
1046
1047 log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1048
1049 if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
1050 log_error("Deactivation dm_task creation failed for %s", name);
1051 return 0;
1052 }
1053
1054 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1055 log_error("Failed to set device number for %s deactivation", name);
bd90c6b2 1056 goto out;
3e8c6b73
AK
1057 }
1058
1059 if (!dm_task_no_open_count(dmt))
1060 log_error("Failed to disable open_count");
1061
f16aea9e 1062 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1063 goto out;
1064
787200ef
PR
1065
1066 if (retry)
1067 dm_task_retry_remove(dmt);
1068
3e8c6b73
AK
1069 r = dm_task_run(dmt);
1070
0437bccc
AK
1071 /* FIXME Until kernel returns actual name so dm-iface.c can handle it */
1072 rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
9032898e 1073 dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));
165e4a11 1074
db208f51
AK
1075 /* FIXME Remove node from tree or mark invalid? */
1076
bd90c6b2 1077out:
db208f51
AK
1078 dm_task_destroy(dmt);
1079
1080 return r;
1081}
1082
bd90c6b2 1083static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
f16aea9e 1084 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
165e4a11
AK
1085{
1086 struct dm_task *dmt;
1087 int r = 0;
1088
1089 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
1090
1091 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
1092 log_error("Rename dm_task creation failed for %s", old_name);
1093 return 0;
1094 }
1095
1096 if (!dm_task_set_name(dmt, old_name)) {
1097 log_error("Failed to set name for %s rename.", old_name);
1098 goto out;
1099 }
1100
b4f1578f 1101 if (!dm_task_set_newname(dmt, new_name))
40e5fd8b 1102 goto_out;
165e4a11
AK
1103
1104 if (!dm_task_no_open_count(dmt))
1105 log_error("Failed to disable open_count");
1106
f16aea9e 1107 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1108 goto out;
1109
165e4a11
AK
1110 r = dm_task_run(dmt);
1111
1112out:
1113 dm_task_destroy(dmt);
1114
1115 return r;
1116}
1117
165e4a11
AK
1118/* FIXME Merge with _suspend_node? */
1119static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1120 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1121 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1122 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1123{
1124 struct dm_task *dmt;
bd90c6b2 1125 int r = 0;
165e4a11
AK
1126
1127 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1128
1129 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1130 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1131 return 0;
1132 }
1133
0b7d16bc
AK
1134 /* FIXME Kernel should fill in name on return instead */
1135 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1136 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1137 goto out;
0b7d16bc
AK
1138 }
1139
165e4a11
AK
1140 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1141 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1142 goto out;
165e4a11
AK
1143 }
1144
1145 if (!dm_task_no_open_count(dmt))
1146 log_error("Failed to disable open_count");
1147
52b84409
AK
1148 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1149 log_error("Failed to set read ahead");
1150
f16aea9e 1151 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1152 goto_out;
bd90c6b2 1153
9a8f192a
ZK
1154 if (!(r = dm_task_run(dmt)))
1155 goto_out;
1156
1157 if (already_suspended)
1158 dec_suspended();
1159
1160 if (!(r = dm_task_get_info(dmt, newinfo)))
1161 stack;
165e4a11 1162
bd90c6b2 1163out:
165e4a11
AK
1164 dm_task_destroy(dmt);
1165
1166 return r;
1167}
1168
db208f51 1169static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
b9ffd32c 1170 int skip_lockfs, int no_flush, struct dm_info *newinfo)
db208f51
AK
1171{
1172 struct dm_task *dmt;
1173 int r;
1174
b9ffd32c
AK
1175 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1176 name, major, minor,
1177 skip_lockfs ? "" : " with filesystem sync",
6e1898a5 1178 no_flush ? "" : " with device flush");
db208f51
AK
1179
1180 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1181 log_error("Suspend dm_task creation failed for %s", name);
1182 return 0;
1183 }
1184
1185 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1186 log_error("Failed to set device number for %s suspension.", name);
1187 dm_task_destroy(dmt);
1188 return 0;
1189 }
1190
1191 if (!dm_task_no_open_count(dmt))
1192 log_error("Failed to disable open_count");
1193
c55b1410
AK
1194 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1195 log_error("Failed to set skip_lockfs flag.");
1196
b9ffd32c
AK
1197 if (no_flush && !dm_task_no_flush(dmt))
1198 log_error("Failed to set no_flush flag.");
1199
1840aa09
AK
1200 if ((r = dm_task_run(dmt))) {
1201 inc_suspended();
db208f51 1202 r = dm_task_get_info(dmt, newinfo);
1840aa09 1203 }
db208f51 1204
3e8c6b73
AK
1205 dm_task_destroy(dmt);
1206
1207 return r;
1208}
1209
18e0f934
AK
1210/*
1211 * FIXME Don't attempt to deactivate known internal dependencies.
1212 */
1213static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1214 const char *uuid_prefix,
1215 size_t uuid_prefix_len,
1216 unsigned level)
3e8c6b73 1217{
b7eb2ad0 1218 int r = 1;
3e8c6b73 1219 void *handle = NULL;
b4f1578f 1220 struct dm_tree_node *child = dnode;
3e8c6b73
AK
1221 struct dm_info info;
1222 const struct dm_info *dinfo;
1223 const char *name;
1224 const char *uuid;
1225
b4f1578f
AK
1226 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1227 if (!(dinfo = dm_tree_node_get_info(child))) {
3e8c6b73
AK
1228 stack;
1229 continue;
1230 }
1231
b4f1578f 1232 if (!(name = dm_tree_node_get_name(child))) {
3e8c6b73
AK
1233 stack;
1234 continue;
1235 }
1236
b4f1578f 1237 if (!(uuid = dm_tree_node_get_uuid(child))) {
3e8c6b73
AK
1238 stack;
1239 continue;
1240 }
1241
1242 /* Ignore if it doesn't belong to this VG */
2b69db1f 1243 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
3e8c6b73 1244 continue;
3e8c6b73
AK
1245
1246 /* Refresh open_count */
db208f51 1247 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
f55021f4 1248 !info.exists)
3e8c6b73
AK
1249 continue;
1250
125712be
PR
1251 if (!_check_device_not_in_use(&info))
1252 continue;
1253
f3ef15ef 1254 /* Also checking open_count in parent nodes of presuspend_node */
125712be 1255 if ((child->presuspend_node &&
f3ef15ef
ZK
1256 !_node_has_closed_parents(child->presuspend_node,
1257 uuid_prefix, uuid_prefix_len))) {
18e0f934
AK
1258 /* Only report error from (likely non-internal) dependency at top level */
1259 if (!level) {
1260 log_error("Unable to deactivate open %s (%" PRIu32
1261 ":%" PRIu32 ")", name, info.major,
1262 info.minor);
1263 r = 0;
1264 }
f55021f4
AK
1265 continue;
1266 }
1267
76d1aec8
ZK
1268 /* Suspend child node first if requested */
1269 if (child->presuspend_node &&
1270 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1271 continue;
1272
f16aea9e 1273 if (!_deactivate_node(name, info.major, info.minor,
787200ef
PR
1274 &child->dtree->cookie, child->udev_flags,
1275 child->dtree->retry_remove)) {
3e8c6b73
AK
1276 log_error("Unable to deactivate %s (%" PRIu32
1277 ":%" PRIu32 ")", name, info.major,
1278 info.minor);
b7eb2ad0 1279 r = 0;
3e8c6b73 1280 continue;
f4249251
AK
1281 } else if (info.suspended)
1282 dec_suspended();
3e8c6b73 1283
18e0f934
AK
1284 if (dm_tree_node_num_children(child, 0)) {
1285 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
b7eb2ad0 1286 return_0;
18e0f934 1287 }
3e8c6b73
AK
1288 }
1289
b7eb2ad0 1290 return r;
3e8c6b73 1291}
db208f51 1292
18e0f934
AK
1293int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1294 const char *uuid_prefix,
1295 size_t uuid_prefix_len)
1296{
1297 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1298}
1299
c55b1410
AK
1300void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1301{
1302 dnode->dtree->skip_lockfs = 1;
1303}
1304
b9ffd32c
AK
1305void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1306{
1307 dnode->dtree->no_flush = 1;
1308}
1309
787200ef
PR
1310void dm_tree_retry_remove(struct dm_tree_node *dnode)
1311{
1312 dnode->dtree->retry_remove = 1;
1313}
1314
b4f1578f 1315int dm_tree_suspend_children(struct dm_tree_node *dnode,
08e64ce5
ZK
1316 const char *uuid_prefix,
1317 size_t uuid_prefix_len)
db208f51 1318{
68085c93 1319 int r = 1;
db208f51 1320 void *handle = NULL;
b4f1578f 1321 struct dm_tree_node *child = dnode;
db208f51
AK
1322 struct dm_info info, newinfo;
1323 const struct dm_info *dinfo;
1324 const char *name;
1325 const char *uuid;
1326
690a5da2 1327 /* Suspend nodes at this level of the tree */
b4f1578f
AK
1328 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1329 if (!(dinfo = dm_tree_node_get_info(child))) {
db208f51
AK
1330 stack;
1331 continue;
1332 }
1333
b4f1578f 1334 if (!(name = dm_tree_node_get_name(child))) {
db208f51
AK
1335 stack;
1336 continue;
1337 }
1338
b4f1578f 1339 if (!(uuid = dm_tree_node_get_uuid(child))) {
db208f51
AK
1340 stack;
1341 continue;
1342 }
1343
1344 /* Ignore if it doesn't belong to this VG */
2b69db1f 1345 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
db208f51
AK
1346 continue;
1347
690a5da2
AK
1348 /* Ensure immediate parents are already suspended */
1349 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1350 continue;
1351
db208f51 1352 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
b700541f 1353 !info.exists || info.suspended)
db208f51
AK
1354 continue;
1355
c55b1410 1356 if (!_suspend_node(name, info.major, info.minor,
b9ffd32c
AK
1357 child->dtree->skip_lockfs,
1358 child->dtree->no_flush, &newinfo)) {
db208f51
AK
1359 log_error("Unable to suspend %s (%" PRIu32
1360 ":%" PRIu32 ")", name, info.major,
1361 info.minor);
68085c93 1362 r = 0;
db208f51
AK
1363 continue;
1364 }
1365
1366 /* Update cached info */
1367 child->info = newinfo;
690a5da2
AK
1368 }
1369
1370 /* Then suspend any child nodes */
1371 handle = NULL;
1372
b4f1578f
AK
1373 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1374 if (!(uuid = dm_tree_node_get_uuid(child))) {
690a5da2
AK
1375 stack;
1376 continue;
1377 }
1378
1379 /* Ignore if it doesn't belong to this VG */
87f98002 1380 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2 1381 continue;
db208f51 1382
b4f1578f 1383 if (dm_tree_node_num_children(child, 0))
68085c93
MS
1384 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1385 return_0;
db208f51
AK
1386 }
1387
68085c93 1388 return r;
db208f51
AK
1389}
1390
b4f1578f 1391int dm_tree_activate_children(struct dm_tree_node *dnode,
db208f51
AK
1392 const char *uuid_prefix,
1393 size_t uuid_prefix_len)
1394{
2ca6b865 1395 int r = 1;
db208f51 1396 void *handle = NULL;
b4f1578f 1397 struct dm_tree_node *child = dnode;
165e4a11
AK
1398 struct dm_info newinfo;
1399 const char *name;
db208f51 1400 const char *uuid;
56c28292 1401 int priority;
db208f51 1402
165e4a11 1403 /* Activate children first */
b4f1578f
AK
1404 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1405 if (!(uuid = dm_tree_node_get_uuid(child))) {
165e4a11
AK
1406 stack;
1407 continue;
db208f51
AK
1408 }
1409
908db078
AK
1410 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1411 continue;
db208f51 1412
b4f1578f 1413 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
1414 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1415 return_0;
56c28292 1416 }
165e4a11 1417
56c28292 1418 handle = NULL;
165e4a11 1419
aa6f4e51 1420 for (priority = 0; priority < 3; priority++) {
56c28292
AK
1421 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1422 if (!(uuid = dm_tree_node_get_uuid(child))) {
1423 stack;
1424 continue;
165e4a11 1425 }
165e4a11 1426
56c28292
AK
1427 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1428 continue;
165e4a11 1429
56c28292
AK
1430 if (priority != child->activation_priority)
1431 continue;
165e4a11 1432
56c28292
AK
1433 if (!(name = dm_tree_node_get_name(child))) {
1434 stack;
1435 continue;
1436 }
1437
1438 /* Rename? */
1439 if (child->props.new_name) {
bd90c6b2 1440 if (!_rename_node(name, child->props.new_name, child->info.major,
f16aea9e
PR
1441 child->info.minor, &child->dtree->cookie,
1442 child->udev_flags)) {
56c28292
AK
1443 log_error("Failed to rename %s (%" PRIu32
1444 ":%" PRIu32 ") to %s", name, child->info.major,
1445 child->info.minor, child->props.new_name);
1446 return 0;
1447 }
1448 child->name = child->props.new_name;
1449 child->props.new_name = NULL;
1450 }
1451
1452 if (!child->info.inactive_table && !child->info.suspended)
1453 continue;
1454
bafa2f39 1455 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 1456 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09 1457 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
56c28292 1458 log_error("Unable to resume %s (%" PRIu32
bafa2f39 1459 ":%" PRIu32 ")", child->name, child->info.major,
56c28292 1460 child->info.minor);
2ca6b865 1461 r = 0;
56c28292
AK
1462 continue;
1463 }
1464
1465 /* Update cached info */
1466 child->info = newinfo;
1467 }
db208f51
AK
1468 }
1469
165e4a11
AK
1470 handle = NULL;
1471
2ca6b865 1472 return r;
165e4a11
AK
1473}
1474
b4f1578f 1475static int _create_node(struct dm_tree_node *dnode)
165e4a11
AK
1476{
1477 int r = 0;
1478 struct dm_task *dmt;
1479
1480 log_verbose("Creating %s", dnode->name);
1481
1482 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1483 log_error("Create dm_task creation failed for %s", dnode->name);
1484 return 0;
1485 }
1486
1487 if (!dm_task_set_name(dmt, dnode->name)) {
1488 log_error("Failed to set device name for %s", dnode->name);
1489 goto out;
1490 }
1491
1492 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1493 log_error("Failed to set uuid for %s", dnode->name);
1494 goto out;
1495 }
1496
1497 if (dnode->props.major &&
1498 (!dm_task_set_major(dmt, dnode->props.major) ||
1499 !dm_task_set_minor(dmt, dnode->props.minor))) {
1500 log_error("Failed to set device number for %s creation.", dnode->name);
1501 goto out;
1502 }
1503
1504 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1505 log_error("Failed to set read only flag for %s", dnode->name);
1506 goto out;
1507 }
1508
1509 if (!dm_task_no_open_count(dmt))
1510 log_error("Failed to disable open_count");
1511
1512 if ((r = dm_task_run(dmt)))
1513 r = dm_task_get_info(dmt, &dnode->info);
1514
1515out:
1516 dm_task_destroy(dmt);
1517
1518 return r;
1519}
1520
1521
b4f1578f 1522static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1523{
1524 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1525 log_error("Failed to format %s device number for %s as dm "
1526 "target (%u,%u)",
1527 node->name, node->uuid, node->info.major, node->info.minor);
1528 return 0;
165e4a11
AK
1529 }
1530
1531 return 1;
1532}
1533
/* Simplify string emitting code */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
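
/*
 * EMIT_PARAMS() appends formatted text at offset 'p' within the caller's
 * 'params' buffer (of size 'paramsize') and advances 'p', e.g.
 *
 *	EMIT_PARAMS(pos, " %s %" PRIu64, devbuf, area->offset);
 *
 * On overflow it makes the calling function return -1.
 */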
ffa9b6a5 1544
3c74075f
JEB
1545/*
1546 * _emit_areas_line
1547 *
1548 * Returns: 1 on success, 0 on failure
1549 */
08f1ddea 1550static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
4dcaa230
AK
1551 struct load_segment *seg, char *params,
1552 size_t paramsize, int *pos)
165e4a11
AK
1553{
1554 struct seg_area *area;
7d7d93ac 1555 char devbuf[DM_FORMAT_DEV_BUFSIZE];
609faae9 1556 unsigned first_time = 1;
db3c1ac1 1557 const char *logtype, *synctype;
b262f3e1 1558 unsigned log_parm_count;
165e4a11 1559
2c44337b 1560 dm_list_iterate_items(area, &seg->areas) {
b262f3e1
ZK
1561 switch (seg->type) {
1562 case SEG_REPLICATOR_DEV:
6d04311e
JEB
1563 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1564 return_0;
1565
b262f3e1
ZK
1566 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1567 if (first_time)
1568 EMIT_PARAMS(*pos, " nolog 0");
1569 else {
1570 /* Remote devices */
1571 log_parm_count = (area->flags &
1572 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1573
1574 if (!area->slog) {
1575 devbuf[0] = 0; /* Only core log parameters */
1576 logtype = "core";
1577 } else {
1578 devbuf[0] = ' '; /* Extra space before device name */
1579 if (!_build_dev_string(devbuf + 1,
1580 sizeof(devbuf) - 1,
1581 area->slog))
1582 return_0;
1583 logtype = "disk";
1584 log_parm_count++; /* Extra sync log device name parameter */
1585 }
1586
1587 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1588 log_parm_count, devbuf, area->region_size);
1589
db3c1ac1
AK
1590 synctype = (area->flags & DM_NOSYNC) ?
1591 " nosync" : (area->flags & DM_FORCESYNC) ?
1592 " sync" : NULL;
b262f3e1 1593
db3c1ac1
AK
1594 if (synctype)
1595 EMIT_PARAMS(*pos, "%s", synctype);
b262f3e1
ZK
1596 }
1597 break;
cac52ca4
JEB
1598 case SEG_RAID1:
1599 case SEG_RAID4:
1600 case SEG_RAID5_LA:
1601 case SEG_RAID5_RA:
1602 case SEG_RAID5_LS:
1603 case SEG_RAID5_RS:
1604 case SEG_RAID6_ZR:
1605 case SEG_RAID6_NR:
1606 case SEG_RAID6_NC:
6d04311e
JEB
1607 if (!area->dev_node) {
1608 EMIT_PARAMS(*pos, " -");
1609 break;
1610 }
1611 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1612 return_0;
1613
cac52ca4
JEB
1614 EMIT_PARAMS(*pos, " %s", devbuf);
1615 break;
b262f3e1 1616 default:
6d04311e
JEB
1617 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1618 return_0;
1619
b262f3e1
ZK
1620 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1621 devbuf, area->offset);
1622 }
609faae9
AK
1623
1624 first_time = 0;
165e4a11
AK
1625 }
1626
1627 return 1;
1628}
1629
b262f3e1
ZK
1630static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
1631 size_t paramsize, int *pos)
1632{
1633 const struct load_segment *rlog_seg;
1634 struct replicator_site *rsite;
1635 char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
1636 unsigned parm_count;
1637
1638 if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
1639 return_0;
1640
1641 rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
1642 struct load_segment);
1643
1644 EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
1645 seg->rlog_type, rlogbuf, rlog_seg->size);
1646
1647 dm_list_iterate_items(rsite, &seg->rsites) {
1648 parm_count = (rsite->fall_behind_data
1649 || rsite->fall_behind_ios
1650 || rsite->async_timeout) ? 4 : 2;
1651
1652 EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
1653 (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");
1654
1655 if (rsite->fall_behind_data)
1656 EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
1657 else if (rsite->fall_behind_ios)
1658 EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
1659 else if (rsite->async_timeout)
1660 EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
1661 }
1662
1663 return 1;
1664}
1665
3c74075f 1666/*
3c74075f
JEB
1667 * Returns: 1 on success, 0 on failure
1668 */
beecb1e1
ZK
1669static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
1670 char *params, size_t paramsize)
165e4a11 1671{
8f26e18c
JEB
1672 int block_on_error = 0;
1673 int handle_errors = 0;
1674 int dm_log_userspace = 0;
1675 struct utsname uts;
dbcb64b8 1676 unsigned log_parm_count;
b39fdcf4 1677 int pos = 0, parts;
7d7d93ac 1678 char logbuf[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 1679 const char *logtype;
b39fdcf4 1680 unsigned kmaj = 0, kmin = 0, krel = 0;
165e4a11 1681
b39fdcf4
MB
1682 if (uname(&uts) == -1) {
1683 log_error("Cannot read kernel release version.");
1684 return 0;
1685 }
1686
1687 /* Kernels with a major number of 2 always had 3 parts. */
1688 parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
1689 if (parts < 1 || (kmaj < 3 && parts < 3)) {
1690 log_error("Wrong kernel release version %s.", uts.release);
30a65310
ZK
1691 return 0;
1692 }
67b25ed4 1693
8f26e18c
JEB
1694 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1695 /*
1696 * Originally, block_on_error was an argument to the log
1697 * portion of the mirror CTR table. It was renamed to
1698 * "handle_errors" and now resides in the 'features'
1699 * section of the mirror CTR table (i.e. at the end).
1700 *
1701 * We can identify whether to use "block_on_error" or
1702 * "handle_errors" by the dm-mirror module's version
1703 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1704 */
ba61f848 1705 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
8f26e18c
JEB
1706 handle_errors = 1;
1707 else
1708 block_on_error = 1;
1709 }
1710
1711 if (seg->clustered) {
1712 /* Cluster mirrors require a UUID */
1713 if (!seg->uuid)
1714 return_0;
1715
1716 /*
1717 * Cluster mirrors used to have their own log
1718 * types. Now they are accessed through the
1719 * userspace log type.
1720 *
1721 * The dm-log-userspace module was added to the
1722 * 2.6.31 kernel.
1723 */
ba61f848 1724 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
8f26e18c
JEB
1725 dm_log_userspace = 1;
1726 }
1727
1728 /* Region size */
1729 log_parm_count = 1;
1730
1731 /* [no]sync, block_on_error etc. */
1732 log_parm_count += hweight32(seg->flags);
311d6d81 1733
8f26e18c
JEB
1734 /* "handle_errors" is a feature arg now */
1735 if (handle_errors)
1736 log_parm_count--;
1737
1738 /* DM_CORELOG does not count in the param list */
1739 if (seg->flags & DM_CORELOG)
1740 log_parm_count--;
1741
1742 if (seg->clustered) {
1743 log_parm_count++; /* For UUID */
1744
1745 if (!dm_log_userspace)
ffa9b6a5 1746 EMIT_PARAMS(pos, "clustered-");
49b95a5e
JEB
1747 else
1748 /* For clustered-* type field inserted later */
1749 log_parm_count++;
8f26e18c 1750 }
dbcb64b8 1751
8f26e18c
JEB
1752 if (!seg->log)
1753 logtype = "core";
1754 else {
1755 logtype = "disk";
1756 log_parm_count++;
1757 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1758 return_0;
1759 }
dbcb64b8 1760
8f26e18c
JEB
1761 if (dm_log_userspace)
1762 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1763 log_parm_count, seg->uuid, logtype);
1764 else
ffa9b6a5 1765 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
dbcb64b8 1766
8f26e18c
JEB
1767 if (seg->log)
1768 EMIT_PARAMS(pos, " %s", logbuf);
1769
1770 EMIT_PARAMS(pos, " %u", seg->region_size);
dbcb64b8 1771
8f26e18c
JEB
1772 if (seg->clustered && !dm_log_userspace)
1773 EMIT_PARAMS(pos, " %s", seg->uuid);
67b25ed4 1774
8f26e18c
JEB
1775 if ((seg->flags & DM_NOSYNC))
1776 EMIT_PARAMS(pos, " nosync");
1777 else if ((seg->flags & DM_FORCESYNC))
1778 EMIT_PARAMS(pos, " sync");
dbcb64b8 1779
8f26e18c
JEB
1780 if (block_on_error)
1781 EMIT_PARAMS(pos, " block_on_error");
1782
1783 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1784
5f3325fc 1785 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
3c74075f 1786 return_0;
dbcb64b8 1787
8f26e18c
JEB
1788 if (handle_errors)
1789 EMIT_PARAMS(pos, " 1 handle_errors");
ffa9b6a5 1790
3c74075f 1791 return 1;
8f26e18c
JEB
1792}
1793
cac52ca4
JEB
1794static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1795 uint32_t minor, struct load_segment *seg,
1796 uint64_t *seg_start, char *params,
1797 size_t paramsize)
1798{
f439e65b 1799 uint32_t i, *tmp;
cac52ca4
JEB
1800 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1801 int pos = 0;
1802
1803 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1804 param_count++;
1805
1806 if (seg->region_size)
1807 param_count += 2;
1808
f439e65b
JEB
1809 tmp = (uint32_t *)(&seg->rebuilds); /* rebuilds is 64-bit */
1810 param_count += 2 * hweight32(tmp[0]);
1811 param_count += 2 * hweight32(tmp[1]);
1812
cac52ca4
JEB
1813 if ((seg->type == SEG_RAID1) && seg->stripe_size)
1814 log_error("WARNING: Ignoring RAID1 stripe size");
1815
1816 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
1817 param_count, seg->stripe_size);
1818
1819 if (seg->flags & DM_NOSYNC)
1820 EMIT_PARAMS(pos, " nosync");
1821 else if (seg->flags & DM_FORCESYNC)
1822 EMIT_PARAMS(pos, " sync");
1823
1824 if (seg->region_size)
1825 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
1826
f439e65b
JEB
1827 for (i = 0; i < (seg->area_count / 2); i++)
1828 if (seg->rebuilds & (1 << i))
1829 EMIT_PARAMS(pos, " rebuild %u", i);
1830
cac52ca4
JEB
1831 /* Print number of metadata/data device pairs */
1832 EMIT_PARAMS(pos, " %u", seg->area_count/2);
1833
1834 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
1835 return_0;
1836
1837 return 1;
1838}
1839
8f26e18c
JEB
1840static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
1841 uint32_t minor, struct load_segment *seg,
1842 uint64_t *seg_start, char *params,
1843 size_t paramsize)
1844{
1845 int pos = 0;
1846 int r;
cac52ca4 1847 int target_type_is_raid = 0;
8f26e18c 1848 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
4251236e 1849 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 1850
8f26e18c
JEB
1851 switch(seg->type) {
1852 case SEG_ERROR:
1853 case SEG_ZERO:
1854 case SEG_LINEAR:
1855 break;
1856 case SEG_MIRRORED:
1857 /* Mirrors are pretty complicated - now in separate function */
beecb1e1 1858 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
3c74075f
JEB
1859 if (!r)
1860 return_0;
165e4a11 1861 break;
b262f3e1
ZK
1862 case SEG_REPLICATOR:
1863 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
1864 &pos)) <= 0) {
1865 stack;
1866 return r;
1867 }
1868 break;
1869 case SEG_REPLICATOR_DEV:
1870 if (!seg->replicator || !_build_dev_string(originbuf,
1871 sizeof(originbuf),
1872 seg->replicator))
1873 return_0;
1874
1875 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
1876 break;
165e4a11 1877 case SEG_SNAPSHOT:
aa6f4e51 1878 case SEG_SNAPSHOT_MERGE:
b4f1578f
AK
1879 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1880 return_0;
1881 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
1882 return_0;
ffa9b6a5
ZK
1883 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
1884 seg->persistent ? 'P' : 'N', seg->chunk_size);
165e4a11
AK
1885 break;
1886 case SEG_SNAPSHOT_ORIGIN:
b4f1578f
AK
1887 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1888 return_0;
ffa9b6a5 1889 EMIT_PARAMS(pos, "%s", originbuf);
165e4a11
AK
1890 break;
1891 case SEG_STRIPED:
609faae9 1892 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
165e4a11 1893 break;
12ca060e 1894 case SEG_CRYPT:
609faae9 1895 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
12ca060e
MB
1896 seg->chainmode ? "-" : "", seg->chainmode ?: "",
1897 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
1898 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
1899 seg->iv_offset : *seg_start);
1900 break;
cac52ca4
JEB
1901 case SEG_RAID1:
1902 case SEG_RAID4:
1903 case SEG_RAID5_LA:
1904 case SEG_RAID5_RA:
1905 case SEG_RAID5_LS:
1906 case SEG_RAID5_RS:
1907 case SEG_RAID6_ZR:
1908 case SEG_RAID6_NR:
1909 case SEG_RAID6_NC:
1910 target_type_is_raid = 1;
1911 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
1912 params, paramsize);
1913 if (!r)
1914 return_0;
1915
1916 break;
4251236e
ZK
1917 case SEG_THIN_POOL:
1918 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
1919 return_0;
1920 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
1921 return_0;
1922 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
1923 seg->data_block_size, seg->low_water_mark,
1924 seg->skip_block_zeroeing ? "1 skip_block_zeroing" : "");
1925 break;
1926 case SEG_THIN:
1927 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
1928 return_0;
1929 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
1930 break;
165e4a11
AK
1931 }
1932
1933 switch(seg->type) {
1934 case SEG_ERROR:
b262f3e1 1935 case SEG_REPLICATOR:
165e4a11
AK
1936 case SEG_SNAPSHOT:
1937 case SEG_SNAPSHOT_ORIGIN:
aa6f4e51 1938 case SEG_SNAPSHOT_MERGE:
165e4a11 1939 case SEG_ZERO:
4251236e
ZK
1940 case SEG_THIN_POOL:
1941 case SEG_THIN:
165e4a11 1942 break;
12ca060e 1943 case SEG_CRYPT:
165e4a11 1944 case SEG_LINEAR:
b262f3e1 1945 case SEG_REPLICATOR_DEV:
165e4a11
AK
1946 case SEG_STRIPED:
1947 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
1948 stack;
1949 return r;
1950 }
b6793963
AK
1951 if (!params[0]) {
1952 log_error("No parameters supplied for %s target "
1953 "%u:%u.", dm_segtypes[seg->type].target,
812e10ac 1954 major, minor);
b6793963
AK
1955 return 0;
1956 }
165e4a11
AK
1957 break;
1958 }
1959
4b2cae46
AK
1960 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
1961 " %" PRIu64 " %s %s", major, minor,
f439e65b
JEB
1962 *seg_start, seg->size, target_type_is_raid ? "raid" :
1963 dm_segtypes[seg->type].target, params);
165e4a11 1964
cac52ca4
JEB
1965 if (!dm_task_add_target(dmt, *seg_start, seg->size,
1966 target_type_is_raid ? "raid" :
1967 dm_segtypes[seg->type].target, params))
b4f1578f 1968 return_0;
165e4a11
AK
1969
1970 *seg_start += seg->size;
1971
1972 return 1;
1973}
1974
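/*
 * For the thin cases handled above, the emitted parameter strings follow
 * these shapes (illustrative; the values come from the load_segment fields):
 *
 *     thin-pool: <metadata_dev> <data_dev> <data_block_size>
 *                <low_water_mark> [1 skip_block_zeroing]
 *     thin:      <pool_dev> <device_id>
 */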
ffa9b6a5
ZK
1975#undef EMIT_PARAMS
1976
4b2cae46
AK
1977static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
1978 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
1979{
1980 char *params;
1981 size_t paramsize = 4096;
1982 int ret;
1983
1984 do {
1985 if (!(params = dm_malloc(paramsize))) {
1986 log_error("Insufficient space for target parameters.");
1987 return 0;
1988 }
1989
12ea7cb1 1990 params[0] = '\0';
4b2cae46
AK
1991 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
1992 params, paramsize);
165e4a11
AK
1993 dm_free(params);
1994
1995 if (!ret)
1996 stack;
1997
1998 if (ret >= 0)
1999 return ret;
2000
2001 log_debug("Insufficient space in params[%" PRIsize_t
2002 "] for target parameters.", paramsize);
2003
2004 paramsize *= 2;
2005 } while (paramsize < MAX_TARGET_PARAMSIZE);
2006
2007 log_error("Target parameter size too big. Aborting.");
2008 return 0;
2009}
2010
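/*
 * _emit_segment_line() signals an over-full params buffer with a negative
 * return value (via EMIT_PARAMS), which is why the loop above retries with
 * a doubled buffer until MAX_TARGET_PARAMSIZE is reached.
 */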
b4f1578f 2011static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2012{
2013 int r = 0;
2014 struct dm_task *dmt;
2015 struct load_segment *seg;
df390f17 2016 uint64_t seg_start = 0, existing_table_size;
165e4a11 2017
4b2cae46
AK
2018 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2019 dnode->info.major, dnode->info.minor);
165e4a11
AK
2020
2021 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2022 log_error("Reload dm_task creation failed for %s", dnode->name);
2023 return 0;
2024 }
2025
2026 if (!dm_task_set_major(dmt, dnode->info.major) ||
2027 !dm_task_set_minor(dmt, dnode->info.minor)) {
2028 log_error("Failed to set device number for %s reload.", dnode->name);
2029 goto out;
2030 }
2031
2032 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2033 log_error("Failed to set read only flag for %s", dnode->name);
2034 goto out;
2035 }
2036
2037 if (!dm_task_no_open_count(dmt))
2038 log_error("Failed to disable open_count");
2039
2c44337b 2040 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2041 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2042 seg, &seg_start))
b4f1578f 2043 goto_out;
165e4a11 2044
ec289b64
AK
2045 if (!dm_task_suppress_identical_reload(dmt))
2046 log_error("Failed to suppress reload of identical tables.");
2047
2048 if ((r = dm_task_run(dmt))) {
165e4a11 2049 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2050 if (r && !dnode->info.inactive_table)
2051 log_verbose("Suppressed %s identical table reload.",
2052 dnode->name);
bb875bb9 2053
df390f17 2054 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2055 if ((dnode->props.size_changed =
df390f17 2056 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2057 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2058 PRIu64 " for %s", existing_table_size,
bb875bb9 2059 seg_start, dnode->name);
df390f17
AK
2060 /*
2061 * Kernel usually skips size validation on zero-length devices
2062 * now so no need to preload them.
2063 */
2064 /* FIXME In which kernel version did this begin? */
2065 if (!existing_table_size && dnode->props.delay_resume_if_new)
2066 dnode->props.size_changed = 0;
2067 }
ec289b64 2068 }
165e4a11
AK
2069
2070 dnode->props.segment_count = 0;
2071
2072out:
2073 dm_task_destroy(dmt);
2074
2075 return r;
165e4a11
AK
2076}
2077
b4f1578f 2078int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2079 const char *uuid_prefix,
2080 size_t uuid_prefix_len)
165e4a11 2081{
2ca6b865 2082 int r = 1;
165e4a11 2083 void *handle = NULL;
b4f1578f 2084 struct dm_tree_node *child;
165e4a11 2085 struct dm_info newinfo;
566515c0 2086 int update_devs_flag = 0;
165e4a11
AK
2087
2088 /* Preload children first */
b4f1578f 2089 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2090 /* Skip existing non-device-mapper devices */
2091 if (!child->info.exists && child->info.major)
2092 continue;
2093
2094 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2095 if (child->info.exists &&
2096 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2097 continue;
2098
b4f1578f 2099 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2100 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2101 return_0;
165e4a11 2102
165e4a11
AK
2103 /* FIXME Cope if name exists with no uuid? */
2104 if (!child->info.exists) {
2105 if (!_create_node(child)) {
2106 stack;
2107 return 0;
2108 }
2109 }
2110
2111 if (!child->info.inactive_table && child->props.segment_count) {
2112 if (!_load_node(child)) {
2113 stack;
2114 return 0;
2115 }
2116 }
2117
eb91c4ee
MB
2118 /* Propagate device size change */
2119 if (child->props.size_changed)
2120 dnode->props.size_changed = 1;
2121
bb875bb9 2122 /* Resume device immediately if it has parents and its size changed */
3776c494 2123 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2124 continue;
2125
7707ea90
AK
2126 if (!child->info.inactive_table && !child->info.suspended)
2127 continue;
2128
fc795d87 2129 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2130 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2131 &newinfo, &child->dtree->cookie, child->udev_flags,
2132 child->info.suspended)) {
165e4a11 2133 log_error("Unable to resume %s (%" PRIu32
fc795d87 2134 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2135 child->info.minor);
2ca6b865 2136 r = 0;
165e4a11
AK
2137 continue;
2138 }
2139
2140 /* Update cached info */
2141 child->info = newinfo;
566515c0
PR
2142
2143 /*
2144 * Prepare for immediate synchronization with udev and flush all stacked
2145 * dev node operations if requested by immediate_dev_node property. But
2146 * finish processing current level in the tree first.
2147 */
2148 if (child->props.immediate_dev_node)
2149 update_devs_flag = 1;
2150
165e4a11
AK
2151 }
2152
2153 handle = NULL;
2154
566515c0
PR
2155 if (update_devs_flag) {
2156 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2157 stack;
2158 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2159 }
2160
2ca6b865 2161 return r;
165e4a11
AK
2162}
2163
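/*
 * A minimal, hypothetical caller sketch (uuid prefix and layout are made
 * up; the exact prototypes live in libdevmapper.h):
 *
 *     struct dm_tree *dtree = dm_tree_create();
 *     struct dm_tree_node *root = dm_tree_find_node(dtree, 0, 0);
 *
 *     // ... add nodes plus dm_tree_node_add_*_target()/_target_area() ...
 *
 *     if (!dm_tree_preload_children(root, "LVM-", strlen("LVM-")))
 *             stack;
 *     if (!dm_udev_wait(dm_tree_get_cookie(root)))
 *             stack;
 *     dm_tree_set_cookie(root, 0);
 *     dm_tree_free(dtree);
 */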
165e4a11
AK
2164/*
2165 * Returns 1 if unsure.
2166 */
b4f1578f 2167int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2168 const char *uuid_prefix,
2169 size_t uuid_prefix_len)
2170{
2171 void *handle = NULL;
b4f1578f 2172 struct dm_tree_node *child = dnode;
165e4a11
AK
2173 const char *uuid;
2174
b4f1578f
AK
2175 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2176 if (!(uuid = dm_tree_node_get_uuid(child))) {
2177 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2178 return 1;
2179 }
2180
87f98002 2181 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2182 return 1;
2183
b4f1578f
AK
2184 if (dm_tree_node_num_children(child, 0))
2185 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2186 }
2187
2188 return 0;
2189}
2190
2191/*
2192 * Target functions
2193 */
b4f1578f 2194static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2195{
2196 struct load_segment *seg;
2197
b4f1578f
AK
2198 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2199 log_error("dtree node segment allocation failed");
165e4a11
AK
2200 return NULL;
2201 }
2202
2203 seg->type = type;
2204 seg->size = size;
2205 seg->area_count = 0;
2c44337b 2206 dm_list_init(&seg->areas);
165e4a11
AK
2207 seg->stripe_size = 0;
2208 seg->persistent = 0;
2209 seg->chunk_size = 0;
2210 seg->cow = NULL;
2211 seg->origin = NULL;
aa6f4e51 2212 seg->merge = NULL;
165e4a11 2213
2c44337b 2214 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2215 dnode->props.segment_count++;
2216
2217 return seg;
2218}
2219
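/*
 * The dm_tree_node_add_*_target() helpers below create their segment with
 * _add_segment() and then fill in the type-specific load_segment fields
 * that _emit_segment_line() later turns into table parameters.
 */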
b4f1578f 2220int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2221 uint64_t size,
2222 const char *origin_uuid)
165e4a11
AK
2223{
2224 struct load_segment *seg;
b4f1578f 2225 struct dm_tree_node *origin_node;
165e4a11 2226
b4f1578f
AK
2227 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2228 return_0;
165e4a11 2229
b4f1578f 2230 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2231 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2232 return 0;
2233 }
2234
2235 seg->origin = origin_node;
b4f1578f
AK
2236 if (!_link_tree_nodes(dnode, origin_node))
2237 return_0;
165e4a11 2238
56c28292
AK
2239 /* Resume snapshot origins after new snapshots */
2240 dnode->activation_priority = 1;
2241
165e4a11
AK
2242 return 1;
2243}
2244
aa6f4e51
MS
2245static int _add_snapshot_target(struct dm_tree_node *node,
2246 uint64_t size,
2247 const char *origin_uuid,
2248 const char *cow_uuid,
2249 const char *merge_uuid,
2250 int persistent,
2251 uint32_t chunk_size)
165e4a11
AK
2252{
2253 struct load_segment *seg;
aa6f4e51
MS
2254 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2255 unsigned seg_type;
2256
2257 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
165e4a11 2258
aa6f4e51 2259 if (!(seg = _add_segment(node, seg_type, size)))
b4f1578f 2260 return_0;
165e4a11 2261
b4f1578f 2262 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
165e4a11
AK
2263 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2264 return 0;
2265 }
2266
2267 seg->origin = origin_node;
b4f1578f
AK
2268 if (!_link_tree_nodes(node, origin_node))
2269 return_0;
165e4a11 2270
b4f1578f 2271 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
aa6f4e51 2272 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
165e4a11
AK
2273 return 0;
2274 }
2275
2276 seg->cow = cow_node;
b4f1578f
AK
2277 if (!_link_tree_nodes(node, cow_node))
2278 return_0;
165e4a11
AK
2279
2280 seg->persistent = persistent ? 1 : 0;
2281 seg->chunk_size = chunk_size;
2282
aa6f4e51
MS
2283 if (merge_uuid) {
2284 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2285 /* not a pure error, merging snapshot may have been deactivated */
2286 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2287 } else {
2288 seg->merge = merge_node;
2289 /* must not link merging snapshot, would undermine activation_priority below */
2290 }
2291
2292 /* Resume snapshot-merge (acting origin) after other snapshots */
2293 node->activation_priority = 1;
2294 if (seg->merge) {
2295 /* Resume merging snapshot after snapshot-merge */
2296 seg->merge->activation_priority = 2;
2297 }
2298 }
2299
165e4a11
AK
2300 return 1;
2301}
2302
aa6f4e51
MS
2303
2304int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2305 uint64_t size,
2306 const char *origin_uuid,
2307 const char *cow_uuid,
2308 int persistent,
2309 uint32_t chunk_size)
2310{
2311 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2312 NULL, persistent, chunk_size);
2313}
2314
2315int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2316 uint64_t size,
2317 const char *origin_uuid,
2318 const char *cow_uuid,
2319 const char *merge_uuid,
2320 uint32_t chunk_size)
2321{
2322 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2323 merge_uuid, 1, chunk_size);
2324}
2325
b4f1578f 2326int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2327 uint64_t size)
165e4a11 2328{
b4f1578f
AK
2329 if (!_add_segment(node, SEG_ERROR, size))
2330 return_0;
165e4a11
AK
2331
2332 return 1;
2333}
2334
b4f1578f 2335int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2336 uint64_t size)
165e4a11 2337{
b4f1578f
AK
2338 if (!_add_segment(node, SEG_ZERO, size))
2339 return_0;
165e4a11
AK
2340
2341 return 1;
2342}
2343
b4f1578f 2344int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2345 uint64_t size)
165e4a11 2346{
b4f1578f
AK
2347 if (!_add_segment(node, SEG_LINEAR, size))
2348 return_0;
165e4a11
AK
2349
2350 return 1;
2351}
2352
b4f1578f 2353int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2354 uint64_t size,
2355 uint32_t stripe_size)
165e4a11
AK
2356{
2357 struct load_segment *seg;
2358
b4f1578f
AK
2359 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2360 return_0;
165e4a11
AK
2361
2362 seg->stripe_size = stripe_size;
2363
2364 return 1;
2365}
2366
12ca060e
MB
2367int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2368 uint64_t size,
2369 const char *cipher,
2370 const char *chainmode,
2371 const char *iv,
2372 uint64_t iv_offset,
2373 const char *key)
2374{
2375 struct load_segment *seg;
2376
2377 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2378 return_0;
2379
2380 seg->cipher = cipher;
2381 seg->chainmode = chainmode;
2382 seg->iv = iv;
2383 seg->iv_offset = iv_offset;
2384 seg->key = key;
2385
2386 return 1;
2387}
2388
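/*
 * When loaded, _emit_segment_line() turns these fields into a crypt table
 * line of roughly this shape (illustrative):
 *
 *     <cipher>[-<chainmode>][-<iv>] <key> <iv_offset> <dev> <offset>
 *
 * with iv_offset falling back to the segment start when it is
 * DM_CRYPT_IV_DEFAULT.
 */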
b4f1578f 2389int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2390 uint32_t region_size,
08e64ce5 2391 unsigned clustered,
165e4a11 2392 const char *log_uuid,
ce7ed2c0
AK
2393 unsigned area_count,
2394 uint32_t flags)
165e4a11 2395{
908db078 2396 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2397 struct load_segment *seg;
2398
2399 if (!node->props.segment_count) {
b8175c33 2400 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2401 return 0;
2402 }
2403
2c44337b 2404 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2405
24b026e3 2406 if (log_uuid) {
67b25ed4
AK
2407 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2408 log_error("log uuid pool_strdup failed");
2409 return 0;
2410 }
df390f17
AK
2411 if ((flags & DM_CORELOG))
2412 /* For pvmove: immediate resume (for size validation) isn't needed. */
2413 node->props.delay_resume_if_new = 1;
2414 else {
9723090c
AK
2415 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2416 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2417 return 0;
2418 }
2419
566515c0
PR
2420 if (clustered)
2421 log_node->props.immediate_dev_node = 1;
2422
0a99713e
AK
2423 /* The kernel validates the size of disk logs. */
2424 /* FIXME Propagate to any devices below */
2425 log_node->props.delay_resume_if_new = 0;
2426
9723090c
AK
2427 if (!_link_tree_nodes(node, log_node))
2428 return_0;
2429 }
165e4a11
AK
2430 }
2431
2432 seg->log = log_node;
165e4a11
AK
2433 seg->region_size = region_size;
2434 seg->clustered = clustered;
2435 seg->mirror_area_count = area_count;
dbcb64b8 2436 seg->flags = flags;
165e4a11
AK
2437
2438 return 1;
2439}
2440
b4f1578f 2441int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2442 uint64_t size)
165e4a11 2443{
cbecd3cd 2444 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2445 return_0;
165e4a11
AK
2446
2447 return 1;
2448}
2449
cac52ca4
JEB
2450int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2451 uint64_t size,
2452 const char *raid_type,
2453 uint32_t region_size,
2454 uint32_t stripe_size,
f439e65b 2455 uint64_t rebuilds,
cac52ca4
JEB
2456 uint64_t reserved2)
2457{
2458 int i;
2459 struct load_segment *seg = NULL;
2460
2461 for (i = 0; dm_segtypes[i].target && !seg; i++)
2462 if (!strcmp(raid_type, dm_segtypes[i].target))
2463 if (!(seg = _add_segment(node,
2464 dm_segtypes[i].type, size)))
2465 return_0;
2466
b2fa9b43
JEB
2467 if (!seg)
2468 return_0;
2469
cac52ca4
JEB
2470 seg->region_size = region_size;
2471 seg->stripe_size = stripe_size;
2472 seg->area_count = 0;
f439e65b 2473 seg->rebuilds = rebuilds;
cac52ca4
JEB
2474
2475 return 1;
2476}
2477
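/*
 * Hypothetical usage (values are made up; sizes are in sectors):
 *
 *     dm_tree_node_add_raid_target(node, size, "raid5",
 *                                  1024,   // region_size
 *                                  128,    // stripe_size
 *                                  0, 0);  // rebuilds, reserved2
 *
 * Areas are then added in metadata/data order, two per raid image;
 * dm_tree_node_add_null_area() below can stand in for a missing one.
 */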
b262f3e1
ZK
2478int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2479 uint64_t size,
2480 const char *rlog_uuid,
2481 const char *rlog_type,
2482 unsigned rsite_index,
2483 dm_replicator_mode_t mode,
2484 uint32_t async_timeout,
2485 uint64_t fall_behind_data,
2486 uint32_t fall_behind_ios)
2487{
2488 struct load_segment *rseg;
2489 struct replicator_site *rsite;
2490
2491 /* Local site0 - adds replicator segment and links rlog device */
2492 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2493 if (node->props.segment_count) {
2494 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2495 return 0;
2496 }
2497
2498 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2499 return_0;
2500
2501 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2502 log_error("Missing replicator log uuid %s.", rlog_uuid);
2503 return 0;
2504 }
2505
2506 if (!_link_tree_nodes(node, rseg->log))
2507 return_0;
2508
2509 if (strcmp(rlog_type, "ringbuffer") != 0) {
2510 log_error("Unsupported replicator log type %s.", rlog_type);
2511 return 0;
2512 }
2513
2514 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2515 return_0;
2516
2517 dm_list_init(&rseg->rsites);
2518 rseg->rdevice_count = 0;
2519 node->activation_priority = 1;
2520 }
2521
2522 /* Add site to segment */
2523 if (mode == DM_REPLICATOR_SYNC
2524 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2525 log_error("Async parameters passed for synchronnous replicator.");
2526 return 0;
2527 }
2528
2529 if (node->props.segment_count != 1) {
2530 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2531 return 0;
2532 }
2533
2534 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2535 if (rseg->type != SEG_REPLICATOR) {
2536 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2537 dm_segtypes[rseg->type].target);
2538 return 0;
2539 }
2540
2541 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2542 log_error("Failed to allocate remote site segment.");
2543 return 0;
2544 }
2545
2546 dm_list_add(&rseg->rsites, &rsite->list);
2547 rseg->rsite_count++;
2548
2549 rsite->mode = mode;
2550 rsite->async_timeout = async_timeout;
2551 rsite->fall_behind_data = fall_behind_data;
2552 rsite->fall_behind_ios = fall_behind_ios;
2553 rsite->rsite_index = rsite_index;
2554
2555 return 1;
2556}
2557
2558/* Appends device node to Replicator */
2559int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2560 uint64_t size,
2561 const char *replicator_uuid,
2562 uint64_t rdevice_index,
2563 const char *rdev_uuid,
2564 unsigned rsite_index,
2565 const char *slog_uuid,
2566 uint32_t slog_flags,
2567 uint32_t slog_region_size)
2568{
2569 struct seg_area *area;
2570 struct load_segment *rseg;
2571 struct load_segment *rep_seg;
2572
2573 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2574 /* Site index for local target */
2575 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2576 return_0;
2577
2578 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2579 log_error("Missing replicator uuid %s.", replicator_uuid);
2580 return 0;
2581 }
2582
2583 /* Local slink0 for replicator must always be initialized first */
2584 if (rseg->replicator->props.segment_count != 1) {
2585 log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
2586 return 0;
2587 }
2588
2589 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2590 if (rep_seg->type != SEG_REPLICATOR) {
2591 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2592 dm_segtypes[rep_seg->type].target);
2593 return 0;
2594 }
2595 rep_seg->rdevice_count++;
2596
2597 if (!_link_tree_nodes(node, rseg->replicator))
2598 return_0;
2599
2600 rseg->rdevice_index = rdevice_index;
2601 } else {
2602 /* Local slink0 for replicator must always be initialized first */
2603 if (node->props.segment_count != 1) {
2604 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
2605 return 0;
2606 }
2607
2608 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2609 if (rseg->type != SEG_REPLICATOR_DEV) {
2610 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
2611 dm_segtypes[rseg->type].target);
2612 return 0;
2613 }
2614 }
2615
2616 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2617 log_error("Unspecified sync log uuid.");
2618 return 0;
2619 }
2620
2621 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2622 return_0;
2623
2624 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2625
2626 if (!(slog_flags & DM_CORELOG)) {
2627 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2628 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2629 return 0;
2630 }
2631
2632 if (!_link_tree_nodes(node, area->slog))
2633 return_0;
2634 }
2635
2636 area->flags = slog_flags;
2637 area->region_size = slog_region_size;
2638 area->rsite_index = rsite_index;
2639
2640 return 1;
2641}
2642
4251236e
ZK
2643int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2644 uint64_t size,
2645 uint64_t transaction_id,
2646 const char *pool_uuid,
2647 const char *metadata_uuid,
2648 uint32_t data_block_size,
2649 uint64_t low_water_mark,
2650 unsigned skip_block_zeroeing)
2651{
2652 struct load_segment *seg;
2653
2654 if (data_block_size < THIN_MIN_DATA_SIZE) {
2655 log_error("Data block size %d is lower then "
2656 QUOTE(THIN_MIN_DATA_SIZE) " sectors.",
2657 data_block_size);
2658 return 0;
2659 }
2660
2661 if (data_block_size > THIN_MAX_DATA_SIZE) {
2662 log_error("Data block size %d is higher then "
2663 QUOTE(THIN_MAX_DATA_SIZE) " sectors.",
2664 data_block_size);
2665 return 0;
2666 }
2667
2668 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2669 return_0;
2670
2671 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2672 log_error("Missing metadata uuid %s.", metadata_uuid);
2673 return 0;
2674 }
2675
2676 if (!_link_tree_nodes(node, seg->metadata))
2677 return_0;
2678
2679 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2680 log_error("Missing pool uuid %s.", pool_uuid);
2681 return 0;
2682 }
2683
2684 if (!_link_tree_nodes(node, seg->pool))
2685 return_0;
2686
2687 seg->data_block_size = data_block_size;
2688 seg->low_water_mark = low_water_mark;
2689 seg->skip_block_zeroeing = skip_block_zeroeing;
2690
2691 return 1;
2692}
2693
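/*
 * Illustrative call (hypothetical uuids; data_block_size is in sectors,
 * so 256 sectors means 128KiB blocks assuming 512-byte sectors):
 *
 *     dm_tree_node_add_thin_pool_target(node, pool_size,
 *                                       0,                    // transaction id
 *                                       "LVM-pooluuid-tpool",
 *                                       "LVM-pooluuid-tmeta",
 *                                       256, 0, 1);
 */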
2694int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2695 uint64_t size,
4251236e
ZK
2696 const char *thin_pool_uuid,
2697 uint32_t device_id)
2698{
2699 struct load_segment *seg;
2700
2701 if (device_id > THIN_MAX_DEVICE_ID) {
2702 log_error("Device id %d is higher then " QUOTE(THIN_MAX_DEVICE_ID) ".",
2703 device_id);
2704 return 0;
2705 }
2706
2707 if (!(seg = _add_segment(node, SEG_THIN, size)))
2708 return_0;
2709
2710 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2711 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2712 return 0;
2713 }
2714
2715 if (!_link_tree_nodes(node, seg->pool))
2716 return_0;
2717
1419bf1c
ZK
2718 seg->device_id = device_id;
2719
4251236e
ZK
2720 return 1;
2721}
2722
b4f1578f 2723static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
2724{
2725 struct seg_area *area;
2726
b4f1578f 2727 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
2728 log_error("Failed to allocate target segment area.");
2729 return 0;
2730 }
2731
2732 area->dev_node = dev_node;
2733 area->offset = offset;
2734
2c44337b 2735 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
2736 seg->area_count++;
2737
2738 return 1;
2739}
2740
b4f1578f 2741int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
2742 const char *dev_name,
2743 const char *uuid,
2744 uint64_t offset)
165e4a11
AK
2745{
2746 struct load_segment *seg;
2747 struct stat info;
b4f1578f 2748 struct dm_tree_node *dev_node;
165e4a11
AK
2749
2750 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 2751 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
2752 return 0;
2753 }
2754
2755 if (uuid) {
b4f1578f 2756 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
2757 log_error("Couldn't find area uuid %s.", uuid);
2758 return 0;
2759 }
b4f1578f
AK
2760 if (!_link_tree_nodes(node, dev_node))
2761 return_0;
165e4a11 2762 } else {
6d04311e 2763 if (stat(dev_name, &info) < 0) {
165e4a11
AK
2764 log_error("Device %s not found.", dev_name);
2765 return 0;
2766 }
2767
40e5fd8b 2768 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
2769 log_error("Device %s is not a block device.", dev_name);
2770 return 0;
2771 }
2772
2773 /* FIXME Check correct macro use */
cda69e17
PR
2774 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
2775 MINOR(info.st_rdev), 0)))
b4f1578f 2776 return_0;
165e4a11
AK
2777 }
2778
2779 if (!node->props.segment_count) {
b8175c33 2780 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2781 return 0;
2782 }
2783
2c44337b 2784 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2785
b4f1578f
AK
2786 if (!_add_area(node, seg, dev_node, offset))
2787 return_0;
165e4a11
AK
2788
2789 return 1;
db208f51 2790}
bd90c6b2 2791
6d04311e
JEB
2792int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
2793{
2794 struct load_segment *seg;
2795
2796 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2797
415c0690
AK
2798 switch (seg->type) {
2799 case SEG_RAID1:
2800 case SEG_RAID4:
2801 case SEG_RAID5_LA:
2802 case SEG_RAID5_RA:
2803 case SEG_RAID5_LS:
2804 case SEG_RAID5_RS:
2805 case SEG_RAID6_ZR:
2806 case SEG_RAID6_NR:
2807 case SEG_RAID6_NC:
2808 break;
2809 default:
2810 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
2811 return 0;
2812 }
2813
6d04311e
JEB
2814 if (!_add_area(node, seg, NULL, offset))
2815 return_0;
2816
2817 return 1;
2818}
2819
bd90c6b2
AK
2820void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
2821{
2822 node->dtree->cookie = cookie;
2823}
2824
2825uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
2826{
2827 return node->dtree->cookie;
2828}