/*
 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "dmlib.h"
#include "libdm-targets.h"
#include "libdm-common.h"
#include "kdev_t.h"
#include "dm-ioctl.h"

#include <stdarg.h>
#include <sys/param.h>
#include <sys/utsname.h>

#define MAX_TARGET_PARAMSIZE 500000

/* FIXME Fix interface so this is used only by LVM */
#define UUID_PREFIX "LVM-"

#define REPLICATOR_LOCAL_SITE 0

/* Supported segment types */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,
};

/* FIXME Add crypt and multipath support */

struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 * WARNING: Since the 'raid' target overloads this 1:1 mapping table
	 * for searching, do not add new enum elements past these entries!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};

/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;

	uint64_t offset;

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};

struct thin_message {
	struct dm_list list;
	struct dm_thin_message message;
	int expected_errno;
};

/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;
	dm_replicator_mode_t mode;
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};

/* Per-segment properties */
struct load_segment {
	struct dm_list list;

	unsigned type;

	uint64_t size;

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	struct dm_list thin_messages;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	uint32_t data_block_size;	/* Thin_pool */
	unsigned skip_block_zeroing;	/* Thin_pool */
	uint32_t device_id;		/* Thin */
};

/* Per-device properties */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	uint64_t thin_pool_transaction_id; /* Thin_pool */

	unsigned segment_count;
	unsigned size_changed;
	struct dm_list segs;

	const char *new_name;

	/*
	 * If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;
};

/* Two of these are used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;
	struct dm_tree_node *node;
};

struct dm_tree_node {
	struct dm_tree *dtree;

	const char *name;
	const char *uuid;
	struct dm_info info;

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* Externally supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};

struct dm_tree {
	struct dm_pool *mem;
	struct dm_hash_table *devs;
	struct dm_hash_table *uuids;
	struct dm_tree_node root;
	int skip_lockfs;	/* 1 skips lockfs (for non-snapshots) */
	int no_flush;		/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;	/* 1 retries remove if not successful */
	uint32_t cookie;
};

struct dm_tree *dm_tree_create(void)
{
	struct dm_pool *dmem;
	struct dm_tree *dtree;

	if (!(dmem = dm_pool_create("dtree", 1024)) ||
	    !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
		log_error("Failed to allocate dtree.");
		if (dmem)
			dm_pool_destroy(dmem);
		return NULL;
	}

	dtree->root.dtree = dtree;
	dm_list_init(&dtree->root.uses);
	dm_list_init(&dtree->root.used_by);
	dtree->skip_lockfs = 0;
	dtree->no_flush = 0;
	dtree->mem = dmem;

	if (!(dtree->devs = dm_hash_create(8))) {
		log_error("dtree hash creation failed");
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	if (!(dtree->uuids = dm_hash_create(32))) {
		log_error("dtree uuid hash creation failed");
		dm_hash_destroy(dtree->devs);
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	return dtree;
}

void dm_tree_free(struct dm_tree *dtree)
{
	if (!dtree)
		return;

	dm_hash_destroy(dtree->uuids);
	dm_hash_destroy(dtree->devs);
	dm_pool_destroy(dtree->mem);
}
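
/*
 * Usage sketch (illustrative only): build a tree describing an active
 * device stack, work with it, then release it.
 *
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *	if (!dm_tree_add_dev(dtree, major, minor))
 *		goto out;
 *	... walk or deactivate the tree ...
 * out:
 *	dm_tree_free(dtree);
 */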

static int _nodes_are_linked(const struct dm_tree_node *parent,
			     const struct dm_tree_node *child)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, &parent->uses)
		if (dlink->node == child)
			return 1;

	return 0;
}

static int _link(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
		log_error("dtree link allocation failed");
		return 0;
	}

	dlink->node = node;
	dm_list_add(list, &dlink->list);

	return 1;
}

static int _link_nodes(struct dm_tree_node *parent,
		       struct dm_tree_node *child)
{
	if (_nodes_are_linked(parent, child))
		return 1;

	if (!_link(&parent->uses, child))
		return 0;

	if (!_link(&child->used_by, parent))
		return 0;

	return 1;
}

static void _unlink(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, list)
		if (dlink->node == node) {
			dm_list_del(&dlink->list);
			break;
		}
}

static void _unlink_nodes(struct dm_tree_node *parent,
			  struct dm_tree_node *child)
{
	if (!_nodes_are_linked(parent, child))
		return;

	_unlink(&parent->uses, child);
	_unlink(&child->used_by, parent);
}

static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}

static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}

static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}

static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}

static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}
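
/*
 * Note: the tree's root node acts as both a virtual top and bottom level.
 * A node remains linked to the root until it gains a real parent
 * (top level) or a real child (bottom level); _link_tree_nodes() above
 * drops those placeholder links before joining two real nodes.
 */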

static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}

static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
					       uint32_t major, uint32_t minor)
{
	uint64_t dev = MKDEV(major, minor);

	return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
				     sizeof(dev));
}

static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
							const char *uuid)
{
	struct dm_tree_node *node;

	if ((node = dm_hash_lookup(dtree->uuids, uuid)))
		return node;

	if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return NULL;

	return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
}
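
/*
 * Note: the lookup above falls back to stripping the "LVM-" prefix, so a
 * search for "LVM-<uuid>" can still find a node that was hashed without
 * the prefix.
 */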

static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
		 const char **name, const char **uuid,
		 struct dm_info *info, struct dm_deps **deps)
{
	memset(info, 0, sizeof(*info));

	if (!dm_is_dm_major(major)) {
		*name = "";
		*uuid = "";
		*deps = NULL;
		info->major = major;
		info->minor = minor;
		info->exists = 0;
		info->live_table = 0;
		info->inactive_table = 0;
		info->read_only = 0;
		return 1;
	}

	if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
		log_error("deps dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(*dmt, major)) {
		log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_set_minor(*dmt, minor)) {
		log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_run(*dmt)) {
		log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_get_info(*dmt, info)) {
		log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!info->exists) {
		*name = "";
		*uuid = "";
		*deps = NULL;
	} else {
		if (info->major != major) {
			log_error("Inconsistent dtree major number: %u != %u",
				  major, info->major);
			goto failed;
		}
		if (info->minor != minor) {
			log_error("Inconsistent dtree minor number: %u != %u",
				  minor, info->minor);
			goto failed;
		}
		if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
			log_error("name pool_strdup failed");
			goto failed;
		}
		if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
			log_error("uuid pool_strdup failed");
			goto failed;
		}
		*deps = dm_task_get_deps(*dmt);
	}

	return 1;

failed:
	dm_task_destroy(*dmt);
	return 0;
}

static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}

static int _node_clear_table(struct dm_tree_node *dnode)
{
	struct dm_task *dmt;
	struct dm_info *info;
	const char *name;
	int r;

	if (!(info = &dnode->info)) {
		log_error("_node_clear_table failed: missing info");
		return 0;
	}

	if (!(name = dm_tree_node_get_name(dnode))) {
		log_error("_node_clear_table failed: missing name");
		return 0;
	}

	/* Is there a table? */
	if (!info->exists || !info->inactive_table)
		return 1;

	/* FIXME Get inactive deps.  If any dev referenced has 1 opener and no live table, remove it after the clear. */

	log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
		    name, info->major, info->minor);

	if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
		log_error("Table clear dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, info->major) ||
	    !dm_task_set_minor(dmt, info->minor)) {
		log_error("Failed to set device number for %s table clear", name);
		dm_task_destroy(dmt);
		return 0;
	}

	r = dm_task_run(dmt);

	if (!dm_task_get_info(dmt, info)) {
		log_error("_node_clear_table failed: info missing after running task for %s", name);
		r = 0;
	}

	dm_task_destroy(dmt);

	return r;
}

struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
					 const char *name,
					 const char *uuid,
					 uint32_t major, uint32_t minor,
					 int read_only,
					 int clear_inactive,
					 void *context)
{
	struct dm_tree_node *dnode;
	struct dm_info info;
	const char *name2;
	const char *uuid2;

	/* Do we need to add node to tree? */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
		if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
		if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
			log_error("uuid pool_strdup failed");
			return NULL;
		}

		info.major = 0;
		info.minor = 0;
		info.exists = 0;
		info.live_table = 0;
		info.inactive_table = 0;
		info.read_only = 0;

		if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
						   context, 0)))
			return_NULL;

		/* Attach to root node until a table is supplied */
		if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
			return_NULL;

		dnode->props.major = major;
		dnode->props.minor = minor;
		dnode->props.new_name = NULL;
		dnode->props.size_changed = 0;
	} else if (strcmp(name, dnode->name)) {
		/* Do we need to rename node? */
		if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return 0;
		}
	}

	dnode->props.read_only = read_only ? 1 : 0;
	dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
	dnode->props.read_ahead_flags = 0;

	if (clear_inactive && !_node_clear_table(dnode))
		return_NULL;

	dnode->context = context;
	dnode->udev_flags = 0;

	return dnode;
}

struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
							  const char *name,
							  const char *uuid,
							  uint32_t major,
							  uint32_t minor,
							  int read_only,
							  int clear_inactive,
							  void *context,
							  uint16_t udev_flags)
{
	struct dm_tree_node *node;

	if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
					clear_inactive, context)))
		node->udev_flags = udev_flags;

	return node;
}

void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
{
	struct dm_info *dinfo = &dnode->info;

	if (udev_flags != dnode->udev_flags)
		log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
			  ") udev_flags from 0x%x to 0x%x",
			  dnode->name, dinfo->major, dinfo->minor,
			  dnode->udev_flags, udev_flags);
	dnode->udev_flags = udev_flags;
}

void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}

void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}

int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
{
	return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
}

int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
				    uint32_t minor, uint16_t udev_flags)
{
	return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
}

const char *dm_tree_node_get_name(const struct dm_tree_node *node)
{
	return node->info.exists ? node->name : "";
}

const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
{
	return node->info.exists ? node->uuid : "";
}

const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}

void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}

int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}

int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
{
	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 0;
		return dm_list_size(&node->used_by);
	}

	if (_nodes_are_linked(node, &node->dtree->root))
		return 0;

	return dm_list_size(&node->uses);
}

/*
 * Returns 1 if no prefix supplied
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)
		return 0;

	if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
		return 1;

	return 0;
}
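
/*
 * Example: with uuid_prefix "LVM-<vg uuid>", a device activated with a
 * uuid lacking the "LVM-" prefix still matches, because the comparison
 * above is retried with the prefix stripped from uuid_prefix.
 */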

/*
 * Returns 1 if no children.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}

/*
 * Set major and minor to zero for root of tree.
 */
struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
				       uint32_t major,
				       uint32_t minor)
{
	if (!major && !minor)
		return &dtree->root;

	return _find_dm_tree_node(dtree, major, minor);
}

/*
 * Set uuid to NULL for root of tree.
 */
struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
					       const char *uuid)
{
	if (!uuid || !*uuid)
		return &dtree->root;

	return _find_dm_tree_node_by_uuid(dtree, uuid);
}

/*
 * First time set *handle to NULL.
 * Set inverted to invert the tree.
 */
struct dm_tree_node *dm_tree_next_child(void **handle,
					const struct dm_tree_node *parent,
					uint32_t inverted)
{
	struct dm_list **dlink = (struct dm_list **) handle;
	const struct dm_list *use_list;

	if (inverted)
		use_list = &parent->used_by;
	else
		use_list = &parent->uses;

	if (!*dlink)
		*dlink = dm_list_first(use_list);
	else
		*dlink = dm_list_next(use_list, *dlink);

	return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
}
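
/*
 * Usage sketch (illustrative only): walking the direct children of a node
 * with the iterator above.
 *
 *	void *handle = NULL;
 *	struct dm_tree_node *child;
 *
 *	while ((child = dm_tree_next_child(&handle, parent, 0)))
 *		log_debug("child %s", dm_tree_node_get_name(child));
 */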

/*
 * Deactivate a device with its dependencies if the uuid prefix matches.
 */
static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
			struct dm_info *info)
{
	struct dm_task *dmt;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
		log_error("_info_by_dev: dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("_info_by_dev: Failed to set device number");
		dm_task_destroy(dmt);
		return 0;
	}

	if (!with_open_count && !dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if ((r = dm_task_run(dmt)))
		r = dm_task_get_info(dmt, info);

	dm_task_destroy(dmt);

	return r;
}

static int _check_device_not_in_use(struct dm_info *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Device %" PRIu32 ":%" PRIu32 " in use",
				  info->major, info->minor);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Device %" PRIu32 ":%" PRIu32 " is used "
			  "by another device.", info->major, info->minor);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Device %" PRIu32 ":%" PRIu32 " contains "
			  "a filesystem in use.", info->major, info->minor);
		return 0;
	}

	return 1;
}

/* Check if all parent nodes of given node have open_count == 0 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}

static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}

static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}

/* FIXME Merge with _suspend_node? */
static int _resume_node(const char *name, uint32_t major, uint32_t minor,
			uint32_t read_ahead, uint32_t read_ahead_flags,
			struct dm_info *newinfo, uint32_t *cookie,
			uint16_t udev_flags, int already_suspended)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
		log_debug("Suspend dm_task creation failed for %s.", name);
		return 0;
	}

	/* FIXME Kernel should fill in name on return instead */
	if (!dm_task_set_name(dmt, name)) {
		log_debug("Failed to set device name for %s resumption.", name);
		goto out;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s resumption.", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
		log_error("Failed to set read ahead");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto_out;

	if (!(r = dm_task_run(dmt)))
		goto_out;

	if (already_suspended)
		dec_suspended();

	if (!(r = dm_task_get_info(dmt, newinfo)))
		stack;

out:
	dm_task_destroy(dmt);

	return r;
}

static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt))) {
		inc_suspended();
		r = dm_task_get_info(dmt, newinfo);
	}

	dm_task_destroy(dmt);

	return r;
}

static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
{
	struct dm_task *dmt;
	int r = 0;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set major minor.");
		goto out;
	}

	if (!dm_task_run(dmt))
		goto_out;

	dm_get_next_target(dmt, NULL, &start, &length, &type, &params);

	if (type && (strcmp(type, "thin-pool") != 0)) {
		log_error(INTERNAL_ERROR
			  "Expected thin-pool target for %d:%d and got %s.",
			  dnode->info.major, dnode->info.minor, type);
		goto out;
	}

	if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
		log_error(INTERNAL_ERROR
			  "Failed to parse transaction_id from %s.", params);
		goto out;
	}

	log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
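
/*
 * Note: only the leading field of the thin-pool status line is parsed
 * above; a params string such as "1 10/8192 0/102400 -" (an assumed
 * example) would yield transaction_id 1.
 */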

static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];
	int r;

	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	}

	if (!r) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
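
/*
 * Note: the messages built above are plain-text target messages such as
 * "create_thin 0", "create_snap 1 0", "delete 2", "trim 0 1024" or
 * "set_transaction_id 0 1", sent to the thin-pool device via
 * DM_DEVICE_TARGET_MSG.
 */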

static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	if ((dnode == &dnode->dtree->root) || /* root has props.segs uninitialized */
	    !dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		return_0;

	if (trans_id == dnode->props.thin_pool_transaction_id)
		return 1; /* In sync - skip messages */

	if (trans_id != (dnode->props.thin_pool_transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, dnode->props.thin_pool_transaction_id - 1);
		return 0; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			return_0;

	return 1;
}

/*
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (!_check_device_not_in_use(&info))
			continue;

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      child->dtree->retry_remove)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			dec_suspended();

		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}

int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
}

void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}

void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}

void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
	dnode->dtree->retry_remove = 1;
}

int dm_tree_suspend_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}

int dm_tree_activate_children(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info newinfo;
	const char *name;
	const char *uuid;
	int priority;

	/* Activate children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	handle = NULL;

	for (priority = 0; priority < 3; priority++) {
		while ((child = dm_tree_next_child(&handle, dnode, 0))) {
			if (priority != child->activation_priority)
				continue;

			if (!(uuid = dm_tree_node_get_uuid(child))) {
				stack;
				continue;
			}

			if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
				continue;

			if (!(name = dm_tree_node_get_name(child))) {
				stack;
				continue;
			}

			/* Rename? */
			if (child->props.new_name) {
				if (!_rename_node(name, child->props.new_name, child->info.major,
						  child->info.minor, &child->dtree->cookie,
						  child->udev_flags)) {
					log_error("Failed to rename %s (%" PRIu32
						  ":%" PRIu32 ") to %s", name, child->info.major,
						  child->info.minor, child->props.new_name);
					return 0;
				}
				child->name = child->props.new_name;
				child->props.new_name = NULL;
			}

			if (!child->info.inactive_table && !child->info.suspended)
				continue;

			if (!_resume_node(child->name, child->info.major, child->info.minor,
					  child->props.read_ahead, child->props.read_ahead_flags,
					  &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
				log_error("Unable to resume %s (%" PRIu32
					  ":%" PRIu32 ")", child->name, child->info.major,
					  child->info.minor);
				r = 0;
				continue;
			}

			/* Update cached info */
			child->info = newinfo;
		}
	}

	handle = NULL;

	return r;
}
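
/*
 * Note: activation descends depth-first, so the lowest-level dependencies
 * are renamed/resumed before the devices stacked on top of them; nodes at
 * each level are then processed in activation_priority order (0 first).
 */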

static int _create_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;

	log_verbose("Creating %s", dnode->name);

	if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
		log_error("Create dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_name(dmt, dnode->name)) {
		log_error("Failed to set device name for %s", dnode->name);
		goto out;
	}

	if (!dm_task_set_uuid(dmt, dnode->uuid)) {
		log_error("Failed to set uuid for %s", dnode->name);
		goto out;
	}

	if (dnode->props.major &&
	    (!dm_task_set_major(dmt, dnode->props.major) ||
	     !dm_task_set_minor(dmt, dnode->props.minor))) {
		log_error("Failed to set device number for %s creation.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if ((r = dm_task_run(dmt)))
		r = dm_task_get_info(dmt, &dnode->info);

out:
	dm_task_destroy(dmt);

	return r;
}

static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
{
	if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
		log_error("Failed to format %s device number for %s as dm "
			  "target (%u,%u)",
			  node->name, node->uuid, node->info.major, node->info.minor);
		return 0;
	}

	return 1;
}

/* Simplify string emitting code */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
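
/*
 * Usage sketch (illustrative only): EMIT_PARAMS() appends formatted text
 * to the target parameter buffer and advances the position, e.g.
 *
 *	EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
 *
 * It relies on 'params' and 'paramsize' being in scope and makes the
 * enclosing function return -1 if the buffer would overflow.
 */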

/*
 * _emit_areas_line
 *
 * Returns: 1 on success, 0 on failure
 */
static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;
	const char *logtype, *synctype;
	unsigned log_parm_count;

	dm_list_iterate_items(area, &seg->areas) {
		switch (seg->type) {
		case SEG_REPLICATOR_DEV:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
			if (first_time)
				EMIT_PARAMS(*pos, " nolog 0");
			else {
				/* Remote devices */
				log_parm_count = (area->flags &
						  (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;

				if (!area->slog) {
					devbuf[0] = 0;		/* Only core log parameters */
					logtype = "core";
				} else {
					devbuf[0] = ' ';	/* Extra space before device name */
					if (!_build_dev_string(devbuf + 1,
							       sizeof(devbuf) - 1,
							       area->slog))
						return_0;
					logtype = "disk";
					log_parm_count++;	/* Extra sync log device name parameter */
				}

				EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
					    log_parm_count, devbuf, area->region_size);

				synctype = (area->flags & DM_NOSYNC) ?
					    " nosync" : (area->flags & DM_FORCESYNC) ?
					    " sync" : NULL;

				if (synctype)
					EMIT_PARAMS(*pos, "%s", synctype);
			}
			break;
		case SEG_RAID1:
		case SEG_RAID4:
		case SEG_RAID5_LA:
		case SEG_RAID5_RA:
		case SEG_RAID5_LS:
		case SEG_RAID5_RS:
		case SEG_RAID6_ZR:
		case SEG_RAID6_NR:
		case SEG_RAID6_NC:
			if (!area->dev_node) {
				EMIT_PARAMS(*pos, " -");
				break;
			}
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %s", devbuf);
			break;
		default:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
				    devbuf, area->offset);
		}

		first_time = 0;
	}

	return 1;
}

static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}

/*
 * Returns: 1 on success, 0 on failure
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table.  It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types.  Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
1958
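/*
 * Illustrative example (not taken verbatim from any header): for a two-leg
 * mirror with a core log, nosync and error handling, the params built here
 * come out roughly as
 *
 *   core 2 2048 nosync 2 253:3 0 253:4 0 1 handle_errors
 *
 * i.e. <log_type> <#log_args> <log_args>... <#mirrors> <dev> <offset>...
 * [<#features> <features>]; the device numbers are made up.
 */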
cac52ca4
JEB
1959static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1960 uint32_t minor, struct load_segment *seg,
1961 uint64_t *seg_start, char *params,
1962 size_t paramsize)
1963{
ad2432dc 1964 uint32_t i;
cac52ca4
JEB
1965 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1966 int pos = 0;
1967
1968 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1969 param_count++;
1970
1971 if (seg->region_size)
1972 param_count += 2;
1973
ad2432dc
MB
1974 /* rebuilds is 64-bit */
1975 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
1976 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 1977
cac52ca4
JEB
1978 if ((seg->type == SEG_RAID1) && seg->stripe_size)
1979 log_error("WARNING: Ignoring RAID1 stripe size");
1980
1981 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
1982 param_count, seg->stripe_size);
1983
1984 if (seg->flags & DM_NOSYNC)
1985 EMIT_PARAMS(pos, " nosync");
1986 else if (seg->flags & DM_FORCESYNC)
1987 EMIT_PARAMS(pos, " sync");
1988
1989 if (seg->region_size)
1990 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
1991
f439e65b
JEB
1992 for (i = 0; i < (seg->area_count / 2); i++)
1993 if (seg->rebuilds & (1ULL << i))
1994 EMIT_PARAMS(pos, " rebuild %u", i);
1995
cac52ca4
JEB
1996 /* Print number of metadata/data device pairs */
1997 EMIT_PARAMS(pos, " %u", seg->area_count/2);
1998
1999 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2000 return_0;
2001
2002 return 1;
2003}
2004
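/*
 * Illustrative example: a two-device raid1 with a 1024-sector region size
 * and no rebuilds would be emitted roughly as
 *
 *   raid1 3 0 region_size 1024 2 - 253:5 - 253:6
 *
 * i.e. <raid_type> <#params> <chunk_size> [optional args] <#dev_pairs>
 * <meta_dev> <data_dev>..., where "-" marks a missing metadata device
 * (see the SEG_RAID* handling in _emit_areas_line()); device numbers are
 * made up.
 */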
8f26e18c
JEB
2005static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
2006 uint32_t minor, struct load_segment *seg,
2007 uint64_t *seg_start, char *params,
2008 size_t paramsize)
2009{
2010 int pos = 0;
2011 int r;
cac52ca4 2012 int target_type_is_raid = 0;
8f26e18c 2013 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
4251236e 2014 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 2015
8f26e18c
JEB
2016 switch(seg->type) {
2017 case SEG_ERROR:
2018 case SEG_ZERO:
2019 case SEG_LINEAR:
2020 break;
2021 case SEG_MIRRORED:
2022 /* Mirrors are pretty complicated - now in separate function */
beecb1e1 2023 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
3c74075f
JEB
2024 if (!r)
2025 return_0;
165e4a11 2026 break;
b262f3e1
ZK
2027 case SEG_REPLICATOR:
2028 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
2029 &pos)) <= 0) {
2030 stack;
2031 return r;
2032 }
2033 break;
2034 case SEG_REPLICATOR_DEV:
2035 if (!seg->replicator || !_build_dev_string(originbuf,
2036 sizeof(originbuf),
2037 seg->replicator))
2038 return_0;
2039
2040 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
2041 break;
165e4a11 2042 case SEG_SNAPSHOT:
aa6f4e51 2043 case SEG_SNAPSHOT_MERGE:
b4f1578f
AK
2044 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2045 return_0;
2046 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
2047 return_0;
ffa9b6a5
ZK
2048 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
2049 seg->persistent ? 'P' : 'N', seg->chunk_size);
165e4a11
AK
2050 break;
2051 case SEG_SNAPSHOT_ORIGIN:
b4f1578f
AK
2052 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2053 return_0;
ffa9b6a5 2054 EMIT_PARAMS(pos, "%s", originbuf);
165e4a11
AK
2055 break;
2056 case SEG_STRIPED:
609faae9 2057 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
165e4a11 2058 break;
12ca060e 2059 case SEG_CRYPT:
609faae9 2060 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
12ca060e
MB
2061 seg->chainmode ? "-" : "", seg->chainmode ?: "",
2062 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
2063 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
2064 seg->iv_offset : *seg_start);
2065 break;
cac52ca4
JEB
2066 case SEG_RAID1:
2067 case SEG_RAID4:
2068 case SEG_RAID5_LA:
2069 case SEG_RAID5_RA:
2070 case SEG_RAID5_LS:
2071 case SEG_RAID5_RS:
2072 case SEG_RAID6_ZR:
2073 case SEG_RAID6_NR:
2074 case SEG_RAID6_NC:
2075 target_type_is_raid = 1;
2076 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
2077 params, paramsize);
2078 if (!r)
2079 return_0;
2080
2081 break;
4251236e
ZK
2082 case SEG_THIN_POOL:
2083 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
2084 return_0;
2085 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2086 return_0;
2087 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
e9156c2b 2088 seg->data_block_size, seg->low_water_mark,
ac08d9c0 2089 seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
4251236e
ZK
2090 break;
2091 case SEG_THIN:
2092 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2093 return_0;
2094 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
2095 break;
165e4a11
AK
2096 }
2097
2098 switch(seg->type) {
2099 case SEG_ERROR:
b262f3e1 2100 case SEG_REPLICATOR:
165e4a11
AK
2101 case SEG_SNAPSHOT:
2102 case SEG_SNAPSHOT_ORIGIN:
aa6f4e51 2103 case SEG_SNAPSHOT_MERGE:
165e4a11 2104 case SEG_ZERO:
4251236e
ZK
2105 case SEG_THIN_POOL:
2106 case SEG_THIN:
165e4a11 2107 break;
12ca060e 2108 case SEG_CRYPT:
165e4a11 2109 case SEG_LINEAR:
b262f3e1 2110 case SEG_REPLICATOR_DEV:
165e4a11
AK
2111 case SEG_STRIPED:
2112 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
2113 stack;
2114 return r;
2115 }
b6793963
AK
2116 if (!params[0]) {
2117 log_error("No parameters supplied for %s target "
2118 "%u:%u.", dm_segtypes[seg->type].target,
812e10ac 2119 major, minor);
b6793963
AK
2120 return 0;
2121 }
165e4a11
AK
2122 break;
2123 }
2124
4b2cae46
AK
2125 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
2126 " %" PRIu64 " %s %s", major, minor,
f439e65b
JEB
2127 *seg_start, seg->size, target_type_is_raid ? "raid" :
2128 dm_segtypes[seg->type].target, params);
165e4a11 2129
cac52ca4
JEB
2130 if (!dm_task_add_target(dmt, *seg_start, seg->size,
2131 target_type_is_raid ? "raid" :
2132 dm_segtypes[seg->type].target, params))
b4f1578f 2133 return_0;
165e4a11
AK
2134
2135 *seg_start += seg->size;
2136
2137 return 1;
2138}
2139
ffa9b6a5
ZK
2140#undef EMIT_PARAMS
2141
4b2cae46
AK
2142static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2143 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2144{
2145 char *params;
2146 size_t paramsize = 4096;
2147 int ret;
2148
2149 do {
2150 if (!(params = dm_malloc(paramsize))) {
2151 log_error("Insufficient space for target parameters.");
2152 return 0;
2153 }
2154
12ea7cb1 2155 params[0] = '\0';
4b2cae46
AK
2156 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2157 params, paramsize);
165e4a11
AK
2158 dm_free(params);
2159
2160 if (!ret)
2161 stack;
2162
2163 if (ret >= 0)
2164 return ret;
2165
2166 log_debug("Insufficient space in params[%" PRIsize_t
2167 "] for target parameters.", paramsize);
2168
2169 paramsize *= 2;
2170 } while (paramsize < MAX_TARGET_PARAMSIZE);
2171
2172 log_error("Target parameter size too big. Aborting.");
2173 return 0;
2174}
2175
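/*
 * Note: EMIT_PARAMS() (defined earlier in this file) is assumed to make the
 * emitting function return a negative value once the params buffer is
 * exhausted; _emit_segment() treats that as "retry with a buffer twice as
 * large" until MAX_TARGET_PARAMSIZE is reached.
 */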
b4f1578f 2176static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2177{
2178 int r = 0;
2179 struct dm_task *dmt;
2180 struct load_segment *seg;
df390f17 2181 uint64_t seg_start = 0, existing_table_size;
165e4a11 2182
4b2cae46
AK
2183 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2184 dnode->info.major, dnode->info.minor);
165e4a11
AK
2185
2186 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2187 log_error("Reload dm_task creation failed for %s", dnode->name);
2188 return 0;
2189 }
2190
2191 if (!dm_task_set_major(dmt, dnode->info.major) ||
2192 !dm_task_set_minor(dmt, dnode->info.minor)) {
2193 log_error("Failed to set device number for %s reload.", dnode->name);
2194 goto out;
2195 }
2196
2197 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2198 log_error("Failed to set read only flag for %s", dnode->name);
2199 goto out;
2200 }
2201
2202 if (!dm_task_no_open_count(dmt))
2203 log_error("Failed to disable open_count");
2204
2c44337b 2205 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2206 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2207 seg, &seg_start))
b4f1578f 2208 goto_out;
165e4a11 2209
ec289b64
AK
2210 if (!dm_task_suppress_identical_reload(dmt))
2211 log_error("Failed to suppress reload of identical tables.");
2212
2213 if ((r = dm_task_run(dmt))) {
165e4a11 2214 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2215 if (r && !dnode->info.inactive_table)
2216 log_verbose("Suppressed %s identical table reload.",
2217 dnode->name);
bb875bb9 2218
df390f17 2219 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2220 if ((dnode->props.size_changed =
df390f17 2221 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2222 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2223 PRIu64 " for %s", existing_table_size,
bb875bb9 2224 seg_start, dnode->name);
df390f17
AK
2225 /*
2226 * Kernel usually skips size validation on zero-length devices
2227 * now so no need to preload them.
2228 */
2229 /* FIXME In which kernel version did this begin? */
2230 if (!existing_table_size && dnode->props.delay_resume_if_new)
2231 dnode->props.size_changed = 0;
2232 }
ec289b64 2233 }
165e4a11
AK
2234
2235 dnode->props.segment_count = 0;
2236
2237out:
2238 dm_task_destroy(dmt);
2239
2240 return r;
165e4a11
AK
2241}
2242
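/*
 * Overview (descriptive only): dm_tree_preload_children() recurses into
 * children first, creates any missing nodes, loads their tables, and
 * resumes a child straight away only when it has parents, its table size
 * changed and it is suspended or has an inactive table; udev
 * synchronisation and thin-pool messages are handled after the whole
 * level has been processed.
 */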
b4f1578f 2243int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2244 const char *uuid_prefix,
2245 size_t uuid_prefix_len)
165e4a11 2246{
2ca6b865 2247 int r = 1;
165e4a11 2248 void *handle = NULL;
b4f1578f 2249 struct dm_tree_node *child;
165e4a11 2250 struct dm_info newinfo;
566515c0 2251 int update_devs_flag = 0;
165e4a11
AK
2252
2253 /* Preload children first */
b4f1578f 2254 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2255 /* Skip existing non-device-mapper devices */
2256 if (!child->info.exists && child->info.major)
2257 continue;
2258
2259 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2260 if (child->info.exists &&
2261 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2262 continue;
2263
b4f1578f 2264 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2265 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2266 return_0;
165e4a11 2267
165e4a11
AK
2268 /* FIXME Cope if name exists with no uuid? */
2269 if (!child->info.exists) {
2270 if (!_create_node(child)) {
2271 stack;
2272 return 0;
2273 }
2274 }
2275
2276 if (!child->info.inactive_table && child->props.segment_count) {
2277 if (!_load_node(child)) {
2278 stack;
2279 return 0;
2280 }
2281 }
2282
eb91c4ee
MB
2283 /* Propagate device size change */
2284 if (child->props.size_changed)
2285 dnode->props.size_changed = 1;
2286
bb875bb9 2287 /* Resume device immediately if it has parents and its size changed */
3776c494 2288 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2289 continue;
2290
7707ea90
AK
2291 if (!child->info.inactive_table && !child->info.suspended)
2292 continue;
2293
fc795d87 2294 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2295 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2296 &newinfo, &child->dtree->cookie, child->udev_flags,
2297 child->info.suspended)) {
165e4a11 2298 log_error("Unable to resume %s (%" PRIu32
fc795d87 2299 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2300 child->info.minor);
2ca6b865 2301 r = 0;
165e4a11
AK
2302 continue;
2303 }
2304
2305 /* Update cached info */
2306 child->info = newinfo;
566515c0
PR
2307
2308 /*
2309 * Prepare for immediate synchronization with udev and flush all stacked
2310 * dev node operations if requested by immediate_dev_node property. But
2311 * finish processing current level in the tree first.
2312 */
2313 if (child->props.immediate_dev_node)
2314 update_devs_flag = 1;
165e4a11
AK
2315 }
2316
2317 handle = NULL;
2318
566515c0
PR
2319 if (update_devs_flag) {
2320 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2321 stack;
2322 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2323 }
2324
11f64f0a 2325 if (r && !_node_send_messages(dnode, uuid_prefix, uuid_prefix_len)) {
25e6ab87
ZK
2326 stack;
2327 if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
2328 log_error("Failed to deactivate %s", dnode->name);
2329 r = 0;
2330 }
2331
2ca6b865 2332 return r;
165e4a11
AK
2333}
2334
165e4a11
AK
2335/*
2336 * Returns 1 if unsure.
2337 */
b4f1578f 2338int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2339 const char *uuid_prefix,
2340 size_t uuid_prefix_len)
2341{
2342 void *handle = NULL;
b4f1578f 2343 struct dm_tree_node *child = dnode;
165e4a11
AK
2344 const char *uuid;
2345
b4f1578f
AK
2346 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2347 if (!(uuid = dm_tree_node_get_uuid(child))) {
2348 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2349 return 1;
2350 }
2351
87f98002 2352 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2353 return 1;
2354
b4f1578f
AK
2355 if (dm_tree_node_num_children(child, 0) &&
2356 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len)) return 1;
165e4a11
AK
2357 }
2358
2359 return 0;
2360}
2361
2362/*
2363 * Target functions
2364 */
b4f1578f 2365static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2366{
2367 struct load_segment *seg;
2368
b4f1578f
AK
2369 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2370 log_error("dtree node segment allocation failed");
165e4a11
AK
2371 return NULL;
2372 }
2373
2374 seg->type = type;
2375 seg->size = size;
2376 seg->area_count = 0;
2c44337b 2377 dm_list_init(&seg->areas);
165e4a11
AK
2378 seg->stripe_size = 0;
2379 seg->persistent = 0;
2380 seg->chunk_size = 0;
2381 seg->cow = NULL;
2382 seg->origin = NULL;
aa6f4e51 2383 seg->merge = NULL;
165e4a11 2384
2c44337b 2385 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2386 dnode->props.segment_count++;
2387
2388 return seg;
2389}
2390
b4f1578f 2391int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2392 uint64_t size,
2393 const char *origin_uuid)
165e4a11
AK
2394{
2395 struct load_segment *seg;
b4f1578f 2396 struct dm_tree_node *origin_node;
165e4a11 2397
b4f1578f
AK
2398 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2399 return_0;
165e4a11 2400
b4f1578f 2401 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2402 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2403 return 0;
2404 }
2405
2406 seg->origin = origin_node;
b4f1578f
AK
2407 if (!_link_tree_nodes(dnode, origin_node))
2408 return_0;
165e4a11 2409
56c28292
AK
2410 /* Resume snapshot origins after new snapshots */
2411 dnode->activation_priority = 1;
2412
165e4a11
AK
2413 return 1;
2414}
2415
aa6f4e51
MS
2416static int _add_snapshot_target(struct dm_tree_node *node,
2417 uint64_t size,
2418 const char *origin_uuid,
2419 const char *cow_uuid,
2420 const char *merge_uuid,
2421 int persistent,
2422 uint32_t chunk_size)
165e4a11
AK
2423{
2424 struct load_segment *seg;
aa6f4e51
MS
2425 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2426 unsigned seg_type;
2427
2428 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
165e4a11 2429
aa6f4e51 2430 if (!(seg = _add_segment(node, seg_type, size)))
b4f1578f 2431 return_0;
165e4a11 2432
b4f1578f 2433 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
165e4a11
AK
2434 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2435 return 0;
2436 }
2437
2438 seg->origin = origin_node;
b4f1578f
AK
2439 if (!_link_tree_nodes(node, origin_node))
2440 return_0;
165e4a11 2441
b4f1578f 2442 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
aa6f4e51 2443 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
165e4a11
AK
2444 return 0;
2445 }
2446
2447 seg->cow = cow_node;
b4f1578f
AK
2448 if (!_link_tree_nodes(node, cow_node))
2449 return_0;
165e4a11
AK
2450
2451 seg->persistent = persistent ? 1 : 0;
2452 seg->chunk_size = chunk_size;
2453
aa6f4e51
MS
2454 if (merge_uuid) {
2455 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2456 /* not a pure error, merging snapshot may have been deactivated */
2457 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2458 } else {
2459 seg->merge = merge_node;
2460 /* must not link merging snapshot, would undermine activation_priority below */
2461 }
2462
2463 /* Resume snapshot-merge (acting origin) after other snapshots */
2464 node->activation_priority = 1;
2465 if (seg->merge) {
2466 /* Resume merging snapshot after snapshot-merge */
2467 seg->merge->activation_priority = 2;
2468 }
2469 }
2470
165e4a11
AK
2471 return 1;
2472}
2473
aa6f4e51
MS
2474
2475int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2476 uint64_t size,
2477 const char *origin_uuid,
2478 const char *cow_uuid,
2479 int persistent,
2480 uint32_t chunk_size)
2481{
2482 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2483 NULL, persistent, chunk_size);
2484}
2485
2486int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2487 uint64_t size,
2488 const char *origin_uuid,
2489 const char *cow_uuid,
2490 const char *merge_uuid,
2491 uint32_t chunk_size)
2492{
2493 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2494 merge_uuid, 1, chunk_size);
2495}
2496
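/*
 * Hedged usage sketch (UUIDs and sizes are made-up placeholders): a
 * persistent snapshot is set up by loading an origin target on one node
 * and a snapshot target on another, both referring to nodes already in
 * the same dm_tree:
 *
 *   dm_tree_node_add_snapshot_origin_target(origin_node, 204800, "LVM-orig");
 *   dm_tree_node_add_snapshot_target(snap_node, 204800, "LVM-orig",
 *                                    "LVM-cow", 1, 16);
 */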
b4f1578f 2497int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2498 uint64_t size)
165e4a11 2499{
b4f1578f
AK
2500 if (!_add_segment(node, SEG_ERROR, size))
2501 return_0;
165e4a11
AK
2502
2503 return 1;
2504}
2505
b4f1578f 2506int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2507 uint64_t size)
165e4a11 2508{
b4f1578f
AK
2509 if (!_add_segment(node, SEG_ZERO, size))
2510 return_0;
165e4a11
AK
2511
2512 return 1;
2513}
2514
b4f1578f 2515int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2516 uint64_t size)
165e4a11 2517{
b4f1578f
AK
2518 if (!_add_segment(node, SEG_LINEAR, size))
2519 return_0;
165e4a11
AK
2520
2521 return 1;
2522}
2523
b4f1578f 2524int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2525 uint64_t size,
2526 uint32_t stripe_size)
165e4a11
AK
2527{
2528 struct load_segment *seg;
2529
b4f1578f
AK
2530 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2531 return_0;
165e4a11
AK
2532
2533 seg->stripe_size = stripe_size;
2534
2535 return 1;
2536}
2537
12ca060e
MB
2538int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2539 uint64_t size,
2540 const char *cipher,
2541 const char *chainmode,
2542 const char *iv,
2543 uint64_t iv_offset,
2544 const char *key)
2545{
2546 struct load_segment *seg;
2547
2548 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2549 return_0;
2550
2551 seg->cipher = cipher;
2552 seg->chainmode = chainmode;
2553 seg->iv = iv;
2554 seg->iv_offset = iv_offset;
2555 seg->key = key;
2556
2557 return 1;
2558}
2559
b4f1578f 2560int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2561 uint32_t region_size,
08e64ce5 2562 unsigned clustered,
165e4a11 2563 const char *log_uuid,
ce7ed2c0
AK
2564 unsigned area_count,
2565 uint32_t flags)
165e4a11 2566{
908db078 2567 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2568 struct load_segment *seg;
2569
2570 if (!node->props.segment_count) {
b8175c33 2571 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2572 return 0;
2573 }
2574
2c44337b 2575 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2576
24b026e3 2577 if (log_uuid) {
67b25ed4
AK
2578 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2579 log_error("log uuid pool_strdup failed");
2580 return 0;
2581 }
df390f17
AK
2582 if ((flags & DM_CORELOG))
2583 /* For pvmove: immediate resume (for size validation) isn't needed. */
2584 node->props.delay_resume_if_new = 1;
2585 else {
9723090c
AK
2586 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2587 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2588 return 0;
2589 }
2590
566515c0
PR
2591 if (clustered)
2592 log_node->props.immediate_dev_node = 1;
2593
0a99713e
AK
2594 /* The kernel validates the size of disk logs. */
2595 /* FIXME Propagate to any devices below */
2596 log_node->props.delay_resume_if_new = 0;
2597
9723090c
AK
2598 if (!_link_tree_nodes(node, log_node))
2599 return_0;
2600 }
165e4a11
AK
2601 }
2602
2603 seg->log = log_node;
165e4a11
AK
2604 seg->region_size = region_size;
2605 seg->clustered = clustered;
2606 seg->mirror_area_count = area_count;
dbcb64b8 2607 seg->flags = flags;
165e4a11
AK
2608
2609 return 1;
2610}
2611
b4f1578f 2612int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2613 uint64_t size)
165e4a11 2614{
cbecd3cd 2615 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2616 return_0;
165e4a11
AK
2617
2618 return 1;
2619}
2620
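/*
 * Hedged usage sketch: the mirrored segment must exist before a log or any
 * legs can be attached, so callers are expected to do roughly
 *
 *   dm_tree_node_add_mirror_target(node, size);
 *   dm_tree_node_add_mirror_target_log(node, region_size, 0, log_uuid,
 *                                      nr_legs, flags);
 *   dm_tree_node_add_target_area(node, NULL, leg_uuid, 0);   (once per leg)
 *
 * A NULL log_uuid results in a core log (see _mirror_emit_segment_line()).
 */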
cac52ca4
JEB
2621int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2622 uint64_t size,
2623 const char *raid_type,
2624 uint32_t region_size,
2625 uint32_t stripe_size,
f439e65b 2626 uint64_t rebuilds,
cac52ca4
JEB
2627 uint64_t reserved2)
2628{
2629 int i;
2630 struct load_segment *seg = NULL;
2631
2632 for (i = 0; dm_segtypes[i].target && !seg; i++)
2633 if (!strcmp(raid_type, dm_segtypes[i].target))
2634 if (!(seg = _add_segment(node,
2635 dm_segtypes[i].type, size)))
2636 return_0;
2637
b2fa9b43
JEB
2638 if (!seg)
2639 return_0;
2640
cac52ca4
JEB
2641 seg->region_size = region_size;
2642 seg->stripe_size = stripe_size;
2643 seg->area_count = 0;
f439e65b 2644 seg->rebuilds = rebuilds;
cac52ca4
JEB
2645
2646 return 1;
2647}
2648
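/*
 * Hedged usage sketch: raid_type is looked up in dm_segtypes[], so e.g.
 *
 *   dm_tree_node_add_raid_target(node, size, "raid1", 1024, 0, 0, 0);
 *
 * creates a SEG_RAID1 segment; metadata/data devices are then attached as
 * areas, with dm_tree_node_add_null_area() standing in for a missing
 * metadata device.
 */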
b262f3e1
ZK
2649int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2650 uint64_t size,
2651 const char *rlog_uuid,
2652 const char *rlog_type,
2653 unsigned rsite_index,
2654 dm_replicator_mode_t mode,
2655 uint32_t async_timeout,
2656 uint64_t fall_behind_data,
2657 uint32_t fall_behind_ios)
2658{
2659 struct load_segment *rseg;
2660 struct replicator_site *rsite;
2661
2662 /* Local site0 - adds replicator segment and links rlog device */
2663 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2664 if (node->props.segment_count) {
2665 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2666 return 0;
2667 }
2668
2669 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2670 return_0;
2671
2672 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2673 log_error("Missing replicator log uuid %s.", rlog_uuid);
2674 return 0;
2675 }
2676
2677 if (!_link_tree_nodes(node, rseg->log))
2678 return_0;
2679
2680 if (strcmp(rlog_type, "ringbuffer") != 0) {
2681 log_error("Unsupported replicator log type %s.", rlog_type);
2682 return 0;
2683 }
2684
2685 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2686 return_0;
2687
2688 dm_list_init(&rseg->rsites);
2689 rseg->rdevice_count = 0;
2690 node->activation_priority = 1;
2691 }
2692
2693 /* Add site to segment */
2694 if (mode == DM_REPLICATOR_SYNC
2695 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2696 log_error("Async parameters passed for synchronnous replicator.");
2697 return 0;
2698 }
2699
2700 if (node->props.segment_count != 1) {
2701 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2702 return 0;
2703 }
2704
2705 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2706 if (rseg->type != SEG_REPLICATOR) {
2707 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2708 dm_segtypes[rseg->type].target);
2709 return 0;
2710 }
2711
2712 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2713 log_error("Failed to allocate remote site segment.");
2714 return 0;
2715 }
2716
2717 dm_list_add(&rseg->rsites, &rsite->list);
2718 rseg->rsite_count++;
2719
2720 rsite->mode = mode;
2721 rsite->async_timeout = async_timeout;
2722 rsite->fall_behind_data = fall_behind_data;
2723 rsite->fall_behind_ios = fall_behind_ios;
2724 rsite->rsite_index = rsite_index;
2725
2726 return 1;
2727}
2728
2729/* Appends device node to Replicator */
2730int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2731 uint64_t size,
2732 const char *replicator_uuid,
2733 uint64_t rdevice_index,
2734 const char *rdev_uuid,
2735 unsigned rsite_index,
2736 const char *slog_uuid,
2737 uint32_t slog_flags,
2738 uint32_t slog_region_size)
2739{
2740 struct seg_area *area;
2741 struct load_segment *rseg;
2742 struct load_segment *rep_seg;
2743
2744 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2745 /* Site index for local target */
2746 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2747 return_0;
2748
2749 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2750 log_error("Missing replicator uuid %s.", replicator_uuid);
2751 return 0;
2752 }
2753
2754 /* Local slink0 for replicator must be always initialized first */
2755 if (rseg->replicator->props.segment_count != 1) {
2756 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment.");
2757 return 0;
2758 }
2759
2760 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2761 if (rep_seg->type != SEG_REPLICATOR) {
2762 log_error(INTERNAL_ERROR "Attempt to use non-replicator segment %s.",
2763 dm_segtypes[rep_seg->type].target);
2764 return 0;
2765 }
2766 rep_seg->rdevice_count++;
2767
2768 if (!_link_tree_nodes(node, rseg->replicator))
2769 return_0;
2770
2771 rseg->rdevice_index = rdevice_index;
2772 } else {
2773 /* Local slink0 for replicator must be always initialized first */
2774 if (node->props.segment_count != 1) {
2775 log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment.");
2776 return 0;
2777 }
2778
2779 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2780 if (rseg->type != SEG_REPLICATOR_DEV) {
2781 log_error(INTERNAL_ERROR "Attempt to use non-replicator-dev segment %s.",
2782 dm_segtypes[rseg->type].target);
2783 return 0;
2784 }
2785 }
2786
2787 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2788 log_error("Unspecified sync log uuid.");
2789 return 0;
2790 }
2791
2792 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2793 return_0;
2794
2795 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2796
2797 if (!(slog_flags & DM_CORELOG)) {
2798 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2799 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2800 return 0;
2801 }
2802
2803 if (!_link_tree_nodes(node, area->slog))
2804 return_0;
2805 }
2806
2807 area->flags = slog_flags;
2808 area->region_size = slog_region_size;
2809 area->rsite_index = rsite_index;
2810
2811 return 1;
2812}
2813
5668fe04
ZK
2814static int _thin_validate_device_id(uint32_t device_id)
2815{
2816 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2817 log_error("Device id %u is higher then %u.",
2818 device_id, DM_THIN_MAX_DEVICE_ID);
2819 return 0;
2820 }
2821
2822 return 1;
2823}
2824
4251236e
ZK
2825int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2826 uint64_t size,
e0ea24be 2827 uint64_t transaction_id,
4251236e 2828 const char *metadata_uuid,
5668fd6a 2829 const char *pool_uuid,
4251236e 2830 uint32_t data_block_size,
e9156c2b 2831 uint64_t low_water_mark,
460c5991 2832 unsigned skip_block_zeroing)
4251236e
ZK
2833{
2834 struct load_segment *seg;
2835
3f53c059 2836 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2837 log_error("Data block size %u is lower then %u sectors.",
3f53c059 2838 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2839 return 0;
2840 }
2841
3f53c059 2842 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2843 log_error("Data block size %u is higher than %u sectors.",
3f53c059 2844 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2845 return 0;
2846 }
2847
2848 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2849 return_0;
2850
2851 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2852 log_error("Missing metadata uuid %s.", metadata_uuid);
2853 return 0;
2854 }
2855
2856 if (!_link_tree_nodes(node, seg->metadata))
2857 return_0;
2858
2859 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2860 log_error("Missing pool uuid %s.", pool_uuid);
2861 return 0;
2862 }
2863
2864 if (!_link_tree_nodes(node, seg->pool))
2865 return_0;
2866
e0ea24be 2867 node->props.thin_pool_transaction_id = transaction_id; /* compare on resume */
e9156c2b 2868 seg->low_water_mark = low_water_mark;
e0ea24be 2869 seg->data_block_size = data_block_size;
460c5991 2870 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
2871 dm_list_init(&seg->thin_messages);
2872
2873 return 1;
2874}
2875
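/*
 * Illustrative example: with the values stored above, _emit_segment_line()
 * later builds a thin-pool table line of the form
 *
 *   <metadata dev> <data dev> <data_block_size> <low_water_mark>
 *   <#features> [skip_block_zeroing]
 *
 * e.g. "253:7 253:8 128 0 1 skip_block_zeroing" (device numbers made up).
 */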
2876int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
7b199dc5 2877 const struct dm_thin_message *message)
25e6ab87
ZK
2878{
2879 struct load_segment *seg;
2880 struct thin_message *tm;
2881
2882 if (node->props.segment_count != 1) {
759b9592 2883 log_error("Thin pool node must have only one segment.");
25e6ab87
ZK
2884 return 0;
2885 }
2886
2887 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
25e6ab87 2888 if (seg->type != SEG_THIN_POOL) {
759b9592 2889 log_error("Thin pool node has segment type %s.",
25e6ab87
ZK
2890 dm_segtypes[seg->type].target);
2891 return 0;
2892 }
2893
2894 if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
2895 log_error("Failed to allocate thin message.");
2896 return 0;
2897 }
2898
2899 switch (message->type) {
2900 case DM_THIN_MESSAGE_CREATE_SNAP:
759b9592 2901 /* If the thin origin is active, it must be suspended first! */
25e6ab87 2902 if (message->u.m_create_snap.device_id == message->u.m_create_snap.origin_id) {
759b9592 2903 log_error("Cannot use same device id for origin and its snapshot.");
25e6ab87
ZK
2904 return 0;
2905 }
2906 if (!_thin_validate_device_id(message->u.m_create_snap.device_id) ||
2907 !_thin_validate_device_id(message->u.m_create_snap.origin_id))
2908 return_0;
2a0d806b 2909 tm->message.u.m_create_snap = message->u.m_create_snap;
25e6ab87
ZK
2910 break;
2911 case DM_THIN_MESSAGE_CREATE_THIN:
2912 if (!_thin_validate_device_id(message->u.m_create_thin.device_id))
2913 return_0;
2a0d806b 2914 tm->message.u.m_create_thin = message->u.m_create_thin;
660a42bc 2915 tm->expected_errno = EEXIST;
25e6ab87
ZK
2916 break;
2917 case DM_THIN_MESSAGE_DELETE:
2918 if (!_thin_validate_device_id(message->u.m_delete.device_id))
2919 return_0;
2a0d806b 2920 tm->message.u.m_delete = message->u.m_delete;
660a42bc 2921 tm->expected_errno = ENODATA;
25e6ab87
ZK
2922 break;
2923 case DM_THIN_MESSAGE_TRIM:
2924 if (!_thin_validate_device_id(message->u.m_trim.device_id))
2925 return_0;
2a0d806b 2926 tm->message.u.m_trim = message->u.m_trim;
25e6ab87
ZK
2927 break;
2928 case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
2929 if (message->u.m_set_transaction_id.current_id !=
2930 (message->u.m_set_transaction_id.new_id - 1)) {
2931 log_error("New transaction_id must be sequential.");
2932 return 0; /* FIXME: Maybe too strict here? */
2933 }
2a0d806b 2934 tm->message.u.m_set_transaction_id = message->u.m_set_transaction_id;
25e6ab87
ZK
2935 break;
2936 default:
2937 log_error("Unsupported message type %d.", (int) message->type);
2938 return 0;
2939 }
2940
2941 tm->message.type = message->type;
2942 dm_list_add(&seg->thin_messages, &tm->list);
4251236e
ZK
2943
2944 return 1;
2945}
2946
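/*
 * Hedged usage sketch (device id chosen arbitrarily): queue a message on a
 * thin-pool node before it is resumed:
 *
 *   struct dm_thin_message msg = { .type = DM_THIN_MESSAGE_CREATE_THIN };
 *   msg.u.m_create_thin.device_id = 1;
 *   if (!dm_tree_node_add_thin_pool_message(pool_node, &msg))
 *           stack;
 */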
2947int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2948 uint64_t size,
4251236e
ZK
2949 const char *thin_pool_uuid,
2950 uint32_t device_id)
2951{
2952 struct load_segment *seg;
2953
5668fe04
ZK
2954 if (!_thin_validate_device_id(device_id))
2955 return_0;
4251236e
ZK
2956
2957 if (!(seg = _add_segment(node, SEG_THIN, size)))
2958 return_0;
2959
2960 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2961 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2962 return 0;
2963 }
2964
2965 if (!_link_tree_nodes(node, seg->pool))
2966 return_0;
2967
1419bf1c
ZK
2968 seg->device_id = device_id;
2969
4251236e
ZK
2970 return 1;
2971}
2972
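/*
 * Illustrative example: the resulting thin table line is simply
 * "<pool dev> <device id>", e.g. "253:8 1" (see SEG_THIN in
 * _emit_segment_line() above; device numbers made up).
 */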
b4f1578f 2973static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
2974{
2975 struct seg_area *area;
2976
b4f1578f 2977 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
2978 log_error("Failed to allocate target segment area.");
2979 return 0;
2980 }
2981
2982 area->dev_node = dev_node;
2983 area->offset = offset;
2984
2c44337b 2985 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
2986 seg->area_count++;
2987
2988 return 1;
2989}
2990
b4f1578f 2991int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
2992 const char *dev_name,
2993 const char *uuid,
2994 uint64_t offset)
165e4a11
AK
2995{
2996 struct load_segment *seg;
2997 struct stat info;
b4f1578f 2998 struct dm_tree_node *dev_node;
165e4a11
AK
2999
3000 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3001 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3002 return 0;
3003 }
3004
3005 if (uuid) {
b4f1578f 3006 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3007 log_error("Couldn't find area uuid %s.", uuid);
3008 return 0;
3009 }
b4f1578f
AK
3010 if (!_link_tree_nodes(node, dev_node))
3011 return_0;
165e4a11 3012 } else {
6d04311e 3013 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3014 log_error("Device %s not found.", dev_name);
3015 return 0;
3016 }
3017
40e5fd8b 3018 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3019 log_error("Device %s is not a block device.", dev_name);
3020 return 0;
3021 }
3022
3023 /* FIXME Check correct macro use */
cda69e17
PR
3024 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3025 MINOR(info.st_rdev), 0)))
b4f1578f 3026 return_0;
165e4a11
AK
3027 }
3028
3029 if (!node->props.segment_count) {
b8175c33 3030 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3031 return 0;
3032 }
3033
2c44337b 3034 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3035
b4f1578f
AK
3036 if (!_add_area(node, seg, dev_node, offset))
3037 return_0;
165e4a11
AK
3038
3039 return 1;
db208f51 3040}
bd90c6b2 3041
6d04311e
JEB
3042int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3043{
3044 struct load_segment *seg;
3045
3046 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3047
415c0690
AK
3048 switch (seg->type) {
3049 case SEG_RAID1:
3050 case SEG_RAID4:
3051 case SEG_RAID5_LA:
3052 case SEG_RAID5_RA:
3053 case SEG_RAID5_LS:
3054 case SEG_RAID5_RS:
3055 case SEG_RAID6_ZR:
3056 case SEG_RAID6_NR:
3057 case SEG_RAID6_NC:
3058 break;
3059 default:
3060 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3061 return 0;
3062 }
3063
6d04311e
JEB
3064 if (!_add_area(node, seg, NULL, offset))
3065 return_0;
3066
3067 return 1;
3068}
3069
bd90c6b2
AK
3070void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
3071{
3072 node->dtree->cookie = cookie;
3073}
3074
3075uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
3076{
3077 return node->dtree->cookie;
3078}