libdm/libdm-deptree.c
3d0480ed 1/*
4251236e 2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3d0480ed
AK
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
3e5b6ed2 15#include "dmlib.h"
3d0480ed
AK
16#include "libdm-targets.h"
17#include "libdm-common.h"
3d0480ed 18#include "kdev_t.h"
0782ad50 19#include "dm-ioctl.h"
3d0480ed
AK
20
21#include <stdarg.h>
22#include <sys/param.h>
8f26e18c 23#include <sys/utsname.h>
3d0480ed 24
165e4a11
AK
25#define MAX_TARGET_PARAMSIZE 500000
26
87f98002
AK
27/* FIXME Fix interface so this is used only by LVM */
28#define UUID_PREFIX "LVM-"
29
b262f3e1
ZK
30#define REPLICATOR_LOCAL_SITE 0
31
165e4a11
AK
32/* Supported segment types */
33enum {
12ca060e
MB
34 SEG_CRYPT,
35 SEG_ERROR,
165e4a11
AK
36 SEG_LINEAR,
37 SEG_MIRRORED,
b262f3e1
ZK
38 SEG_REPLICATOR,
39 SEG_REPLICATOR_DEV,
165e4a11
AK
40 SEG_SNAPSHOT,
41 SEG_SNAPSHOT_ORIGIN,
aa6f4e51 42 SEG_SNAPSHOT_MERGE,
165e4a11
AK
43 SEG_STRIPED,
44 SEG_ZERO,
4251236e
ZK
45 SEG_THIN_POOL,
46 SEG_THIN,
cac52ca4
JEB
47 SEG_RAID1,
48 SEG_RAID4,
49 SEG_RAID5_LA,
50 SEG_RAID5_RA,
51 SEG_RAID5_LS,
52 SEG_RAID5_RS,
53 SEG_RAID6_ZR,
54 SEG_RAID6_NR,
55 SEG_RAID6_NC,
56 SEG_LAST,
165e4a11 57};
b4f1578f 58
165e4a11
AK
59/* FIXME Add crypt and multipath support */
60
61struct {
62 unsigned type;
63 const char *target;
64} dm_segtypes[] = {
12ca060e 65 { SEG_CRYPT, "crypt" },
165e4a11
AK
66 { SEG_ERROR, "error" },
67 { SEG_LINEAR, "linear" },
68 { SEG_MIRRORED, "mirror" },
b262f3e1
ZK
69 { SEG_REPLICATOR, "replicator" },
70 { SEG_REPLICATOR_DEV, "replicator-dev" },
165e4a11
AK
71 { SEG_SNAPSHOT, "snapshot" },
72 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
aa6f4e51 73 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
165e4a11
AK
74 { SEG_STRIPED, "striped" },
75 { SEG_ZERO, "zero"},
4251236e
ZK
76 { SEG_THIN_POOL, "thin-pool"},
77 { SEG_THIN, "thin"},
cac52ca4
JEB
78 { SEG_RAID1, "raid1"},
79 { SEG_RAID4, "raid4"},
80 { SEG_RAID5_LA, "raid5_la"},
81 { SEG_RAID5_RA, "raid5_ra"},
82 { SEG_RAID5_LS, "raid5_ls"},
83 { SEG_RAID5_RS, "raid5_rs"},
84 { SEG_RAID6_ZR, "raid6_zr"},
85 { SEG_RAID6_NR, "raid6_nr"},
86 { SEG_RAID6_NC, "raid6_nc"},
ee05be08
ZK
87
88 /*
89 *WARNING: Since 'raid' target overloads this 1:1 mapping table
90 * for search do not add new enum elements past them!
91 */
cac52ca4
JEB
92 { SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
93 { SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
94 { SEG_LAST, NULL },
165e4a11
AK
95};
96
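/*
 * Illustrative sketch (not part of the original file): dm_segtypes[] is used
 * both by direct index (enum -> target name, see _raid_emit_segment_line) and
 * by linear scan (target name -> enum).  A name lookup might look like the
 * hypothetical helper below; because the scan takes the first match, the
 * aliased "raid5"/"raid6" entries must stay after the canonical ones, which
 * is what the WARNING above guards.
 */
static int _example_segtype_from_name(const char *target, unsigned *type)
{
        unsigned i;

        for (i = 0; dm_segtypes[i].target; i++)
                if (!strcmp(target, dm_segtypes[i].target)) {
                        *type = dm_segtypes[i].type;
                        return 1;
                }

        return 0;
}
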
97/* Some segment types have a list of areas of other devices attached */
98struct seg_area {
2c44337b 99 struct dm_list list;
165e4a11 100
b4f1578f 101 struct dm_tree_node *dev_node;
165e4a11
AK
102
103 uint64_t offset;
b262f3e1
ZK
104
105 unsigned rsite_index; /* Replicator site index */
106 struct dm_tree_node *slog; /* Replicator sync log node */
107 uint64_t region_size; /* Replicator sync log size */
108 uint32_t flags; /* Replicator sync log flags */
109};
110
111/* Replicator-log has a list of sites */
112/* FIXME: maybe move to seg_area too? */
113struct replicator_site {
114 struct dm_list list;
115
116 unsigned rsite_index;
117 dm_replicator_mode_t mode;
118 uint32_t async_timeout;
119 uint32_t fall_behind_ios;
120 uint64_t fall_behind_data;
165e4a11
AK
121};
122
123/* Per-segment properties */
124struct load_segment {
2c44337b 125 struct dm_list list;
165e4a11
AK
126
127 unsigned type;
128
129 uint64_t size;
130
b262f3e1
ZK
131 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
132 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
165e4a11 133
cac52ca4 134 uint32_t stripe_size; /* Striped + raid */
165e4a11
AK
135
136 int persistent; /* Snapshot */
137 uint32_t chunk_size; /* Snapshot */
b4f1578f
AK
138 struct dm_tree_node *cow; /* Snapshot */
139 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
aa6f4e51 140 struct dm_tree_node *merge; /* Snapshot */
165e4a11 141
b262f3e1 142 struct dm_tree_node *log; /* Mirror + Replicator */
cac52ca4 143 uint32_t region_size; /* Mirror + raid */
165e4a11
AK
144 unsigned clustered; /* Mirror */
145 unsigned mirror_area_count; /* Mirror */
dbcb64b8 146 uint32_t flags; /* Mirror log */
67b25ed4 147 char *uuid; /* Clustered mirror log */
12ca060e
MB
148
149 const char *cipher; /* Crypt */
150 const char *chainmode; /* Crypt */
151 const char *iv; /* Crypt */
152 uint64_t iv_offset; /* Crypt */
153 const char *key; /* Crypt */
b262f3e1
ZK
154
155 const char *rlog_type; /* Replicator */
156 struct dm_list rsites; /* Replicator */
157 unsigned rsite_count; /* Replicator */
158 unsigned rdevice_count; /* Replicator */
159 struct dm_tree_node *replicator;/* Replicator-dev */
160 uint64_t rdevice_index; /* Replicator-dev */
f439e65b 161
40e5fd8b 162 uint64_t rebuilds; /* raid */
4251236e
ZK
163
164 struct dm_tree_node *metadata; /* Thin_pool */
165 struct dm_tree_node *pool; /* Thin_pool, Thin */
460c5991 166 uint64_t low_water_mark_size; /* Thin_pool */
e0ea24be 167 uint32_t data_block_size; /* Thin_pool */
460c5991 168 unsigned skip_block_zeroing; /* Thin_pool */
4251236e
ZK
169 uint32_t device_id; /* Thin */
170
165e4a11
AK
171};
172
173/* Per-device properties */
174struct load_properties {
175 int read_only;
176 uint32_t major;
177 uint32_t minor;
178
52b84409
AK
179 uint32_t read_ahead;
180 uint32_t read_ahead_flags;
181
e0ea24be
ZK
182 uint64_t thin_pool_transaction_id; /* Thin_pool */
183
165e4a11 184 unsigned segment_count;
bb875bb9 185 unsigned size_changed;
2c44337b 186 struct dm_list segs;
165e4a11
AK
187
188 const char *new_name;
566515c0
PR
189
190 /* If immediate_dev_node is set to 1, try to create the dev node
191 * as soon as possible (e.g. in preload stage even during traversal
192 * and processing of dm tree). This will also flush all stacked dev
193 * node operations, synchronizing with udev.
194 */
df390f17
AK
195 unsigned immediate_dev_node;
196
197 /*
198 * If the device size changed from zero and this is set,
199 * don't resume the device immediately, even if the device
200 * has parents. This works provided the parents do not
201 * validate the device size and is required by pvmove to
202 * avoid starting the mirror resync operation too early.
203 */
204 unsigned delay_resume_if_new;
165e4a11
AK
205};
206
207/* Two of these are used to join two nodes via their uses and used_by lists. */
b4f1578f 208struct dm_tree_link {
2c44337b 209 struct dm_list list;
b4f1578f 210 struct dm_tree_node *node;
165e4a11
AK
211};
212
b4f1578f
AK
213struct dm_tree_node {
214 struct dm_tree *dtree;
3d0480ed 215
40e5fd8b
AK
216 const char *name;
217 const char *uuid;
218 struct dm_info info;
3d0480ed 219
40e5fd8b
AK
220 struct dm_list uses; /* Nodes this node uses */
221 struct dm_list used_by; /* Nodes that use this node */
165e4a11 222
56c28292
AK
223 int activation_priority; /* 0 gets activated first */
224
f16aea9e
PR
225 uint16_t udev_flags; /* Udev control flags */
226
165e4a11
AK
227 void *context; /* External supplied context */
228
229 struct load_properties props; /* For creation/table (re)load */
76d1aec8
ZK
230
231 /*
232 * If presuspend of a child node is needed.
233 * Note: only a direct child is allowed.
234 */
235 struct dm_tree_node *presuspend_node;
3d0480ed
AK
236};
237
b4f1578f 238struct dm_tree {
a3f6b2ce
AK
239 struct dm_pool *mem;
240 struct dm_hash_table *devs;
165e4a11 241 struct dm_hash_table *uuids;
b4f1578f 242 struct dm_tree_node root;
c55b1410 243 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
787200ef
PR
244 int no_flush; /* 1 sets noflush (mirrors/multipath) */
245 int retry_remove; /* 1 retries remove if not successful */
bd90c6b2 246 uint32_t cookie;
3d0480ed
AK
247};
248
b4f1578f 249struct dm_tree *dm_tree_create(void)
3d0480ed 250{
0395dd22 251 struct dm_pool *dmem;
b4f1578f 252 struct dm_tree *dtree;
3d0480ed 253
0395dd22
ZK
254 if (!(dmem = dm_pool_create("dtree", 1024)) ||
255 !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
256 log_error("Failed to allocate dtree.");
257 if (dmem)
258 dm_pool_destroy(dmem);
3d0480ed
AK
259 return NULL;
260 }
261
b4f1578f 262 dtree->root.dtree = dtree;
2c44337b
AK
263 dm_list_init(&dtree->root.uses);
264 dm_list_init(&dtree->root.used_by);
c55b1410 265 dtree->skip_lockfs = 0;
b9ffd32c 266 dtree->no_flush = 0;
0395dd22 267 dtree->mem = dmem;
3d0480ed 268
b4f1578f
AK
269 if (!(dtree->devs = dm_hash_create(8))) {
270 log_error("dtree hash creation failed");
271 dm_pool_destroy(dtree->mem);
3d0480ed
AK
272 return NULL;
273 }
274
b4f1578f
AK
275 if (!(dtree->uuids = dm_hash_create(32))) {
276 log_error("dtree uuid hash creation failed");
277 dm_hash_destroy(dtree->devs);
278 dm_pool_destroy(dtree->mem);
165e4a11
AK
279 return NULL;
280 }
281
b4f1578f 282 return dtree;
3d0480ed
AK
283}
284
b4f1578f 285void dm_tree_free(struct dm_tree *dtree)
3d0480ed 286{
b4f1578f 287 if (!dtree)
3d0480ed
AK
288 return;
289
b4f1578f
AK
290 dm_hash_destroy(dtree->uuids);
291 dm_hash_destroy(dtree->devs);
292 dm_pool_destroy(dtree->mem);
3d0480ed
AK
293}
294
04bde319
ZK
295static int _nodes_are_linked(const struct dm_tree_node *parent,
296 const struct dm_tree_node *child)
3d0480ed 297{
b4f1578f 298 struct dm_tree_link *dlink;
3d0480ed 299
2c44337b 300 dm_list_iterate_items(dlink, &parent->uses)
3d0480ed
AK
301 if (dlink->node == child)
302 return 1;
3d0480ed
AK
303
304 return 0;
305}
306
2c44337b 307static int _link(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 308{
b4f1578f 309 struct dm_tree_link *dlink;
3d0480ed 310
b4f1578f
AK
311 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
312 log_error("dtree link allocation failed");
3d0480ed
AK
313 return 0;
314 }
315
316 dlink->node = node;
2c44337b 317 dm_list_add(list, &dlink->list);
3d0480ed
AK
318
319 return 1;
320}
321
b4f1578f
AK
322static int _link_nodes(struct dm_tree_node *parent,
323 struct dm_tree_node *child)
3d0480ed
AK
324{
325 if (_nodes_are_linked(parent, child))
326 return 1;
327
328 if (!_link(&parent->uses, child))
329 return 0;
330
331 if (!_link(&child->used_by, parent))
332 return 0;
333
334 return 1;
335}
336
2c44337b 337static void _unlink(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 338{
b4f1578f 339 struct dm_tree_link *dlink;
3d0480ed 340
2c44337b 341 dm_list_iterate_items(dlink, list)
3d0480ed 342 if (dlink->node == node) {
2c44337b 343 dm_list_del(&dlink->list);
3d0480ed
AK
344 break;
345 }
3d0480ed
AK
346}
347
b4f1578f
AK
348static void _unlink_nodes(struct dm_tree_node *parent,
349 struct dm_tree_node *child)
3d0480ed
AK
350{
351 if (!_nodes_are_linked(parent, child))
352 return;
353
354 _unlink(&parent->uses, child);
355 _unlink(&child->used_by, parent);
356}
357
b4f1578f 358static int _add_to_toplevel(struct dm_tree_node *node)
165e4a11 359{
b4f1578f 360 return _link_nodes(&node->dtree->root, node);
165e4a11
AK
361}
362
b4f1578f 363static void _remove_from_toplevel(struct dm_tree_node *node)
3d0480ed 364{
b1ebf028 365 _unlink_nodes(&node->dtree->root, node);
3d0480ed
AK
366}
367
b4f1578f 368static int _add_to_bottomlevel(struct dm_tree_node *node)
3d0480ed 369{
b4f1578f 370 return _link_nodes(node, &node->dtree->root);
3d0480ed
AK
371}
372
b4f1578f 373static void _remove_from_bottomlevel(struct dm_tree_node *node)
165e4a11 374{
b1ebf028 375 _unlink_nodes(node, &node->dtree->root);
165e4a11
AK
376}
377
b4f1578f 378static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
165e4a11
AK
379{
380 /* Don't link to root node if child already has a parent */
f77736ca 381 if (parent == &parent->dtree->root) {
b4f1578f 382 if (dm_tree_node_num_children(child, 1))
165e4a11
AK
383 return 1;
384 } else
385 _remove_from_toplevel(child);
386
f77736ca 387 if (child == &child->dtree->root) {
b4f1578f 388 if (dm_tree_node_num_children(parent, 0))
165e4a11
AK
389 return 1;
390 } else
391 _remove_from_bottomlevel(parent);
392
393 return _link_nodes(parent, child);
394}
395
b4f1578f 396static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
397 const char *name,
398 const char *uuid,
165e4a11 399 struct dm_info *info,
f16aea9e
PR
400 void *context,
401 uint16_t udev_flags)
3d0480ed 402{
b4f1578f 403 struct dm_tree_node *node;
3d0480ed
AK
404 uint64_t dev;
405
b4f1578f
AK
406 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
407 log_error("_create_dm_tree_node alloc failed");
3d0480ed
AK
408 return NULL;
409 }
410
b4f1578f 411 node->dtree = dtree;
3d0480ed
AK
412
413 node->name = name;
414 node->uuid = uuid;
415 node->info = *info;
165e4a11 416 node->context = context;
f16aea9e 417 node->udev_flags = udev_flags;
56c28292 418 node->activation_priority = 0;
3d0480ed 419
2c44337b
AK
420 dm_list_init(&node->uses);
421 dm_list_init(&node->used_by);
422 dm_list_init(&node->props.segs);
3d0480ed
AK
423
424 dev = MKDEV(info->major, info->minor);
425
b4f1578f 426 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
3d0480ed 427 sizeof(dev), node)) {
b4f1578f
AK
428 log_error("dtree node hash insertion failed");
429 dm_pool_free(dtree->mem, node);
3d0480ed
AK
430 return NULL;
431 }
432
165e4a11 433 if (uuid && *uuid &&
b4f1578f
AK
434 !dm_hash_insert(dtree->uuids, uuid, node)) {
435 log_error("dtree uuid hash insertion failed");
436 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
165e4a11 437 sizeof(dev));
b4f1578f 438 dm_pool_free(dtree->mem, node);
165e4a11
AK
439 return NULL;
440 }
441
3d0480ed
AK
442 return node;
443}
444
b4f1578f 445static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
446 uint32_t major, uint32_t minor)
447{
448 uint64_t dev = MKDEV(major, minor);
449
b4f1578f 450 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
3d0480ed
AK
451 sizeof(dev));
452}
453
b4f1578f 454static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
455 const char *uuid)
456{
87f98002
AK
457 struct dm_tree_node *node;
458
459 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
460 return node;
461
462 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
463 return NULL;
464
465 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
165e4a11
AK
466}
467
a3f6b2ce 468static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
3d0480ed
AK
469 const char **name, const char **uuid,
470 struct dm_info *info, struct dm_deps **deps)
471{
472 memset(info, 0, sizeof(*info));
473
474 if (!dm_is_dm_major(major)) {
475 *name = "";
476 *uuid = "";
477 *deps = NULL;
478 info->major = major;
479 info->minor = minor;
480 info->exists = 0;
165e4a11
AK
481 info->live_table = 0;
482 info->inactive_table = 0;
483 info->read_only = 0;
3d0480ed
AK
484 return 1;
485 }
486
487 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
488 log_error("deps dm_task creation failed");
489 return 0;
490 }
491
b4f1578f
AK
492 if (!dm_task_set_major(*dmt, major)) {
493 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
494 major, minor);
3d0480ed 495 goto failed;
b4f1578f 496 }
3d0480ed 497
b4f1578f
AK
498 if (!dm_task_set_minor(*dmt, minor)) {
499 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
500 major, minor);
3d0480ed 501 goto failed;
b4f1578f 502 }
3d0480ed 503
b4f1578f
AK
504 if (!dm_task_run(*dmt)) {
505 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
506 major, minor);
3d0480ed 507 goto failed;
b4f1578f 508 }
3d0480ed 509
b4f1578f
AK
510 if (!dm_task_get_info(*dmt, info)) {
511 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
512 major, minor);
3d0480ed 513 goto failed;
b4f1578f 514 }
3d0480ed
AK
515
516 if (!info->exists) {
517 *name = "";
518 *uuid = "";
519 *deps = NULL;
520 } else {
521 if (info->major != major) {
b4f1578f 522 log_error("Inconsistent dtree major number: %u != %u",
3d0480ed
AK
523 major, info->major);
524 goto failed;
525 }
526 if (info->minor != minor) {
b4f1578f 527 log_error("Inconsistent dtree minor number: %u != %u",
3d0480ed
AK
528 minor, info->minor);
529 goto failed;
530 }
a3f6b2ce 531 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
3d0480ed
AK
532 log_error("name pool_strdup failed");
533 goto failed;
534 }
a3f6b2ce 535 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
3d0480ed
AK
536 log_error("uuid pool_strdup failed");
537 goto failed;
538 }
539 *deps = dm_task_get_deps(*dmt);
540 }
541
542 return 1;
543
544failed:
545 dm_task_destroy(*dmt);
546 return 0;
547}
548
b4f1578f
AK
549static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
550 struct dm_tree_node *parent,
cda69e17
PR
551 uint32_t major, uint32_t minor,
552 uint16_t udev_flags)
3d0480ed
AK
553{
554 struct dm_task *dmt = NULL;
555 struct dm_info info;
556 struct dm_deps *deps = NULL;
557 const char *name = NULL;
558 const char *uuid = NULL;
b4f1578f 559 struct dm_tree_node *node = NULL;
3d0480ed 560 uint32_t i;
3d0480ed
AK
561 int new = 0;
562
563 /* Already in tree? */
b4f1578f
AK
564 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
565 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
566 return_NULL;
3d0480ed 567
f16aea9e 568 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
cda69e17 569 NULL, udev_flags)))
b4f1578f 570 goto_out;
3d0480ed
AK
571 new = 1;
572 }
573
165e4a11
AK
574 if (!_link_tree_nodes(parent, node)) {
575 node = NULL;
b4f1578f 576 goto_out;
165e4a11 577 }
3d0480ed
AK
578
579 /* If node was already in tree, no need to recurse. */
580 if (!new)
165e4a11 581 goto out;
3d0480ed
AK
582
583 /* Can't recurse if not a mapped device or there are no dependencies */
584 if (!node->info.exists || !deps->count) {
b4f1578f
AK
585 if (!_add_to_bottomlevel(node)) {
586 stack;
165e4a11 587 node = NULL;
b4f1578f 588 }
165e4a11 589 goto out;
3d0480ed
AK
590 }
591
592 /* Add dependencies to tree */
593 for (i = 0; i < deps->count; i++)
b4f1578f 594 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
cda69e17 595 MINOR(deps->device[i]), udev_flags)) {
165e4a11 596 node = NULL;
b4f1578f 597 goto_out;
165e4a11 598 }
3d0480ed 599
3d0480ed
AK
600out:
601 if (dmt)
602 dm_task_destroy(dmt);
603
165e4a11
AK
604 return node;
605}
606
b4f1578f 607static int _node_clear_table(struct dm_tree_node *dnode)
165e4a11
AK
608{
609 struct dm_task *dmt;
610 struct dm_info *info;
611 const char *name;
612 int r;
613
614 if (!(info = &dnode->info)) {
b4f1578f 615 log_error("_node_clear_table failed: missing info");
165e4a11
AK
616 return 0;
617 }
618
b4f1578f
AK
619 if (!(name = dm_tree_node_get_name(dnode))) {
620 log_error("_node_clear_table failed: missing name");
165e4a11
AK
621 return 0;
622 }
623
624 /* Is there a table? */
625 if (!info->exists || !info->inactive_table)
626 return 1;
627
10d0d9c7
AK
628// FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.
629
165e4a11
AK
630 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
631 name, info->major, info->minor);
632
633 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
165e4a11
AK
634 log_error("Table clear dm_task creation failed for %s", name);
635 return 0;
636 }
637
638 if (!dm_task_set_major(dmt, info->major) ||
639 !dm_task_set_minor(dmt, info->minor)) {
640 log_error("Failed to set device number for %s table clear", name);
641 dm_task_destroy(dmt);
642 return 0;
643 }
644
645 r = dm_task_run(dmt);
646
647 if (!dm_task_get_info(dmt, info)) {
b4f1578f 648 log_error("_node_clear_table failed: info missing after running task for %s", name);
165e4a11
AK
649 r = 0;
650 }
651
652 dm_task_destroy(dmt);
653
3d0480ed
AK
654 return r;
655}
656
b4f1578f 657struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
165e4a11
AK
658 const char *name,
659 const char *uuid,
660 uint32_t major, uint32_t minor,
661 int read_only,
662 int clear_inactive,
663 void *context)
664{
b4f1578f 665 struct dm_tree_node *dnode;
165e4a11
AK
666 struct dm_info info;
667 const char *name2;
668 const char *uuid2;
669
670 /* Do we need to add node to tree? */
b4f1578f
AK
671 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
672 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
673 log_error("name pool_strdup failed");
674 return NULL;
675 }
b4f1578f 676 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
165e4a11
AK
677 log_error("uuid pool_strdup failed");
678 return NULL;
679 }
680
681 info.major = 0;
682 info.minor = 0;
683 info.exists = 0;
684 info.live_table = 0;
685 info.inactive_table = 0;
686 info.read_only = 0;
687
f16aea9e
PR
688 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
689 context, 0)))
b4f1578f 690 return_NULL;
165e4a11
AK
691
692 /* Attach to root node until a table is supplied */
b4f1578f
AK
693 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
694 return_NULL;
165e4a11
AK
695
696 dnode->props.major = major;
697 dnode->props.minor = minor;
698 dnode->props.new_name = NULL;
bb875bb9 699 dnode->props.size_changed = 0;
165e4a11
AK
700 } else if (strcmp(name, dnode->name)) {
701 /* Do we need to rename node? */
b4f1578f 702 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
703 log_error("name pool_strdup failed");
704 return 0;
705 }
706 }
707
708 dnode->props.read_only = read_only ? 1 : 0;
52b84409
AK
709 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
710 dnode->props.read_ahead_flags = 0;
165e4a11 711
b4f1578f
AK
712 if (clear_inactive && !_node_clear_table(dnode))
713 return_NULL;
165e4a11
AK
714
715 dnode->context = context;
f16aea9e 716 dnode->udev_flags = 0;
165e4a11
AK
717
718 return dnode;
719}
720
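/*
 * Illustrative usage sketch (not part of the original file): registering a
 * device that a later preload/activation pass will create and load.  The name
 * and UUID below are hypothetical; a major/minor of 0 lets the kernel pick
 * the device number (see _create_node above).
 */
static struct dm_tree_node *_example_add_new_lv(struct dm_tree *dtree)
{
        return dm_tree_add_new_dev(dtree, "vg0-lv0", "LVM-exampleuuid",
                                   0, 0, 0 /* read_only */,
                                   1 /* clear_inactive */, NULL /* context */);
}
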
f16aea9e
PR
721struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
722 const char *name,
723 const char *uuid,
724 uint32_t major,
725 uint32_t minor,
726 int read_only,
727 int clear_inactive,
728 void *context,
729 uint16_t udev_flags)
730{
731 struct dm_tree_node *node;
732
733 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
734 clear_inactive, context)))
735 node->udev_flags = udev_flags;
736
737 return node;
738}
739
83c606ae
JEB
740void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
742{
743 struct dm_info *dinfo = &dnode->info;
744
745 if (udev_flags != dnode->udev_flags)
746 log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
747 ") udev_flags from 0x%x to 0x%x",
748 dnode->name, dinfo->major, dinfo->minor,
749 dnode->udev_flags, udev_flags);
750 dnode->udev_flags = udev_flags;
751}
f16aea9e 752
52b84409
AK
753void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
754 uint32_t read_ahead,
755 uint32_t read_ahead_flags)
08e64ce5 756{
52b84409
AK
757 dnode->props.read_ahead = read_ahead;
758 dnode->props.read_ahead_flags = read_ahead_flags;
759}
760
76d1aec8
ZK
761void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
762 struct dm_tree_node *presuspend_node)
763{
764 node->presuspend_node = presuspend_node;
765}
766
b4f1578f 767int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
3d0480ed 768{
cda69e17
PR
769 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
770}
771
772int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
773 uint32_t minor, uint16_t udev_flags)
774{
775 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
3d0480ed
AK
776}
777
04bde319 778const char *dm_tree_node_get_name(const struct dm_tree_node *node)
3d0480ed
AK
779{
780 return node->info.exists ? node->name : "";
781}
782
04bde319 783const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
3d0480ed
AK
784{
785 return node->info.exists ? node->uuid : "";
786}
787
04bde319 788const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
3d0480ed
AK
789{
790 return &node->info;
791}
792
04bde319 793void *dm_tree_node_get_context(const struct dm_tree_node *node)
165e4a11
AK
794{
795 return node->context;
796}
797
04bde319 798int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
eb91c4ee
MB
799{
800 return dnode->props.size_changed;
801}
802
04bde319 803int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
3d0480ed
AK
804{
805 if (inverted) {
b4f1578f 806 if (_nodes_are_linked(&node->dtree->root, node))
3d0480ed 807 return 0;
2c44337b 808 return dm_list_size(&node->used_by);
3d0480ed
AK
809 }
810
b4f1578f 811 if (_nodes_are_linked(node, &node->dtree->root))
3d0480ed
AK
812 return 0;
813
2c44337b 814 return dm_list_size(&node->uses);
3d0480ed
AK
815}
816
2b69db1f
AK
817/*
818 * Returns 1 if no prefix supplied
819 */
820static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
821{
822 if (!uuid_prefix)
823 return 1;
824
825 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
826 return 1;
827
828 /* Handle transition: active device uuids might be missing the prefix */
829 if (uuid_prefix_len <= 4)
830 return 0;
831
87f98002 832 if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
872dea04
AK
833 return 0;
834
87f98002 835 if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
2b69db1f
AK
836 return 0;
837
87f98002 838 if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
2b69db1f
AK
839 return 1;
840
841 return 0;
842}
843
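/*
 * Illustrative sketch (not part of the original file): the cases accepted by
 * _uuid_prefix_matches() above.  The UUID strings are hypothetical.
 */
static void _example_uuid_prefix_matching(void)
{
        const char *prefix = "LVM-abc";

        /* Matches: no prefix supplied */
        (void) _uuid_prefix_matches("anything", NULL, 0);
        /* Matches: plain prefix comparison */
        (void) _uuid_prefix_matches("LVM-abcdef", prefix, strlen(prefix));
        /* Matches: transition case - device uuid lacking "LVM-" is compared
         * against the supplied prefix with its "LVM-" stripped */
        (void) _uuid_prefix_matches("abcdef", prefix, strlen(prefix));
}
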
690a5da2
AK
844/*
845 * Returns 1 if no children.
846 */
b4f1578f 847static int _children_suspended(struct dm_tree_node *node,
690a5da2
AK
848 uint32_t inverted,
849 const char *uuid_prefix,
850 size_t uuid_prefix_len)
851{
2c44337b 852 struct dm_list *list;
b4f1578f 853 struct dm_tree_link *dlink;
690a5da2
AK
854 const struct dm_info *dinfo;
855 const char *uuid;
856
857 if (inverted) {
b4f1578f 858 if (_nodes_are_linked(&node->dtree->root, node))
690a5da2
AK
859 return 1;
860 list = &node->used_by;
861 } else {
b4f1578f 862 if (_nodes_are_linked(node, &node->dtree->root))
690a5da2
AK
863 return 1;
864 list = &node->uses;
865 }
866
2c44337b 867 dm_list_iterate_items(dlink, list) {
b4f1578f 868 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
690a5da2
AK
869 stack;
870 continue;
871 }
872
873 /* Ignore if it doesn't belong to this VG */
2b69db1f 874 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2
AK
875 continue;
876
76d1aec8
ZK
877 /* Ignore if parent node wants to presuspend this node */
878 if (dlink->node->presuspend_node == node)
879 continue;
880
b4f1578f
AK
881 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
882 stack; /* FIXME Is this normal? */
690a5da2
AK
883 return 0;
884 }
885
886 if (!dinfo->suspended)
887 return 0;
888 }
889
890 return 1;
891}
892
3d0480ed
AK
893/*
894 * Set major and minor to zero for root of tree.
895 */
b4f1578f 896struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
3d0480ed
AK
897 uint32_t major,
898 uint32_t minor)
899{
900 if (!major && !minor)
b4f1578f 901 return &dtree->root;
3d0480ed 902
b4f1578f 903 return _find_dm_tree_node(dtree, major, minor);
3d0480ed
AK
904}
905
165e4a11
AK
906/*
907 * Set uuid to NULL for root of tree.
908 */
b4f1578f 909struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
910 const char *uuid)
911{
912 if (!uuid || !*uuid)
b4f1578f 913 return &dtree->root;
165e4a11 914
b4f1578f 915 return _find_dm_tree_node_by_uuid(dtree, uuid);
165e4a11
AK
916}
917
3d0480ed
AK
918/*
919 * First time set *handle to NULL.
920 * Set inverted to invert the tree.
921 */
b4f1578f 922struct dm_tree_node *dm_tree_next_child(void **handle,
04bde319
ZK
923 const struct dm_tree_node *parent,
924 uint32_t inverted)
3d0480ed 925{
2c44337b 926 struct dm_list **dlink = (struct dm_list **) handle;
04bde319 927 const struct dm_list *use_list;
3d0480ed
AK
928
929 if (inverted)
930 use_list = &parent->used_by;
931 else
932 use_list = &parent->uses;
933
934 if (!*dlink)
2c44337b 935 *dlink = dm_list_first(use_list);
3d0480ed 936 else
2c44337b 937 *dlink = dm_list_next(use_list, *dlink);
3d0480ed 938
2c44337b 939 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
3d0480ed
AK
940}
941
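/*
 * Illustrative usage sketch (not part of the original file): iterating the
 * direct children of the root node.  As documented above, the opaque handle
 * must start out as NULL.
 */
static void _example_walk_children(struct dm_tree *dtree)
{
        void *handle = NULL;
        struct dm_tree_node *root = dm_tree_find_node(dtree, 0, 0);
        struct dm_tree_node *child;

        while ((child = dm_tree_next_child(&handle, root, 0)))
                log_debug("Child node: %s", dm_tree_node_get_name(child));
}
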
3e8c6b73 942/*
a6d97ede 943 * Deactivate a device with its dependencies if the uuid prefix matches.
3e8c6b73 944 */
db208f51
AK
945static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
946 struct dm_info *info)
3e8c6b73
AK
947{
948 struct dm_task *dmt;
949 int r;
950
951 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
952 log_error("_info_by_dev: dm_task creation failed");
953 return 0;
954 }
955
956 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
957 log_error("_info_by_dev: Failed to set device number");
958 dm_task_destroy(dmt);
959 return 0;
960 }
961
db208f51
AK
962 if (!with_open_count && !dm_task_no_open_count(dmt))
963 log_error("Failed to disable open_count");
964
3e8c6b73
AK
965 if ((r = dm_task_run(dmt)))
966 r = dm_task_get_info(dmt, info);
967
968 dm_task_destroy(dmt);
969
970 return r;
971}
972
125712be
PR
973static int _check_device_not_in_use(struct dm_info *info)
974{
975 if (!info->exists)
976 return 1;
977
978 /* If sysfs is not used, use open_count information only. */
c3e5b497
PR
979 if (!*dm_sysfs_dir()) {
980 if (info->open_count) {
981 log_error("Device %" PRIu32 ":%" PRIu32 " in use",
982 info->major, info->minor);
983 return 0;
984 }
985
986 return 1;
987 }
125712be
PR
988
989 if (dm_device_has_holders(info->major, info->minor)) {
990 log_error("Device %" PRIu32 ":%" PRIu32 " is used "
991 "by another device.", info->major, info->minor);
992 return 0;
993 }
994
995 if (dm_device_has_mounted_fs(info->major, info->minor)) {
996 log_error("Device %" PRIu32 ":%" PRIu32 " contains "
997 "a filesystem in use.", info->major, info->minor);
998 return 0;
999 }
1000
1001 return 1;
1002}
1003
f3ef15ef
ZK
1004/* Check if all parent nodes of given node have open_count == 0 */
1005static int _node_has_closed_parents(struct dm_tree_node *node,
1006 const char *uuid_prefix,
1007 size_t uuid_prefix_len)
1008{
1009 struct dm_tree_link *dlink;
1010 const struct dm_info *dinfo;
1011 struct dm_info info;
1012 const char *uuid;
1013
1014 /* Iterate through parents of this node */
1015 dm_list_iterate_items(dlink, &node->used_by) {
1016 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
1017 stack;
1018 continue;
1019 }
1020
1021 /* Ignore if it doesn't belong to this VG */
1022 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1023 continue;
1024
1025 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
1026 stack; /* FIXME Is this normal? */
1027 return 0;
1028 }
1029
1030 /* Refresh open_count */
1031 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1032 !info.exists)
1033 continue;
1034
eb418883
ZK
1035 if (info.open_count) {
1036 log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
1037 dinfo->major, dinfo->minor, info.open_count);
f3ef15ef 1038 return 0;
eb418883 1039 }
f3ef15ef
ZK
1040 }
1041
1042 return 1;
1043}
1044
f16aea9e 1045static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
787200ef 1046 uint32_t *cookie, uint16_t udev_flags, int retry)
3e8c6b73
AK
1047{
1048 struct dm_task *dmt;
bd90c6b2 1049 int r = 0;
3e8c6b73
AK
1050
1051 log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1052
1053 if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
1054 log_error("Deactivation dm_task creation failed for %s", name);
1055 return 0;
1056 }
1057
1058 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1059 log_error("Failed to set device number for %s deactivation", name);
bd90c6b2 1060 goto out;
3e8c6b73
AK
1061 }
1062
1063 if (!dm_task_no_open_count(dmt))
1064 log_error("Failed to disable open_count");
1065
f16aea9e 1066 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1067 goto out;
1068
787200ef
PR
1069
1070 if (retry)
1071 dm_task_retry_remove(dmt);
1072
3e8c6b73
AK
1073 r = dm_task_run(dmt);
1074
0437bccc
AK
1075 /* FIXME Until kernel returns actual name so dm-iface.c can handle it */
1076 rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
9032898e 1077 dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));
165e4a11 1078
db208f51
AK
1079 /* FIXME Remove node from tree or mark invalid? */
1080
bd90c6b2 1081out:
db208f51
AK
1082 dm_task_destroy(dmt);
1083
1084 return r;
1085}
1086
bd90c6b2 1087static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
f16aea9e 1088 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
165e4a11
AK
1089{
1090 struct dm_task *dmt;
1091 int r = 0;
1092
1093 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
1094
1095 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
1096 log_error("Rename dm_task creation failed for %s", old_name);
1097 return 0;
1098 }
1099
1100 if (!dm_task_set_name(dmt, old_name)) {
1101 log_error("Failed to set name for %s rename.", old_name);
1102 goto out;
1103 }
1104
b4f1578f 1105 if (!dm_task_set_newname(dmt, new_name))
40e5fd8b 1106 goto_out;
165e4a11
AK
1107
1108 if (!dm_task_no_open_count(dmt))
1109 log_error("Failed to disable open_count");
1110
f16aea9e 1111 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1112 goto out;
1113
165e4a11
AK
1114 r = dm_task_run(dmt);
1115
1116out:
1117 dm_task_destroy(dmt);
1118
1119 return r;
1120}
1121
165e4a11
AK
1122/* FIXME Merge with _suspend_node? */
1123static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1124 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1125 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1126 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1127{
1128 struct dm_task *dmt;
bd90c6b2 1129 int r = 0;
165e4a11
AK
1130
1131 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1132
1133 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1134 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1135 return 0;
1136 }
1137
0b7d16bc
AK
1138 /* FIXME Kernel should fill in name on return instead */
1139 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1140 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1141 goto out;
0b7d16bc
AK
1142 }
1143
165e4a11
AK
1144 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1145 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1146 goto out;
165e4a11
AK
1147 }
1148
1149 if (!dm_task_no_open_count(dmt))
1150 log_error("Failed to disable open_count");
1151
52b84409
AK
1152 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1153 log_error("Failed to set read ahead");
1154
f16aea9e 1155 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1156 goto_out;
bd90c6b2 1157
9a8f192a
ZK
1158 if (!(r = dm_task_run(dmt)))
1159 goto_out;
1160
1161 if (already_suspended)
1162 dec_suspended();
1163
1164 if (!(r = dm_task_get_info(dmt, newinfo)))
1165 stack;
165e4a11 1166
bd90c6b2 1167out:
165e4a11
AK
1168 dm_task_destroy(dmt);
1169
1170 return r;
1171}
1172
db208f51 1173static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
b9ffd32c 1174 int skip_lockfs, int no_flush, struct dm_info *newinfo)
db208f51
AK
1175{
1176 struct dm_task *dmt;
1177 int r;
1178
b9ffd32c
AK
1179 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1180 name, major, minor,
1181 skip_lockfs ? "" : " with filesystem sync",
6e1898a5 1182 no_flush ? "" : " with device flush");
db208f51
AK
1183
1184 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1185 log_error("Suspend dm_task creation failed for %s", name);
1186 return 0;
1187 }
1188
1189 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1190 log_error("Failed to set device number for %s suspension.", name);
1191 dm_task_destroy(dmt);
1192 return 0;
1193 }
1194
1195 if (!dm_task_no_open_count(dmt))
1196 log_error("Failed to disable open_count");
1197
c55b1410
AK
1198 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1199 log_error("Failed to set skip_lockfs flag.");
1200
b9ffd32c
AK
1201 if (no_flush && !dm_task_no_flush(dmt))
1202 log_error("Failed to set no_flush flag.");
1203
1840aa09
AK
1204 if ((r = dm_task_run(dmt))) {
1205 inc_suspended();
db208f51 1206 r = dm_task_get_info(dmt, newinfo);
1840aa09 1207 }
db208f51 1208
3e8c6b73
AK
1209 dm_task_destroy(dmt);
1210
1211 return r;
1212}
1213
e0ea24be
ZK
1214static int _check_thin_pool_transaction_id(const char *name, uint32_t major, uint32_t minor,
1215 uint64_t transaction_id)
1216{
1217 struct dm_task *dmt;
1218 int r = 0;
1219 uint64_t start, length;
1220 char *type = NULL;
1221 char *params = NULL;
1222 uint64_t t_id = transaction_id; // FIXME: fake
1223
1224 log_verbose("Checking transaction id %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1225
1226 if (!(dmt = dm_task_create(DM_DEVICE_STATUS))) {
1227 log_debug("Device status dm_task creation failed for %s.", name);
1228 return 0;
1229 }
1230
1231 if (!dm_task_set_name(dmt, name)) {
1232 log_debug("Failed to set device name for %s status.", name);
1233 goto out;
1234 }
1235
1236 if (!dm_task_set_major_minor(dmt, major, minor, 1)) {
1237 log_error("Failed to set device number for %s status.", name);
1238 goto out;
1239 }
1240
1241 if (!dm_task_no_open_count(dmt))
1242 log_error("Failed to disable open_count");
1243
1244 if (!(r = dm_task_run(dmt)))
1245 goto_out;
1246
1247 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1248 log_verbose("PARSE params %s", params); // FIXME: parse status
1249
1250 r = (transaction_id == t_id);
1251
1252out:
1253 dm_task_destroy(dmt);
1254
1255 return r;
1256}
1257
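/*
 * Illustrative sketch (not part of the original file) of the status parsing
 * the FIXMEs above refer to.  It assumes the thin-pool status line begins
 * with the transaction id; that assumption should be checked against the
 * kernel's thin-pool target documentation before relying on it.
 */
static int _example_parse_thin_pool_transaction_id(const char *params,
                                                   uint64_t *transaction_id)
{
        if (!params || sscanf(params, "%" SCNu64, transaction_id) != 1) {
                log_error("Failed to parse transaction id from thin-pool status.");
                return 0;
        }

        return 1;
}
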
18e0f934
AK
1258/*
1259 * FIXME Don't attempt to deactivate known internal dependencies.
1260 */
1261static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1262 const char *uuid_prefix,
1263 size_t uuid_prefix_len,
1264 unsigned level)
3e8c6b73 1265{
b7eb2ad0 1266 int r = 1;
3e8c6b73 1267 void *handle = NULL;
b4f1578f 1268 struct dm_tree_node *child = dnode;
3e8c6b73
AK
1269 struct dm_info info;
1270 const struct dm_info *dinfo;
1271 const char *name;
1272 const char *uuid;
1273
b4f1578f
AK
1274 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1275 if (!(dinfo = dm_tree_node_get_info(child))) {
3e8c6b73
AK
1276 stack;
1277 continue;
1278 }
1279
b4f1578f 1280 if (!(name = dm_tree_node_get_name(child))) {
3e8c6b73
AK
1281 stack;
1282 continue;
1283 }
1284
b4f1578f 1285 if (!(uuid = dm_tree_node_get_uuid(child))) {
3e8c6b73
AK
1286 stack;
1287 continue;
1288 }
1289
1290 /* Ignore if it doesn't belong to this VG */
2b69db1f 1291 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
3e8c6b73 1292 continue;
3e8c6b73
AK
1293
1294 /* Refresh open_count */
db208f51 1295 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
f55021f4 1296 !info.exists)
3e8c6b73
AK
1297 continue;
1298
125712be
PR
1299 if (!_check_device_not_in_use(&info))
1300 continue;
1301
f3ef15ef 1302 /* Also check open_count in the parent nodes of presuspend_node */
125712be 1303 if ((child->presuspend_node &&
f3ef15ef
ZK
1304 !_node_has_closed_parents(child->presuspend_node,
1305 uuid_prefix, uuid_prefix_len))) {
18e0f934
AK
1306 /* Only report error from (likely non-internal) dependency at top level */
1307 if (!level) {
1308 log_error("Unable to deactivate open %s (%" PRIu32
1309 ":%" PRIu32 ")", name, info.major,
1310 info.minor);
1311 r = 0;
1312 }
f55021f4
AK
1313 continue;
1314 }
1315
76d1aec8
ZK
1316 /* Suspend child node first if requested */
1317 if (child->presuspend_node &&
1318 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1319 continue;
1320
f16aea9e 1321 if (!_deactivate_node(name, info.major, info.minor,
787200ef
PR
1322 &child->dtree->cookie, child->udev_flags,
1323 child->dtree->retry_remove)) {
3e8c6b73
AK
1324 log_error("Unable to deactivate %s (%" PRIu32
1325 ":%" PRIu32 ")", name, info.major,
1326 info.minor);
b7eb2ad0 1327 r = 0;
3e8c6b73 1328 continue;
f4249251
AK
1329 } else if (info.suspended)
1330 dec_suspended();
3e8c6b73 1331
18e0f934
AK
1332 if (dm_tree_node_num_children(child, 0)) {
1333 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
b7eb2ad0 1334 return_0;
18e0f934 1335 }
3e8c6b73
AK
1336 }
1337
b7eb2ad0 1338 return r;
3e8c6b73 1339}
db208f51 1340
18e0f934
AK
1341int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1342 const char *uuid_prefix,
1343 size_t uuid_prefix_len)
1344{
1345 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1346}
1347
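/*
 * Illustrative usage sketch (not part of the original file): deactivating
 * every child of a node that belongs to one VG, selected by uuid prefix.
 * The prefix string is hypothetical.
 */
static int _example_deactivate_vg(struct dm_tree_node *root)
{
        const char *vg_uuid_prefix = "LVM-exampleVGuuid";

        return dm_tree_deactivate_children(root, vg_uuid_prefix,
                                           strlen(vg_uuid_prefix));
}
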
c55b1410
AK
1348void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1349{
1350 dnode->dtree->skip_lockfs = 1;
1351}
1352
b9ffd32c
AK
1353void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1354{
1355 dnode->dtree->no_flush = 1;
1356}
1357
787200ef
PR
1358void dm_tree_retry_remove(struct dm_tree_node *dnode)
1359{
1360 dnode->dtree->retry_remove = 1;
1361}
1362
b4f1578f 1363int dm_tree_suspend_children(struct dm_tree_node *dnode,
08e64ce5
ZK
1364 const char *uuid_prefix,
1365 size_t uuid_prefix_len)
db208f51 1366{
68085c93 1367 int r = 1;
db208f51 1368 void *handle = NULL;
b4f1578f 1369 struct dm_tree_node *child = dnode;
db208f51
AK
1370 struct dm_info info, newinfo;
1371 const struct dm_info *dinfo;
1372 const char *name;
1373 const char *uuid;
1374
690a5da2 1375 /* Suspend nodes at this level of the tree */
b4f1578f
AK
1376 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1377 if (!(dinfo = dm_tree_node_get_info(child))) {
db208f51
AK
1378 stack;
1379 continue;
1380 }
1381
b4f1578f 1382 if (!(name = dm_tree_node_get_name(child))) {
db208f51
AK
1383 stack;
1384 continue;
1385 }
1386
b4f1578f 1387 if (!(uuid = dm_tree_node_get_uuid(child))) {
db208f51
AK
1388 stack;
1389 continue;
1390 }
1391
1392 /* Ignore if it doesn't belong to this VG */
2b69db1f 1393 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
db208f51
AK
1394 continue;
1395
690a5da2
AK
1396 /* Ensure immediate parents are already suspended */
1397 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1398 continue;
1399
db208f51 1400 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
b700541f 1401 !info.exists || info.suspended)
db208f51
AK
1402 continue;
1403
c55b1410 1404 if (!_suspend_node(name, info.major, info.minor,
b9ffd32c
AK
1405 child->dtree->skip_lockfs,
1406 child->dtree->no_flush, &newinfo)) {
db208f51
AK
1407 log_error("Unable to suspend %s (%" PRIu32
1408 ":%" PRIu32 ")", name, info.major,
1409 info.minor);
68085c93 1410 r = 0;
db208f51
AK
1411 continue;
1412 }
1413
1414 /* Update cached info */
1415 child->info = newinfo;
690a5da2
AK
1416 }
1417
1418 /* Then suspend any child nodes */
1419 handle = NULL;
1420
b4f1578f
AK
1421 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1422 if (!(uuid = dm_tree_node_get_uuid(child))) {
690a5da2
AK
1423 stack;
1424 continue;
1425 }
1426
1427 /* Ignore if it doesn't belong to this VG */
87f98002 1428 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2 1429 continue;
db208f51 1430
b4f1578f 1431 if (dm_tree_node_num_children(child, 0))
68085c93
MS
1432 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1433 return_0;
db208f51
AK
1434 }
1435
68085c93 1436 return r;
db208f51
AK
1437}
1438
b4f1578f 1439int dm_tree_activate_children(struct dm_tree_node *dnode,
db208f51
AK
1440 const char *uuid_prefix,
1441 size_t uuid_prefix_len)
1442{
2ca6b865 1443 int r = 1;
db208f51 1444 void *handle = NULL;
b4f1578f 1445 struct dm_tree_node *child = dnode;
165e4a11
AK
1446 struct dm_info newinfo;
1447 const char *name;
db208f51 1448 const char *uuid;
56c28292 1449 int priority;
db208f51 1450
165e4a11 1451 /* Activate children first */
b4f1578f
AK
1452 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1453 if (!(uuid = dm_tree_node_get_uuid(child))) {
165e4a11
AK
1454 stack;
1455 continue;
db208f51
AK
1456 }
1457
908db078
AK
1458 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1459 continue;
db208f51 1460
b4f1578f 1461 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
1462 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1463 return_0;
56c28292 1464 }
165e4a11 1465
56c28292 1466 handle = NULL;
165e4a11 1467
aa6f4e51 1468 for (priority = 0; priority < 3; priority++) {
56c28292 1469 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
a5a31ce9
ZK
1470 if (priority != child->activation_priority)
1471 continue;
1472
56c28292
AK
1473 if (!(uuid = dm_tree_node_get_uuid(child))) {
1474 stack;
1475 continue;
165e4a11 1476 }
165e4a11 1477
56c28292
AK
1478 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1479 continue;
165e4a11 1480
56c28292
AK
1481 if (!(name = dm_tree_node_get_name(child))) {
1482 stack;
1483 continue;
1484 }
1485
1486 /* Rename? */
1487 if (child->props.new_name) {
bd90c6b2 1488 if (!_rename_node(name, child->props.new_name, child->info.major,
f16aea9e
PR
1489 child->info.minor, &child->dtree->cookie,
1490 child->udev_flags)) {
56c28292
AK
1491 log_error("Failed to rename %s (%" PRIu32
1492 ":%" PRIu32 ") to %s", name, child->info.major,
1493 child->info.minor, child->props.new_name);
1494 return 0;
1495 }
1496 child->name = child->props.new_name;
1497 child->props.new_name = NULL;
1498 }
1499
1500 if (!child->info.inactive_table && !child->info.suspended)
1501 continue;
1502
bafa2f39 1503 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 1504 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09 1505 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
56c28292 1506 log_error("Unable to resume %s (%" PRIu32
bafa2f39 1507 ":%" PRIu32 ")", child->name, child->info.major,
56c28292 1508 child->info.minor);
2ca6b865 1509 r = 0;
56c28292
AK
1510 continue;
1511 }
1512
1513 /* Update cached info */
1514 child->info = newinfo;
1515 }
db208f51
AK
1516 }
1517
165e4a11
AK
1518 handle = NULL;
1519
2ca6b865 1520 return r;
165e4a11
AK
1521}
1522
b4f1578f 1523static int _create_node(struct dm_tree_node *dnode)
165e4a11
AK
1524{
1525 int r = 0;
1526 struct dm_task *dmt;
1527
1528 log_verbose("Creating %s", dnode->name);
1529
1530 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1531 log_error("Create dm_task creation failed for %s", dnode->name);
1532 return 0;
1533 }
1534
1535 if (!dm_task_set_name(dmt, dnode->name)) {
1536 log_error("Failed to set device name for %s", dnode->name);
1537 goto out;
1538 }
1539
1540 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1541 log_error("Failed to set uuid for %s", dnode->name);
1542 goto out;
1543 }
1544
1545 if (dnode->props.major &&
1546 (!dm_task_set_major(dmt, dnode->props.major) ||
1547 !dm_task_set_minor(dmt, dnode->props.minor))) {
1548 log_error("Failed to set device number for %s creation.", dnode->name);
1549 goto out;
1550 }
1551
1552 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1553 log_error("Failed to set read only flag for %s", dnode->name);
1554 goto out;
1555 }
1556
1557 if (!dm_task_no_open_count(dmt))
1558 log_error("Failed to disable open_count");
1559
1560 if ((r = dm_task_run(dmt)))
1561 r = dm_task_get_info(dmt, &dnode->info);
1562
1563out:
1564 dm_task_destroy(dmt);
1565
1566 return r;
1567}
1568
1569
b4f1578f 1570static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1571{
1572 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1573 log_error("Failed to format %s device number for %s as dm "
1574 "target (%u,%u)",
1575 node->name, node->uuid, node->info.major, node->info.minor);
1576 return 0;
165e4a11
AK
1577 }
1578
1579 return 1;
1580}
1581
ffa9b6a5
ZK
1582/* Simplify string-emitting code */
1583#define EMIT_PARAMS(p, str...)\
7b6c011c
AK
1584do {\
1585 int w;\
1586 if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
1587 stack; /* Out of space */\
1588 return -1;\
1589 }\
1590 p += w;\
1591} while (0)
ffa9b6a5 1592
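/*
 * Illustrative sketch (not part of the original file): EMIT_PARAMS() appends
 * formatted text at offset 'p' into the enclosing function's params buffer of
 * size paramsize, and makes that function return -1 if the buffer is too
 * small - so it can only be used inside int-returning functions that declare
 * 'params' and 'paramsize', as the emitters below do.
 */
static int _example_emit(char *params, size_t paramsize)
{
        int pos = 0;

        EMIT_PARAMS(pos, "%s %u", "example-target", 2u);
        EMIT_PARAMS(pos, " %d extra", 42);

        return pos;
}
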
3c74075f
JEB
1593/*
1594 * _emit_areas_line
1595 *
1596 * Returns: 1 on success, 0 on failure, -1 if EMIT_PARAMS overflows the params buffer
1597 */
08f1ddea 1598static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
4dcaa230
AK
1599 struct load_segment *seg, char *params,
1600 size_t paramsize, int *pos)
165e4a11
AK
1601{
1602 struct seg_area *area;
7d7d93ac 1603 char devbuf[DM_FORMAT_DEV_BUFSIZE];
609faae9 1604 unsigned first_time = 1;
db3c1ac1 1605 const char *logtype, *synctype;
b262f3e1 1606 unsigned log_parm_count;
165e4a11 1607
2c44337b 1608 dm_list_iterate_items(area, &seg->areas) {
b262f3e1
ZK
1609 switch (seg->type) {
1610 case SEG_REPLICATOR_DEV:
6d04311e
JEB
1611 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1612 return_0;
1613
b262f3e1
ZK
1614 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1615 if (first_time)
1616 EMIT_PARAMS(*pos, " nolog 0");
1617 else {
1618 /* Remote devices */
1619 log_parm_count = (area->flags &
1620 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1621
1622 if (!area->slog) {
1623 devbuf[0] = 0; /* Only core log parameters */
1624 logtype = "core";
1625 } else {
1626 devbuf[0] = ' '; /* Extra space before device name */
1627 if (!_build_dev_string(devbuf + 1,
1628 sizeof(devbuf) - 1,
1629 area->slog))
1630 return_0;
1631 logtype = "disk";
1632 log_parm_count++; /* Extra sync log device name parameter */
1633 }
1634
1635 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1636 log_parm_count, devbuf, area->region_size);
1637
db3c1ac1
AK
1638 synctype = (area->flags & DM_NOSYNC) ?
1639 " nosync" : (area->flags & DM_FORCESYNC) ?
1640 " sync" : NULL;
b262f3e1 1641
db3c1ac1
AK
1642 if (synctype)
1643 EMIT_PARAMS(*pos, "%s", synctype);
b262f3e1
ZK
1644 }
1645 break;
cac52ca4
JEB
1646 case SEG_RAID1:
1647 case SEG_RAID4:
1648 case SEG_RAID5_LA:
1649 case SEG_RAID5_RA:
1650 case SEG_RAID5_LS:
1651 case SEG_RAID5_RS:
1652 case SEG_RAID6_ZR:
1653 case SEG_RAID6_NR:
1654 case SEG_RAID6_NC:
6d04311e
JEB
1655 if (!area->dev_node) {
1656 EMIT_PARAMS(*pos, " -");
1657 break;
1658 }
1659 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1660 return_0;
1661
cac52ca4
JEB
1662 EMIT_PARAMS(*pos, " %s", devbuf);
1663 break;
b262f3e1 1664 default:
6d04311e
JEB
1665 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1666 return_0;
1667
b262f3e1
ZK
1668 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1669 devbuf, area->offset);
1670 }
609faae9
AK
1671
1672 first_time = 0;
165e4a11
AK
1673 }
1674
1675 return 1;
1676}
1677
b262f3e1
ZK
1678static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
1679 size_t paramsize, int *pos)
1680{
1681 const struct load_segment *rlog_seg;
1682 struct replicator_site *rsite;
1683 char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
1684 unsigned parm_count;
1685
1686 if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
1687 return_0;
1688
1689 rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
1690 struct load_segment);
1691
1692 EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
1693 seg->rlog_type, rlogbuf, rlog_seg->size);
1694
1695 dm_list_iterate_items(rsite, &seg->rsites) {
1696 parm_count = (rsite->fall_behind_data
1697 || rsite->fall_behind_ios
1698 || rsite->async_timeout) ? 4 : 2;
1699
1700 EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
1701 (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");
1702
1703 if (rsite->fall_behind_data)
1704 EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
1705 else if (rsite->fall_behind_ios)
1706 EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
1707 else if (rsite->async_timeout)
1708 EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
1709 }
1710
1711 return 1;
1712}
1713
3c74075f 1714/*
3c74075f
JEB
1715 * Returns: 1 on success, 0 on failure
1716 */
beecb1e1
ZK
1717static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
1718 char *params, size_t paramsize)
165e4a11 1719{
8f26e18c
JEB
1720 int block_on_error = 0;
1721 int handle_errors = 0;
1722 int dm_log_userspace = 0;
1723 struct utsname uts;
dbcb64b8 1724 unsigned log_parm_count;
b39fdcf4 1725 int pos = 0, parts;
7d7d93ac 1726 char logbuf[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 1727 const char *logtype;
b39fdcf4 1728 unsigned kmaj = 0, kmin = 0, krel = 0;
165e4a11 1729
b39fdcf4
MB
1730 if (uname(&uts) == -1) {
1731 log_error("Cannot read kernel release version.");
1732 return 0;
1733 }
1734
1735 /* Kernels with a major number of 2 always had 3 parts. */
1736 parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
1737 if (parts < 1 || (kmaj < 3 && parts < 3)) {
1738 log_error("Wrong kernel release version %s.", uts.release);
30a65310
ZK
1739 return 0;
1740 }
67b25ed4 1741
8f26e18c
JEB
1742 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1743 /*
1744 * Originally, block_on_error was an argument to the log
1745 * portion of the mirror CTR table. It was renamed to
1746 * "handle_errors" and now resides in the 'features'
1747 * section of the mirror CTR table (i.e. at the end).
1748 *
1749 * We can identify whether to use "block_on_error" or
1750 * "handle_errors" by the dm-mirror module's version
1751 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1752 */
ba61f848 1753 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
8f26e18c
JEB
1754 handle_errors = 1;
1755 else
1756 block_on_error = 1;
1757 }
1758
1759 if (seg->clustered) {
1760 /* Cluster mirrors require a UUID */
1761 if (!seg->uuid)
1762 return_0;
1763
1764 /*
1765 * Cluster mirrors used to have their own log
1766 * types. Now they are accessed through the
1767 * userspace log type.
1768 *
1769 * The dm-log-userspace module was added to the
1770 * 2.6.31 kernel.
1771 */
ba61f848 1772 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
8f26e18c
JEB
1773 dm_log_userspace = 1;
1774 }
1775
1776 /* Region size */
1777 log_parm_count = 1;
1778
1779 /* [no]sync, block_on_error etc. */
1780 log_parm_count += hweight32(seg->flags);
311d6d81 1781
8f26e18c
JEB
1782 /* "handle_errors" is a feature arg now */
1783 if (handle_errors)
1784 log_parm_count--;
1785
1786 /* DM_CORELOG does not count in the param list */
1787 if (seg->flags & DM_CORELOG)
1788 log_parm_count--;
1789
1790 if (seg->clustered) {
1791 log_parm_count++; /* For UUID */
1792
1793 if (!dm_log_userspace)
ffa9b6a5 1794 EMIT_PARAMS(pos, "clustered-");
49b95a5e
JEB
1795 else
1796 /* For clustered-* type field inserted later */
1797 log_parm_count++;
8f26e18c 1798 }
dbcb64b8 1799
8f26e18c
JEB
1800 if (!seg->log)
1801 logtype = "core";
1802 else {
1803 logtype = "disk";
1804 log_parm_count++;
1805 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1806 return_0;
1807 }
dbcb64b8 1808
8f26e18c
JEB
1809 if (dm_log_userspace)
1810 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1811 log_parm_count, seg->uuid, logtype);
1812 else
ffa9b6a5 1813 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
dbcb64b8 1814
8f26e18c
JEB
1815 if (seg->log)
1816 EMIT_PARAMS(pos, " %s", logbuf);
1817
1818 EMIT_PARAMS(pos, " %u", seg->region_size);
dbcb64b8 1819
8f26e18c
JEB
1820 if (seg->clustered && !dm_log_userspace)
1821 EMIT_PARAMS(pos, " %s", seg->uuid);
67b25ed4 1822
8f26e18c
JEB
1823 if ((seg->flags & DM_NOSYNC))
1824 EMIT_PARAMS(pos, " nosync");
1825 else if ((seg->flags & DM_FORCESYNC))
1826 EMIT_PARAMS(pos, " sync");
dbcb64b8 1827
8f26e18c
JEB
1828 if (block_on_error)
1829 EMIT_PARAMS(pos, " block_on_error");
1830
1831 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1832
5f3325fc 1833 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
3c74075f 1834 return_0;
dbcb64b8 1835
8f26e18c
JEB
1836 if (handle_errors)
1837 EMIT_PARAMS(pos, " 1 handle_errors");
ffa9b6a5 1838
3c74075f 1839 return 1;
8f26e18c
JEB
1840}
1841
cac52ca4
JEB
1842static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1843 uint32_t minor, struct load_segment *seg,
1844 uint64_t *seg_start, char *params,
1845 size_t paramsize)
1846{
ad2432dc 1847 uint32_t i;
cac52ca4
JEB
1848 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1849 int pos = 0;
1850
1851 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1852 param_count++;
1853
1854 if (seg->region_size)
1855 param_count += 2;
1856
ad2432dc
MB
1857 /* rebuilds is 64-bit */
1858 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
1859 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 1860
cac52ca4
JEB
1861 if ((seg->type == SEG_RAID1) && seg->stripe_size)
1862 log_error("WARNING: Ignoring RAID1 stripe size");
1863
1864 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
1865 param_count, seg->stripe_size);
1866
1867 if (seg->flags & DM_NOSYNC)
1868 EMIT_PARAMS(pos, " nosync");
1869 else if (seg->flags & DM_FORCESYNC)
1870 EMIT_PARAMS(pos, " sync");
1871
1872 if (seg->region_size)
1873 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
1874
f439e65b
JEB
1875 for (i = 0; i < (seg->area_count / 2); i++)
1876 		if (seg->rebuilds & (1ULL << i))
1877 EMIT_PARAMS(pos, " rebuild %u", i);
1878
cac52ca4
JEB
1879 /* Print number of metadata/data device pairs */
1880 EMIT_PARAMS(pos, " %u", seg->area_count/2);
1881
1882 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
1883 return_0;
1884
1885 return 1;
1886}
1887
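/*
 * A rough sketch of the parameters built by _raid_emit_segment_line()
 * above.  The device-mapper target loaded is "raid"; the specific level
 * string ("raid1", "raid5_ls", "raid6_zr", ...) becomes the first
 * parameter:
 *
 *   <raid_level> <#params> <chunk_size> [nosync|sync]
 *   [region_size <sectors>] [rebuild <idx> ...]
 *   <#dev_pairs> <meta_dev> <data_dev> [<meta_dev> <data_dev> ...]
 */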
8f26e18c
JEB
1888static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
1889 uint32_t minor, struct load_segment *seg,
1890 uint64_t *seg_start, char *params,
1891 size_t paramsize)
1892{
1893 int pos = 0;
1894 int r;
cac52ca4 1895 int target_type_is_raid = 0;
8f26e18c 1896 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
4251236e 1897 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 1898
8f26e18c
JEB
1899 switch(seg->type) {
1900 case SEG_ERROR:
1901 case SEG_ZERO:
1902 case SEG_LINEAR:
1903 break;
1904 case SEG_MIRRORED:
1905 /* Mirrors are pretty complicated - now in separate function */
beecb1e1 1906 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
3c74075f
JEB
1907 if (!r)
1908 return_0;
165e4a11 1909 break;
b262f3e1
ZK
1910 case SEG_REPLICATOR:
1911 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
1912 &pos)) <= 0) {
1913 stack;
1914 return r;
1915 }
1916 break;
1917 case SEG_REPLICATOR_DEV:
1918 if (!seg->replicator || !_build_dev_string(originbuf,
1919 sizeof(originbuf),
1920 seg->replicator))
1921 return_0;
1922
1923 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
1924 break;
165e4a11 1925 case SEG_SNAPSHOT:
aa6f4e51 1926 case SEG_SNAPSHOT_MERGE:
b4f1578f
AK
1927 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1928 return_0;
1929 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
1930 return_0;
ffa9b6a5
ZK
1931 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
1932 seg->persistent ? 'P' : 'N', seg->chunk_size);
165e4a11
AK
1933 break;
1934 case SEG_SNAPSHOT_ORIGIN:
b4f1578f
AK
1935 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
1936 return_0;
ffa9b6a5 1937 EMIT_PARAMS(pos, "%s", originbuf);
165e4a11
AK
1938 break;
1939 case SEG_STRIPED:
609faae9 1940 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
165e4a11 1941 break;
12ca060e 1942 case SEG_CRYPT:
609faae9 1943 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
12ca060e
MB
1944 seg->chainmode ? "-" : "", seg->chainmode ?: "",
1945 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
1946 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
1947 seg->iv_offset : *seg_start);
1948 break;
cac52ca4
JEB
1949 case SEG_RAID1:
1950 case SEG_RAID4:
1951 case SEG_RAID5_LA:
1952 case SEG_RAID5_RA:
1953 case SEG_RAID5_LS:
1954 case SEG_RAID5_RS:
1955 case SEG_RAID6_ZR:
1956 case SEG_RAID6_NR:
1957 case SEG_RAID6_NC:
1958 target_type_is_raid = 1;
1959 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
1960 params, paramsize);
1961 if (!r)
1962 return_0;
1963
1964 break;
4251236e
ZK
1965 case SEG_THIN_POOL:
1966 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
1967 return_0;
1968 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
1969 return_0;
1970 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
460c5991
ZK
1971 seg->data_block_size, seg->low_water_mark_size,
1972 seg->skip_block_zeroing ? "1 skip_block_zeroing" : "");
4251236e
ZK
1973 break;
1974 case SEG_THIN:
1975 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
1976 return_0;
1977 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
1978 break;
165e4a11
AK
1979 }
1980
1981 switch(seg->type) {
1982 case SEG_ERROR:
b262f3e1 1983 case SEG_REPLICATOR:
165e4a11
AK
1984 case SEG_SNAPSHOT:
1985 case SEG_SNAPSHOT_ORIGIN:
aa6f4e51 1986 case SEG_SNAPSHOT_MERGE:
165e4a11 1987 case SEG_ZERO:
4251236e
ZK
1988 case SEG_THIN_POOL:
1989 case SEG_THIN:
165e4a11 1990 break;
12ca060e 1991 case SEG_CRYPT:
165e4a11 1992 case SEG_LINEAR:
b262f3e1 1993 case SEG_REPLICATOR_DEV:
165e4a11
AK
1994 case SEG_STRIPED:
1995 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
1996 stack;
1997 return r;
1998 }
b6793963
AK
1999 if (!params[0]) {
2000 log_error("No parameters supplied for %s target "
2001 "%u:%u.", dm_segtypes[seg->type].target,
812e10ac 2002 major, minor);
b6793963
AK
2003 return 0;
2004 }
165e4a11
AK
2005 break;
2006 }
2007
4b2cae46
AK
2008 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
2009 " %" PRIu64 " %s %s", major, minor,
f439e65b
JEB
2010 *seg_start, seg->size, target_type_is_raid ? "raid" :
2011 dm_segtypes[seg->type].target, params);
165e4a11 2012
cac52ca4
JEB
2013 if (!dm_task_add_target(dmt, *seg_start, seg->size,
2014 target_type_is_raid ? "raid" :
2015 dm_segtypes[seg->type].target, params))
b4f1578f 2016 return_0;
165e4a11
AK
2017
2018 *seg_start += seg->size;
2019
2020 return 1;
2021}
2022
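/*
 * Example for orientation (made-up numbers): a SEG_LINEAR segment of
 * 204800 sectors starting at 0 and backed by 253:3 at offset 384 would
 * result in dm_task_add_target() above receiving roughly
 *
 *   start=0  size=204800  target="linear"  params="253:3 384"
 */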
ffa9b6a5
ZK
2023#undef EMIT_PARAMS
2024
4b2cae46
AK
2025static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2026 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2027{
2028 char *params;
2029 size_t paramsize = 4096;
2030 int ret;
2031
2032 do {
2033 if (!(params = dm_malloc(paramsize))) {
2034 log_error("Insufficient space for target parameters.");
2035 return 0;
2036 }
2037
12ea7cb1 2038 params[0] = '\0';
4b2cae46
AK
2039 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2040 params, paramsize);
165e4a11
AK
2041 dm_free(params);
2042
2043 if (!ret)
2044 stack;
2045
2046 if (ret >= 0)
2047 return ret;
2048
2049 log_debug("Insufficient space in params[%" PRIsize_t
2050 "] for target parameters.", paramsize);
2051
2052 paramsize *= 2;
2053 } while (paramsize < MAX_TARGET_PARAMSIZE);
2054
2055 log_error("Target parameter size too big. Aborting.");
2056 return 0;
2057}
2058
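/*
 * Note on _emit_segment() above: the params buffer starts at 4096 bytes
 * and is doubled whenever _emit_segment_line() returns a negative value
 * (parameters did not fit), giving up once MAX_TARGET_PARAMSIZE would be
 * exceeded.
 */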
b4f1578f 2059static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2060{
2061 int r = 0;
2062 struct dm_task *dmt;
2063 struct load_segment *seg;
df390f17 2064 uint64_t seg_start = 0, existing_table_size;
165e4a11 2065
4b2cae46
AK
2066 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2067 dnode->info.major, dnode->info.minor);
165e4a11
AK
2068
2069 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2070 log_error("Reload dm_task creation failed for %s", dnode->name);
2071 return 0;
2072 }
2073
2074 if (!dm_task_set_major(dmt, dnode->info.major) ||
2075 !dm_task_set_minor(dmt, dnode->info.minor)) {
2076 log_error("Failed to set device number for %s reload.", dnode->name);
2077 goto out;
2078 }
2079
2080 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2081 log_error("Failed to set read only flag for %s", dnode->name);
2082 goto out;
2083 }
2084
2085 if (!dm_task_no_open_count(dmt))
2086 log_error("Failed to disable open_count");
2087
2c44337b 2088 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2089 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2090 seg, &seg_start))
b4f1578f 2091 goto_out;
165e4a11 2092
ec289b64
AK
2093 if (!dm_task_suppress_identical_reload(dmt))
2094 log_error("Failed to suppress reload of identical tables.");
2095
2096 if ((r = dm_task_run(dmt))) {
165e4a11 2097 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2098 if (r && !dnode->info.inactive_table)
2099 log_verbose("Suppressed %s identical table reload.",
2100 dnode->name);
bb875bb9 2101
df390f17 2102 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2103 if ((dnode->props.size_changed =
df390f17 2104 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2105 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2106 PRIu64 " for %s", existing_table_size,
bb875bb9 2107 seg_start, dnode->name);
df390f17
AK
2108 /*
2109 * Kernel usually skips size validation on zero-length devices
2110 			 * now, so there is no need to preload them.
2111 */
2112 /* FIXME In which kernel version did this begin? */
2113 if (!existing_table_size && dnode->props.delay_resume_if_new)
2114 dnode->props.size_changed = 0;
2115 }
ec289b64 2116 }
165e4a11
AK
2117
2118 dnode->props.segment_count = 0;
2119
2120out:
2121 dm_task_destroy(dmt);
2122
2123 return r;
165e4a11
AK
2124}
2125
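/*
 * Summary of _load_node() above: it builds a DM_DEVICE_RELOAD task with
 * one dm_task_add_target() call per queued load_segment, suppresses
 * reloads of identical tables, and records in props.size_changed whether
 * the new table size differs from the existing one - this drives the
 * immediate-resume decision in dm_tree_preload_children() below.
 */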
b4f1578f 2126int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2127 const char *uuid_prefix,
2128 size_t uuid_prefix_len)
165e4a11 2129{
2ca6b865 2130 int r = 1;
165e4a11 2131 void *handle = NULL;
b4f1578f 2132 struct dm_tree_node *child;
165e4a11 2133 struct dm_info newinfo;
566515c0 2134 int update_devs_flag = 0;
165e4a11
AK
2135
2136 /* Preload children first */
b4f1578f 2137 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2138 /* Skip existing non-device-mapper devices */
2139 if (!child->info.exists && child->info.major)
2140 continue;
2141
2142 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2143 if (child->info.exists &&
2144 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2145 continue;
2146
b4f1578f 2147 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2148 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2149 return_0;
165e4a11 2150
165e4a11
AK
2151 /* FIXME Cope if name exists with no uuid? */
2152 if (!child->info.exists) {
2153 if (!_create_node(child)) {
2154 stack;
2155 return 0;
2156 }
2157 }
2158
2159 if (!child->info.inactive_table && child->props.segment_count) {
2160 if (!_load_node(child)) {
2161 stack;
2162 return 0;
2163 }
2164 }
2165
eb91c4ee
MB
2166 		/* Propagate device size change */
2167 if (child->props.size_changed)
2168 dnode->props.size_changed = 1;
2169
bb875bb9 2170 /* Resume device immediately if it has parents and its size changed */
3776c494 2171 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2172 continue;
2173
7707ea90
AK
2174 if (!child->info.inactive_table && !child->info.suspended)
2175 continue;
2176
fc795d87 2177 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2178 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2179 &newinfo, &child->dtree->cookie, child->udev_flags,
2180 child->info.suspended)) {
165e4a11 2181 log_error("Unable to resume %s (%" PRIu32
fc795d87 2182 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2183 child->info.minor);
2ca6b865 2184 r = 0;
165e4a11
AK
2185 continue;
2186 }
2187
2188 /* Update cached info */
2189 child->info = newinfo;
566515c0
PR
2190
2191 /*
2192 * Prepare for immediate synchronization with udev and flush all stacked
2193 * dev node operations if requested by immediate_dev_node property. But
2194 * finish processing current level in the tree first.
2195 */
2196 if (child->props.immediate_dev_node)
2197 update_devs_flag = 1;
165e4a11
AK
2198 }
2199
2200 handle = NULL;
2201
566515c0
PR
2202 if (update_devs_flag) {
2203 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2204 stack;
2205 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2206 }
2207
2ca6b865 2208 return r;
165e4a11
AK
2209}
2210
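/*
 * A minimal usage sketch (assuming the usual libdevmapper calling
 * sequence; error handling omitted).  Callers typically preload the
 * children of the tree root and then activate them:
 *
 *   struct dm_tree *dtree = dm_tree_create();
 *   struct dm_tree_node *root = dm_tree_find_node(dtree, 0, 0);
 *   // ... add nodes and targets ...
 *   dm_tree_preload_children(root, uuid_prefix, strlen(uuid_prefix));
 *   dm_tree_activate_children(root, uuid_prefix, strlen(uuid_prefix));
 *   dm_tree_free(dtree);
 */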
165e4a11
AK
2211/*
2212 * Returns 1 if unsure.
2213 */
b4f1578f 2214int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2215 const char *uuid_prefix,
2216 size_t uuid_prefix_len)
2217{
2218 void *handle = NULL;
b4f1578f 2219 struct dm_tree_node *child = dnode;
165e4a11
AK
2220 const char *uuid;
2221
b4f1578f
AK
2222 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2223 if (!(uuid = dm_tree_node_get_uuid(child))) {
2224 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2225 return 1;
2226 }
2227
87f98002 2228 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2229 return 1;
2230
b4f1578f
AK
2231 if (dm_tree_node_num_children(child, 0))
2232 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2233 }
2234
2235 return 0;
2236}
2237
2238/*
2239 * Target functions
2240 */
b4f1578f 2241static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2242{
2243 struct load_segment *seg;
2244
b4f1578f
AK
2245 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2246 log_error("dtree node segment allocation failed");
165e4a11
AK
2247 return NULL;
2248 }
2249
2250 seg->type = type;
2251 seg->size = size;
2252 seg->area_count = 0;
2c44337b 2253 dm_list_init(&seg->areas);
165e4a11
AK
2254 seg->stripe_size = 0;
2255 seg->persistent = 0;
2256 seg->chunk_size = 0;
2257 seg->cow = NULL;
2258 seg->origin = NULL;
aa6f4e51 2259 seg->merge = NULL;
165e4a11 2260
2c44337b 2261 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2262 dnode->props.segment_count++;
2263
2264 return seg;
2265}
2266
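/*
 * Each dm_tree_node_add_*_target() helper below appends one load_segment
 * to dnode->props.segs via _add_segment(); the segments are later emitted
 * in list order by _emit_segment() when _load_node() runs.
 */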
b4f1578f 2267int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2268 uint64_t size,
2269 const char *origin_uuid)
165e4a11
AK
2270{
2271 struct load_segment *seg;
b4f1578f 2272 struct dm_tree_node *origin_node;
165e4a11 2273
b4f1578f
AK
2274 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2275 return_0;
165e4a11 2276
b4f1578f 2277 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2278 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2279 return 0;
2280 }
2281
2282 seg->origin = origin_node;
b4f1578f
AK
2283 if (!_link_tree_nodes(dnode, origin_node))
2284 return_0;
165e4a11 2285
56c28292
AK
2286 /* Resume snapshot origins after new snapshots */
2287 dnode->activation_priority = 1;
2288
165e4a11
AK
2289 return 1;
2290}
2291
aa6f4e51
MS
2292static int _add_snapshot_target(struct dm_tree_node *node,
2293 uint64_t size,
2294 const char *origin_uuid,
2295 const char *cow_uuid,
2296 const char *merge_uuid,
2297 int persistent,
2298 uint32_t chunk_size)
165e4a11
AK
2299{
2300 struct load_segment *seg;
aa6f4e51
MS
2301 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2302 unsigned seg_type;
2303
2304 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
165e4a11 2305
aa6f4e51 2306 if (!(seg = _add_segment(node, seg_type, size)))
b4f1578f 2307 return_0;
165e4a11 2308
b4f1578f 2309 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
165e4a11
AK
2310 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2311 return 0;
2312 }
2313
2314 seg->origin = origin_node;
b4f1578f
AK
2315 if (!_link_tree_nodes(node, origin_node))
2316 return_0;
165e4a11 2317
b4f1578f 2318 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
aa6f4e51 2319 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
165e4a11
AK
2320 return 0;
2321 }
2322
2323 seg->cow = cow_node;
b4f1578f
AK
2324 if (!_link_tree_nodes(node, cow_node))
2325 return_0;
165e4a11
AK
2326
2327 seg->persistent = persistent ? 1 : 0;
2328 seg->chunk_size = chunk_size;
2329
aa6f4e51
MS
2330 if (merge_uuid) {
2331 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2332 			/* Not necessarily an error: the merging snapshot may have been deactivated */
2333 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2334 } else {
2335 seg->merge = merge_node;
2336 /* must not link merging snapshot, would undermine activation_priority below */
2337 }
2338
2339 /* Resume snapshot-merge (acting origin) after other snapshots */
2340 node->activation_priority = 1;
2341 if (seg->merge) {
2342 /* Resume merging snapshot after snapshot-merge */
2343 seg->merge->activation_priority = 2;
2344 }
2345 }
2346
165e4a11
AK
2347 return 1;
2348}
2349
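/*
 * Rough shape of the snapshot/snapshot-merge parameters emitted for these
 * segments in _emit_segment_line() above ('P' = persistent, 'N' =
 * non-persistent exception store):
 *
 *   <origin_dev> <cow_dev> <P|N> <chunk_size>
 */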
aa6f4e51
MS
2350
2351int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2352 uint64_t size,
2353 const char *origin_uuid,
2354 const char *cow_uuid,
2355 int persistent,
2356 uint32_t chunk_size)
2357{
2358 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2359 NULL, persistent, chunk_size);
2360}
2361
2362int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2363 uint64_t size,
2364 const char *origin_uuid,
2365 const char *cow_uuid,
2366 const char *merge_uuid,
2367 uint32_t chunk_size)
2368{
2369 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2370 merge_uuid, 1, chunk_size);
2371}
2372
b4f1578f 2373int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2374 uint64_t size)
165e4a11 2375{
b4f1578f
AK
2376 if (!_add_segment(node, SEG_ERROR, size))
2377 return_0;
165e4a11
AK
2378
2379 return 1;
2380}
2381
b4f1578f 2382int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2383 uint64_t size)
165e4a11 2384{
b4f1578f
AK
2385 if (!_add_segment(node, SEG_ZERO, size))
2386 return_0;
165e4a11
AK
2387
2388 return 1;
2389}
2390
b4f1578f 2391int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2392 uint64_t size)
165e4a11 2393{
b4f1578f
AK
2394 if (!_add_segment(node, SEG_LINEAR, size))
2395 return_0;
165e4a11
AK
2396
2397 return 1;
2398}
2399
b4f1578f 2400int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2401 uint64_t size,
2402 uint32_t stripe_size)
165e4a11
AK
2403{
2404 struct load_segment *seg;
2405
b4f1578f
AK
2406 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2407 return_0;
165e4a11
AK
2408
2409 seg->stripe_size = stripe_size;
2410
2411 return 1;
2412}
2413
12ca060e
MB
2414int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2415 uint64_t size,
2416 const char *cipher,
2417 const char *chainmode,
2418 const char *iv,
2419 uint64_t iv_offset,
2420 const char *key)
2421{
2422 struct load_segment *seg;
2423
2424 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2425 return_0;
2426
2427 seg->cipher = cipher;
2428 seg->chainmode = chainmode;
2429 seg->iv = iv;
2430 seg->iv_offset = iv_offset;
2431 seg->key = key;
2432
2433 return 1;
2434}
2435
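/*
 * Rough shape of the crypt parameters emitted for this segment in
 * _emit_segment_line() above; chainmode and iv are folded into the cipher
 * specification when supplied, and iv_offset falls back to the segment
 * start when it is DM_CRYPT_IV_DEFAULT:
 *
 *   <cipher>[-<chainmode>][-<iv>] <key> <iv_offset> <dev> <offset>
 */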
b4f1578f 2436int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2437 uint32_t region_size,
08e64ce5 2438 unsigned clustered,
165e4a11 2439 const char *log_uuid,
ce7ed2c0
AK
2440 unsigned area_count,
2441 uint32_t flags)
165e4a11 2442{
908db078 2443 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2444 struct load_segment *seg;
2445
2446 if (!node->props.segment_count) {
b8175c33 2447 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2448 return 0;
2449 }
2450
2c44337b 2451 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2452
24b026e3 2453 if (log_uuid) {
67b25ed4
AK
2454 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2455 log_error("log uuid pool_strdup failed");
2456 return 0;
2457 }
df390f17
AK
2458 if ((flags & DM_CORELOG))
2459 /* For pvmove: immediate resume (for size validation) isn't needed. */
2460 node->props.delay_resume_if_new = 1;
2461 else {
9723090c
AK
2462 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2463 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2464 return 0;
2465 }
2466
566515c0
PR
2467 if (clustered)
2468 log_node->props.immediate_dev_node = 1;
2469
0a99713e
AK
2470 /* The kernel validates the size of disk logs. */
2471 /* FIXME Propagate to any devices below */
2472 log_node->props.delay_resume_if_new = 0;
2473
9723090c
AK
2474 if (!_link_tree_nodes(node, log_node))
2475 return_0;
2476 }
165e4a11
AK
2477 }
2478
2479 seg->log = log_node;
165e4a11
AK
2480 seg->region_size = region_size;
2481 seg->clustered = clustered;
2482 seg->mirror_area_count = area_count;
dbcb64b8 2483 seg->flags = flags;
165e4a11
AK
2484
2485 return 1;
2486}
2487
b4f1578f 2488int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2489 uint64_t size)
165e4a11 2490{
cbecd3cd 2491 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2492 return_0;
165e4a11
AK
2493
2494 return 1;
2495}
2496
cac52ca4
JEB
2497int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2498 uint64_t size,
2499 const char *raid_type,
2500 uint32_t region_size,
2501 uint32_t stripe_size,
f439e65b 2502 uint64_t rebuilds,
cac52ca4
JEB
2503 uint64_t reserved2)
2504{
2505 int i;
2506 struct load_segment *seg = NULL;
2507
2508 for (i = 0; dm_segtypes[i].target && !seg; i++)
2509 if (!strcmp(raid_type, dm_segtypes[i].target))
2510 if (!(seg = _add_segment(node,
2511 dm_segtypes[i].type, size)))
2512 return_0;
2513
b2fa9b43
JEB
2514 if (!seg)
2515 return_0;
2516
cac52ca4
JEB
2517 seg->region_size = region_size;
2518 seg->stripe_size = stripe_size;
2519 seg->area_count = 0;
f439e65b 2520 seg->rebuilds = rebuilds;
cac52ca4
JEB
2521
2522 return 1;
2523}
2524
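/*
 * Hypothetical call (illustrative values only): a two-leg RAID1 of
 * 1048576 sectors with a 1024-sector region size and no pending rebuilds
 * might be requested as
 *
 *   dm_tree_node_add_raid_target(node, 1048576, "raid1", 1024, 0, 0, 0);
 *
 * The raid_type string is matched against the dm_segtypes[] name table to
 * select the segment type.
 */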
b262f3e1
ZK
2525int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2526 uint64_t size,
2527 const char *rlog_uuid,
2528 const char *rlog_type,
2529 unsigned rsite_index,
2530 dm_replicator_mode_t mode,
2531 uint32_t async_timeout,
2532 uint64_t fall_behind_data,
2533 uint32_t fall_behind_ios)
2534{
2535 struct load_segment *rseg;
2536 struct replicator_site *rsite;
2537
2538 /* Local site0 - adds replicator segment and links rlog device */
2539 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2540 if (node->props.segment_count) {
2541 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2542 return 0;
2543 }
2544
2545 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2546 return_0;
2547
2548 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2549 log_error("Missing replicator log uuid %s.", rlog_uuid);
2550 return 0;
2551 }
2552
2553 if (!_link_tree_nodes(node, rseg->log))
2554 return_0;
2555
2556 if (strcmp(rlog_type, "ringbuffer") != 0) {
2557 log_error("Unsupported replicator log type %s.", rlog_type);
2558 return 0;
2559 }
2560
2561 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2562 return_0;
2563
2564 dm_list_init(&rseg->rsites);
2565 rseg->rdevice_count = 0;
2566 node->activation_priority = 1;
2567 }
2568
2569 /* Add site to segment */
2570 if (mode == DM_REPLICATOR_SYNC
2571 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2572 log_error("Async parameters passed for synchronnous replicator.");
2573 return 0;
2574 }
2575
2576 if (node->props.segment_count != 1) {
2577 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2578 return 0;
2579 }
2580
2581 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2582 if (rseg->type != SEG_REPLICATOR) {
2583 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2584 dm_segtypes[rseg->type].target);
2585 return 0;
2586 }
2587
2588 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2589 log_error("Failed to allocate remote site segment.");
2590 return 0;
2591 }
2592
2593 dm_list_add(&rseg->rsites, &rsite->list);
2594 rseg->rsite_count++;
2595
2596 rsite->mode = mode;
2597 rsite->async_timeout = async_timeout;
2598 rsite->fall_behind_data = fall_behind_data;
2599 rsite->fall_behind_ios = fall_behind_ios;
2600 rsite->rsite_index = rsite_index;
2601
2602 return 1;
2603}
2604
2605/* Appends device node to Replicator */
2606int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2607 uint64_t size,
2608 const char *replicator_uuid,
2609 uint64_t rdevice_index,
2610 const char *rdev_uuid,
2611 unsigned rsite_index,
2612 const char *slog_uuid,
2613 uint32_t slog_flags,
2614 uint32_t slog_region_size)
2615{
2616 struct seg_area *area;
2617 struct load_segment *rseg;
2618 struct load_segment *rep_seg;
2619
2620 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2621 /* Site index for local target */
2622 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2623 return_0;
2624
2625 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2626 log_error("Missing replicator uuid %s.", replicator_uuid);
2627 return 0;
2628 }
2629
2630 		/* Local slink0 for replicator must always be initialized first */
2631 if (rseg->replicator->props.segment_count != 1) {
2632 log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
2633 return 0;
2634 }
2635
2636 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2637 if (rep_seg->type != SEG_REPLICATOR) {
2638 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2639 dm_segtypes[rep_seg->type].target);
2640 return 0;
2641 }
2642 rep_seg->rdevice_count++;
2643
2644 if (!_link_tree_nodes(node, rseg->replicator))
2645 return_0;
2646
2647 rseg->rdevice_index = rdevice_index;
2648 } else {
2649 		/* Local slink0 for replicator must always be initialized first */
2650 if (node->props.segment_count != 1) {
2651 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
2652 return 0;
2653 }
2654
2655 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2656 if (rseg->type != SEG_REPLICATOR_DEV) {
2657 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
2658 dm_segtypes[rseg->type].target);
2659 return 0;
2660 }
2661 }
2662
2663 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2664 log_error("Unspecified sync log uuid.");
2665 return 0;
2666 }
2667
2668 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2669 return_0;
2670
2671 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2672
2673 if (!(slog_flags & DM_CORELOG)) {
2674 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2675 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2676 return 0;
2677 }
2678
2679 if (!_link_tree_nodes(node, area->slog))
2680 return_0;
2681 }
2682
2683 area->flags = slog_flags;
2684 area->region_size = slog_region_size;
2685 area->rsite_index = rsite_index;
2686
2687 return 1;
2688}
2689
4251236e
ZK
2690int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2691 uint64_t size,
e0ea24be 2692 uint64_t transaction_id,
4251236e
ZK
2693 const char *pool_uuid,
2694 const char *metadata_uuid,
2695 uint32_t data_block_size,
460c5991
ZK
2696 uint64_t low_water_mark_size,
2697 unsigned skip_block_zeroing)
4251236e
ZK
2698{
2699 struct load_segment *seg;
2700
565a4bfc
ZK
2701 if (data_block_size < DM_THIN_MIN_DATA_SIZE) {
2702 log_error("Data block size %u is lower then %u sectors.",
2703 data_block_size, DM_THIN_MIN_DATA_SIZE);
4251236e
ZK
2704 return 0;
2705 }
2706
565a4bfc
ZK
2707 if (data_block_size > DM_THIN_MAX_DATA_SIZE) {
2708 log_error("Data block size %u is higher then %u sectors.",
2709 data_block_size, DM_THIN_MAX_DATA_SIZE);
4251236e
ZK
2710 return 0;
2711 }
2712
2713 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2714 return_0;
2715
2716 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2717 log_error("Missing metadata uuid %s.", metadata_uuid);
2718 return 0;
2719 }
2720
2721 if (!_link_tree_nodes(node, seg->metadata))
2722 return_0;
2723
2724 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2725 log_error("Missing pool uuid %s.", pool_uuid);
2726 return 0;
2727 }
2728
2729 if (!_link_tree_nodes(node, seg->pool))
2730 return_0;
2731
e0ea24be 2732 	node->props.thin_pool_transaction_id = transaction_id; /* Compare on resume */
460c5991 2733 seg->low_water_mark_size = low_water_mark_size;
e0ea24be 2734 seg->data_block_size = data_block_size;
460c5991 2735 seg->skip_block_zeroing = skip_block_zeroing;
4251236e
ZK
2736
2737 return 1;
2738}
2739
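/*
 * Rough shape of the thin-pool parameters emitted for this segment in
 * _emit_segment_line() above; data_block_size is in sectors and must lie
 * between DM_THIN_MIN_DATA_SIZE and DM_THIN_MAX_DATA_SIZE:
 *
 *   <metadata_dev> <data_dev> <data_block_size> <low_water_mark>
 *   [1 skip_block_zeroing]
 */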
2740int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2741 uint64_t size,
4251236e
ZK
2742 const char *thin_pool_uuid,
2743 uint32_t device_id)
2744{
2745 struct load_segment *seg;
2746
565a4bfc
ZK
2747 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2748 log_error("Device id %u is higher then %u.",
2749 device_id, DM_THIN_MAX_DEVICE_ID);
4251236e
ZK
2750 return 0;
2751 }
2752
2753 if (!(seg = _add_segment(node, SEG_THIN, size)))
2754 return_0;
2755
2756 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2757 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2758 return 0;
2759 }
2760
2761 if (!_link_tree_nodes(node, seg->pool))
2762 return_0;
2763
1419bf1c
ZK
2764 seg->device_id = device_id;
2765
4251236e
ZK
2766 return 1;
2767}
2768
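/*
 * Rough shape of the thin parameters emitted for this segment in
 * _emit_segment_line() above:
 *
 *   <pool_dev> <device_id>
 */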
b4f1578f 2769static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
2770{
2771 struct seg_area *area;
2772
b4f1578f 2773 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
2774 log_error("Failed to allocate target segment area.");
2775 return 0;
2776 }
2777
2778 area->dev_node = dev_node;
2779 area->offset = offset;
2780
2c44337b 2781 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
2782 seg->area_count++;
2783
2784 return 1;
2785}
2786
b4f1578f 2787int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
2788 const char *dev_name,
2789 const char *uuid,
2790 uint64_t offset)
165e4a11
AK
2791{
2792 struct load_segment *seg;
2793 struct stat info;
b4f1578f 2794 struct dm_tree_node *dev_node;
165e4a11
AK
2795
2796 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 2797 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
2798 return 0;
2799 }
2800
2801 if (uuid) {
b4f1578f 2802 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
2803 log_error("Couldn't find area uuid %s.", uuid);
2804 return 0;
2805 }
b4f1578f
AK
2806 if (!_link_tree_nodes(node, dev_node))
2807 return_0;
165e4a11 2808 } else {
6d04311e 2809 if (stat(dev_name, &info) < 0) {
165e4a11
AK
2810 log_error("Device %s not found.", dev_name);
2811 return 0;
2812 }
2813
40e5fd8b 2814 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
2815 log_error("Device %s is not a block device.", dev_name);
2816 return 0;
2817 }
2818
2819 /* FIXME Check correct macro use */
cda69e17
PR
2820 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
2821 MINOR(info.st_rdev), 0)))
b4f1578f 2822 return_0;
165e4a11
AK
2823 }
2824
2825 if (!node->props.segment_count) {
b8175c33 2826 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2827 return 0;
2828 }
2829
2c44337b 2830 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2831
b4f1578f
AK
2832 if (!_add_area(node, seg, dev_node, offset))
2833 return_0;
165e4a11
AK
2834
2835 return 1;
db208f51 2836}
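/*
 * Hypothetical calls (illustrative values only): an area may reference
 * either another node already in the tree by uuid, or an existing block
 * device by path:
 *
 *   dm_tree_node_add_target_area(node, NULL, lv_uuid, 0);
 *   dm_tree_node_add_target_area(node, "/dev/sdb1", NULL, 2048);
 *
 * where lv_uuid is assumed to name a node previously added to the tree.
 */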
bd90c6b2 2837
6d04311e
JEB
2838int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
2839{
2840 struct load_segment *seg;
2841
2842 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2843
415c0690
AK
2844 switch (seg->type) {
2845 case SEG_RAID1:
2846 case SEG_RAID4:
2847 case SEG_RAID5_LA:
2848 case SEG_RAID5_RA:
2849 case SEG_RAID5_LS:
2850 case SEG_RAID5_RS:
2851 case SEG_RAID6_ZR:
2852 case SEG_RAID6_NR:
2853 case SEG_RAID6_NC:
2854 break;
2855 default:
2856 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
2857 return 0;
2858 }
2859
6d04311e
JEB
2860 if (!_add_area(node, seg, NULL, offset))
2861 return_0;
2862
2863 return 1;
2864}
2865
bd90c6b2
AK
2866void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
2867{
2868 node->dtree->cookie = cookie;
2869}
2870
2871uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
2872{
2873 return node->dtree->cookie;
2874}