]> sourceware.org Git - lvm2.git/blame - libdm/libdm-deptree.c
Add missing lvrename mirrored log recursion in for_each_sub_lv.
[lvm2.git] / libdm / libdm-deptree.c
CommitLineData
3d0480ed 1/*
4251236e 2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3d0480ed
AK
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
3e5b6ed2 15#include "dmlib.h"
3d0480ed
AK
16#include "libdm-targets.h"
17#include "libdm-common.h"
3d0480ed 18#include "kdev_t.h"
0782ad50 19#include "dm-ioctl.h"
3d0480ed
AK
20
21#include <stdarg.h>
22#include <sys/param.h>
8f26e18c 23#include <sys/utsname.h>
3d0480ed 24
165e4a11
AK
25#define MAX_TARGET_PARAMSIZE 500000
26
87f98002
AK
27/* FIXME Fix interface so this is used only by LVM */
28#define UUID_PREFIX "LVM-"
29
b262f3e1
ZK
30#define REPLICATOR_LOCAL_SITE 0
31
165e4a11
AK
32/* Supported segment types */
33enum {
12ca060e
MB
34 SEG_CRYPT,
35 SEG_ERROR,
165e4a11
AK
36 SEG_LINEAR,
37 SEG_MIRRORED,
b262f3e1
ZK
38 SEG_REPLICATOR,
39 SEG_REPLICATOR_DEV,
165e4a11
AK
40 SEG_SNAPSHOT,
41 SEG_SNAPSHOT_ORIGIN,
aa6f4e51 42 SEG_SNAPSHOT_MERGE,
165e4a11
AK
43 SEG_STRIPED,
44 SEG_ZERO,
4251236e
ZK
45 SEG_THIN_POOL,
46 SEG_THIN,
cac52ca4
JEB
47 SEG_RAID1,
48 SEG_RAID4,
49 SEG_RAID5_LA,
50 SEG_RAID5_RA,
51 SEG_RAID5_LS,
52 SEG_RAID5_RS,
53 SEG_RAID6_ZR,
54 SEG_RAID6_NR,
55 SEG_RAID6_NC,
56 SEG_LAST,
165e4a11 57};
b4f1578f 58
165e4a11
AK
59/* FIXME Add crypt and multipath support */
60
61struct {
62 unsigned type;
63 const char *target;
64} dm_segtypes[] = {
12ca060e 65 { SEG_CRYPT, "crypt" },
165e4a11
AK
66 { SEG_ERROR, "error" },
67 { SEG_LINEAR, "linear" },
68 { SEG_MIRRORED, "mirror" },
b262f3e1
ZK
69 { SEG_REPLICATOR, "replicator" },
70 { SEG_REPLICATOR_DEV, "replicator-dev" },
165e4a11
AK
71 { SEG_SNAPSHOT, "snapshot" },
72 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
aa6f4e51 73 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
165e4a11
AK
74 { SEG_STRIPED, "striped" },
75 { SEG_ZERO, "zero"},
4251236e
ZK
76 { SEG_THIN_POOL, "thin-pool"},
77 { SEG_THIN, "thin"},
cac52ca4
JEB
78 { SEG_RAID1, "raid1"},
79 { SEG_RAID4, "raid4"},
80 { SEG_RAID5_LA, "raid5_la"},
81 { SEG_RAID5_RA, "raid5_ra"},
82 { SEG_RAID5_LS, "raid5_ls"},
83 { SEG_RAID5_RS, "raid5_rs"},
84 { SEG_RAID6_ZR, "raid6_zr"},
85 { SEG_RAID6_NR, "raid6_nr"},
86 { SEG_RAID6_NC, "raid6_nc"},
ee05be08
ZK
87
88 /*
89 *WARNING: Since 'raid' target overloads this 1:1 mapping table
90 * for search do not add new enum elements past them!
91 */
cac52ca4
JEB
92 { SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
93 { SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
94 { SEG_LAST, NULL },
165e4a11
AK
95};
96
97/* Some segment types have a list of areas of other devices attached */
98struct seg_area {
2c44337b 99 struct dm_list list;
165e4a11 100
b4f1578f 101 struct dm_tree_node *dev_node;
165e4a11
AK
102
103 uint64_t offset;
b262f3e1
ZK
104
105 unsigned rsite_index; /* Replicator site index */
106 struct dm_tree_node *slog; /* Replicator sync log node */
107 uint64_t region_size; /* Replicator sync log size */
108 uint32_t flags; /* Replicator sync log flags */
109};
110
2e732e96
ZK
111struct dm_thin_message {
112 dm_thin_message_t type;
113 union {
114 struct {
115 uint32_t device_id;
116 uint32_t origin_id;
117 } m_create_snap;
118 struct {
119 uint32_t device_id;
120 } m_create_thin;
121 struct {
122 uint32_t device_id;
123 } m_delete;
124 struct {
125 uint64_t current_id;
126 uint64_t new_id;
127 } m_set_transaction_id;
128 struct {
129 uint32_t device_id;
130 uint64_t new_size;
131 } m_trim;
132 } u;
133};
134
25e6ab87
ZK
135struct thin_message {
136 struct dm_list list;
137 struct dm_thin_message message;
660a42bc 138 int expected_errno;
25e6ab87
ZK
139};
140
b262f3e1
ZK
141/* Replicator-log has a list of sites */
142/* FIXME: maybe move to seg_area too? */
143struct replicator_site {
144 struct dm_list list;
145
146 unsigned rsite_index;
147 dm_replicator_mode_t mode;
148 uint32_t async_timeout;
149 uint32_t fall_behind_ios;
150 uint64_t fall_behind_data;
165e4a11
AK
151};
152
153/* Per-segment properties */
154struct load_segment {
2c44337b 155 struct dm_list list;
165e4a11
AK
156
157 unsigned type;
158
159 uint64_t size;
160
b262f3e1
ZK
161 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
162 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
165e4a11 163
cac52ca4 164 uint32_t stripe_size; /* Striped + raid */
165e4a11
AK
165
166 int persistent; /* Snapshot */
167 uint32_t chunk_size; /* Snapshot */
b4f1578f
AK
168 struct dm_tree_node *cow; /* Snapshot */
169 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
aa6f4e51 170 struct dm_tree_node *merge; /* Snapshot */
165e4a11 171
b262f3e1 172 struct dm_tree_node *log; /* Mirror + Replicator */
cac52ca4 173 uint32_t region_size; /* Mirror + raid */
165e4a11
AK
174 unsigned clustered; /* Mirror */
175 unsigned mirror_area_count; /* Mirror */
dbcb64b8 176 uint32_t flags; /* Mirror log */
67b25ed4 177 char *uuid; /* Clustered mirror log */
12ca060e
MB
178
179 const char *cipher; /* Crypt */
180 const char *chainmode; /* Crypt */
181 const char *iv; /* Crypt */
182 uint64_t iv_offset; /* Crypt */
183 const char *key; /* Crypt */
b262f3e1
ZK
184
185 const char *rlog_type; /* Replicator */
186 struct dm_list rsites; /* Replicator */
187 unsigned rsite_count; /* Replicator */
188 unsigned rdevice_count; /* Replicator */
189 struct dm_tree_node *replicator;/* Replicator-dev */
190 uint64_t rdevice_index; /* Replicator-dev */
f439e65b 191
40e5fd8b 192 uint64_t rebuilds; /* raid */
4251236e
ZK
193
194 struct dm_tree_node *metadata; /* Thin_pool */
195 struct dm_tree_node *pool; /* Thin_pool, Thin */
25e6ab87 196 struct dm_list thin_messages; /* Thin_pool */
bbcd37e4 197 uint64_t transaction_id; /* Thin_pool */
e9156c2b 198 uint64_t low_water_mark; /* Thin_pool */
e0ea24be 199 uint32_t data_block_size; /* Thin_pool */
460c5991 200 unsigned skip_block_zeroing; /* Thin_pool */
4251236e
ZK
201 uint32_t device_id; /* Thin */
202
165e4a11
AK
203};
204
205/* Per-device properties */
206struct load_properties {
207 int read_only;
208 uint32_t major;
209 uint32_t minor;
210
52b84409
AK
211 uint32_t read_ahead;
212 uint32_t read_ahead_flags;
213
165e4a11 214 unsigned segment_count;
bb875bb9 215 unsigned size_changed;
2c44337b 216 struct dm_list segs;
165e4a11
AK
217
218 const char *new_name;
566515c0
PR
219
220 /* If immediate_dev_node is set to 1, try to create the dev node
221 * as soon as possible (e.g. in preload stage even during traversal
222 * and processing of dm tree). This will also flush all stacked dev
223 * node operations, synchronizing with udev.
224 */
df390f17
AK
225 unsigned immediate_dev_node;
226
227 /*
228 * If the device size changed from zero and this is set,
229 * don't resume the device immediately, even if the device
230 * has parents. This works provided the parents do not
231 * validate the device size and is required by pvmove to
232 * avoid starting the mirror resync operation too early.
233 */
234 unsigned delay_resume_if_new;
bbcd37e4
ZK
235
236 /* Send messages for this node in preload */
237 unsigned send_messages;
165e4a11
AK
238};
239
240/* Two of these used to join two nodes with uses and used_by. */
b4f1578f 241struct dm_tree_link {
2c44337b 242 struct dm_list list;
b4f1578f 243 struct dm_tree_node *node;
165e4a11
AK
244};
245
b4f1578f
AK
246struct dm_tree_node {
247 struct dm_tree *dtree;
3d0480ed 248
40e5fd8b
AK
249 const char *name;
250 const char *uuid;
251 struct dm_info info;
3d0480ed 252
40e5fd8b
AK
253 struct dm_list uses; /* Nodes this node uses */
254 struct dm_list used_by; /* Nodes that use this node */
165e4a11 255
56c28292
AK
256 int activation_priority; /* 0 gets activated first */
257
f16aea9e
PR
258 uint16_t udev_flags; /* Udev control flags */
259
165e4a11
AK
260 void *context; /* External supplied context */
261
262 struct load_properties props; /* For creation/table (re)load */
76d1aec8
ZK
263
264 /*
265 * If presuspend of child node is needed
266 * Note: only direct child is allowed
267 */
268 struct dm_tree_node *presuspend_node;
3d0480ed
AK
269};
270
b4f1578f 271struct dm_tree {
a3f6b2ce
AK
272 struct dm_pool *mem;
273 struct dm_hash_table *devs;
165e4a11 274 struct dm_hash_table *uuids;
b4f1578f 275 struct dm_tree_node root;
c55b1410 276 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
787200ef
PR
277 int no_flush; /* 1 sets noflush (mirrors/multipath) */
278 int retry_remove; /* 1 retries remove if not successful */
bd90c6b2 279 uint32_t cookie;
3d0480ed
AK
280};
281
b4f1578f 282struct dm_tree *dm_tree_create(void)
3d0480ed 283{
0395dd22 284 struct dm_pool *dmem;
b4f1578f 285 struct dm_tree *dtree;
3d0480ed 286
0395dd22
ZK
287 if (!(dmem = dm_pool_create("dtree", 1024)) ||
288 !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
289 log_error("Failed to allocate dtree.");
290 if (dmem)
291 dm_pool_destroy(dmem);
3d0480ed
AK
292 return NULL;
293 }
294
b4f1578f 295 dtree->root.dtree = dtree;
2c44337b
AK
296 dm_list_init(&dtree->root.uses);
297 dm_list_init(&dtree->root.used_by);
c55b1410 298 dtree->skip_lockfs = 0;
b9ffd32c 299 dtree->no_flush = 0;
0395dd22 300 dtree->mem = dmem;
3d0480ed 301
b4f1578f
AK
302 if (!(dtree->devs = dm_hash_create(8))) {
303 log_error("dtree hash creation failed");
304 dm_pool_destroy(dtree->mem);
3d0480ed
AK
305 return NULL;
306 }
307
b4f1578f
AK
308 if (!(dtree->uuids = dm_hash_create(32))) {
309 log_error("dtree uuid hash creation failed");
310 dm_hash_destroy(dtree->devs);
311 dm_pool_destroy(dtree->mem);
165e4a11
AK
312 return NULL;
313 }
314
b4f1578f 315 return dtree;
3d0480ed
AK
316}
317
b4f1578f 318void dm_tree_free(struct dm_tree *dtree)
3d0480ed 319{
b4f1578f 320 if (!dtree)
3d0480ed
AK
321 return;
322
b4f1578f
AK
323 dm_hash_destroy(dtree->uuids);
324 dm_hash_destroy(dtree->devs);
325 dm_pool_destroy(dtree->mem);
3d0480ed
AK
326}
327
04bde319
ZK
328static int _nodes_are_linked(const struct dm_tree_node *parent,
329 const struct dm_tree_node *child)
3d0480ed 330{
b4f1578f 331 struct dm_tree_link *dlink;
3d0480ed 332
2c44337b 333 dm_list_iterate_items(dlink, &parent->uses)
3d0480ed
AK
334 if (dlink->node == child)
335 return 1;
3d0480ed
AK
336
337 return 0;
338}
339
2c44337b 340static int _link(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 341{
b4f1578f 342 struct dm_tree_link *dlink;
3d0480ed 343
b4f1578f
AK
344 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
345 log_error("dtree link allocation failed");
3d0480ed
AK
346 return 0;
347 }
348
349 dlink->node = node;
2c44337b 350 dm_list_add(list, &dlink->list);
3d0480ed
AK
351
352 return 1;
353}
354
b4f1578f
AK
355static int _link_nodes(struct dm_tree_node *parent,
356 struct dm_tree_node *child)
3d0480ed
AK
357{
358 if (_nodes_are_linked(parent, child))
359 return 1;
360
361 if (!_link(&parent->uses, child))
362 return 0;
363
364 if (!_link(&child->used_by, parent))
365 return 0;
366
367 return 1;
368}
369
2c44337b 370static void _unlink(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 371{
b4f1578f 372 struct dm_tree_link *dlink;
3d0480ed 373
2c44337b 374 dm_list_iterate_items(dlink, list)
3d0480ed 375 if (dlink->node == node) {
2c44337b 376 dm_list_del(&dlink->list);
3d0480ed
AK
377 break;
378 }
3d0480ed
AK
379}
380
b4f1578f
AK
381static void _unlink_nodes(struct dm_tree_node *parent,
382 struct dm_tree_node *child)
3d0480ed
AK
383{
384 if (!_nodes_are_linked(parent, child))
385 return;
386
387 _unlink(&parent->uses, child);
388 _unlink(&child->used_by, parent);
389}
390
b4f1578f 391static int _add_to_toplevel(struct dm_tree_node *node)
165e4a11 392{
b4f1578f 393 return _link_nodes(&node->dtree->root, node);
165e4a11
AK
394}
395
b4f1578f 396static void _remove_from_toplevel(struct dm_tree_node *node)
3d0480ed 397{
b1ebf028 398 _unlink_nodes(&node->dtree->root, node);
3d0480ed
AK
399}
400
b4f1578f 401static int _add_to_bottomlevel(struct dm_tree_node *node)
3d0480ed 402{
b4f1578f 403 return _link_nodes(node, &node->dtree->root);
3d0480ed
AK
404}
405
b4f1578f 406static void _remove_from_bottomlevel(struct dm_tree_node *node)
165e4a11 407{
b1ebf028 408 _unlink_nodes(node, &node->dtree->root);
165e4a11
AK
409}
410
b4f1578f 411static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
165e4a11
AK
412{
413 /* Don't link to root node if child already has a parent */
f77736ca 414 if (parent == &parent->dtree->root) {
b4f1578f 415 if (dm_tree_node_num_children(child, 1))
165e4a11
AK
416 return 1;
417 } else
418 _remove_from_toplevel(child);
419
f77736ca 420 if (child == &child->dtree->root) {
b4f1578f 421 if (dm_tree_node_num_children(parent, 0))
165e4a11
AK
422 return 1;
423 } else
424 _remove_from_bottomlevel(parent);
425
426 return _link_nodes(parent, child);
427}
428
b4f1578f 429static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
430 const char *name,
431 const char *uuid,
165e4a11 432 struct dm_info *info,
f16aea9e
PR
433 void *context,
434 uint16_t udev_flags)
3d0480ed 435{
b4f1578f 436 struct dm_tree_node *node;
3d0480ed
AK
437 uint64_t dev;
438
b4f1578f
AK
439 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
440 log_error("_create_dm_tree_node alloc failed");
3d0480ed
AK
441 return NULL;
442 }
443
b4f1578f 444 node->dtree = dtree;
3d0480ed
AK
445
446 node->name = name;
447 node->uuid = uuid;
448 node->info = *info;
165e4a11 449 node->context = context;
f16aea9e 450 node->udev_flags = udev_flags;
56c28292 451 node->activation_priority = 0;
3d0480ed 452
2c44337b
AK
453 dm_list_init(&node->uses);
454 dm_list_init(&node->used_by);
455 dm_list_init(&node->props.segs);
3d0480ed
AK
456
457 dev = MKDEV(info->major, info->minor);
458
b4f1578f 459 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
3d0480ed 460 sizeof(dev), node)) {
b4f1578f
AK
461 log_error("dtree node hash insertion failed");
462 dm_pool_free(dtree->mem, node);
3d0480ed
AK
463 return NULL;
464 }
465
165e4a11 466 if (uuid && *uuid &&
b4f1578f
AK
467 !dm_hash_insert(dtree->uuids, uuid, node)) {
468 log_error("dtree uuid hash insertion failed");
469 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
165e4a11 470 sizeof(dev));
b4f1578f 471 dm_pool_free(dtree->mem, node);
165e4a11
AK
472 return NULL;
473 }
474
3d0480ed
AK
475 return node;
476}
477
b4f1578f 478static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
479 uint32_t major, uint32_t minor)
480{
481 uint64_t dev = MKDEV(major, minor);
482
b4f1578f 483 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
3d0480ed
AK
484 sizeof(dev));
485}
486
b4f1578f 487static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
488 const char *uuid)
489{
87f98002
AK
490 struct dm_tree_node *node;
491
492 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
493 return node;
494
495 if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
496 return NULL;
497
498 return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
165e4a11
AK
499}
500
a3f6b2ce 501static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
3d0480ed
AK
502 const char **name, const char **uuid,
503 struct dm_info *info, struct dm_deps **deps)
504{
505 memset(info, 0, sizeof(*info));
506
507 if (!dm_is_dm_major(major)) {
508 *name = "";
509 *uuid = "";
510 *deps = NULL;
511 info->major = major;
512 info->minor = minor;
513 info->exists = 0;
165e4a11
AK
514 info->live_table = 0;
515 info->inactive_table = 0;
516 info->read_only = 0;
3d0480ed
AK
517 return 1;
518 }
519
520 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
521 log_error("deps dm_task creation failed");
522 return 0;
523 }
524
b4f1578f
AK
525 if (!dm_task_set_major(*dmt, major)) {
526 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
527 major, minor);
3d0480ed 528 goto failed;
b4f1578f 529 }
3d0480ed 530
b4f1578f
AK
531 if (!dm_task_set_minor(*dmt, minor)) {
532 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
533 major, minor);
3d0480ed 534 goto failed;
b4f1578f 535 }
3d0480ed 536
b4f1578f
AK
537 if (!dm_task_run(*dmt)) {
538 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
539 major, minor);
3d0480ed 540 goto failed;
b4f1578f 541 }
3d0480ed 542
b4f1578f
AK
543 if (!dm_task_get_info(*dmt, info)) {
544 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
545 major, minor);
3d0480ed 546 goto failed;
b4f1578f 547 }
3d0480ed
AK
548
549 if (!info->exists) {
550 *name = "";
551 *uuid = "";
552 *deps = NULL;
553 } else {
554 if (info->major != major) {
b4f1578f 555 log_error("Inconsistent dtree major number: %u != %u",
3d0480ed
AK
556 major, info->major);
557 goto failed;
558 }
559 if (info->minor != minor) {
b4f1578f 560 log_error("Inconsistent dtree minor number: %u != %u",
3d0480ed
AK
561 minor, info->minor);
562 goto failed;
563 }
a3f6b2ce 564 if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
3d0480ed
AK
565 log_error("name pool_strdup failed");
566 goto failed;
567 }
a3f6b2ce 568 if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
3d0480ed
AK
569 log_error("uuid pool_strdup failed");
570 goto failed;
571 }
572 *deps = dm_task_get_deps(*dmt);
573 }
574
575 return 1;
576
577failed:
578 dm_task_destroy(*dmt);
579 return 0;
580}
581
b4f1578f
AK
582static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
583 struct dm_tree_node *parent,
cda69e17
PR
584 uint32_t major, uint32_t minor,
585 uint16_t udev_flags)
3d0480ed
AK
586{
587 struct dm_task *dmt = NULL;
588 struct dm_info info;
589 struct dm_deps *deps = NULL;
590 const char *name = NULL;
591 const char *uuid = NULL;
b4f1578f 592 struct dm_tree_node *node = NULL;
3d0480ed 593 uint32_t i;
3d0480ed
AK
594 int new = 0;
595
596 /* Already in tree? */
b4f1578f
AK
597 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
598 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
599 return_NULL;
3d0480ed 600
f16aea9e 601 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
cda69e17 602 NULL, udev_flags)))
b4f1578f 603 goto_out;
3d0480ed
AK
604 new = 1;
605 }
606
165e4a11
AK
607 if (!_link_tree_nodes(parent, node)) {
608 node = NULL;
b4f1578f 609 goto_out;
165e4a11 610 }
3d0480ed
AK
611
612 /* If node was already in tree, no need to recurse. */
613 if (!new)
165e4a11 614 goto out;
3d0480ed
AK
615
616 /* Can't recurse if not a mapped device or there are no dependencies */
617 if (!node->info.exists || !deps->count) {
b4f1578f
AK
618 if (!_add_to_bottomlevel(node)) {
619 stack;
165e4a11 620 node = NULL;
b4f1578f 621 }
165e4a11 622 goto out;
3d0480ed
AK
623 }
624
625 /* Add dependencies to tree */
626 for (i = 0; i < deps->count; i++)
b4f1578f 627 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
cda69e17 628 MINOR(deps->device[i]), udev_flags)) {
165e4a11 629 node = NULL;
b4f1578f 630 goto_out;
165e4a11 631 }
3d0480ed 632
3d0480ed
AK
633out:
634 if (dmt)
635 dm_task_destroy(dmt);
636
165e4a11
AK
637 return node;
638}
639
b4f1578f 640static int _node_clear_table(struct dm_tree_node *dnode)
165e4a11
AK
641{
642 struct dm_task *dmt;
643 struct dm_info *info;
644 const char *name;
645 int r;
646
647 if (!(info = &dnode->info)) {
b4f1578f 648 log_error("_node_clear_table failed: missing info");
165e4a11
AK
649 return 0;
650 }
651
b4f1578f
AK
652 if (!(name = dm_tree_node_get_name(dnode))) {
653 log_error("_node_clear_table failed: missing name");
165e4a11
AK
654 return 0;
655 }
656
657 /* Is there a table? */
658 if (!info->exists || !info->inactive_table)
659 return 1;
660
10d0d9c7
AK
661// FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.
662
165e4a11
AK
663 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
664 name, info->major, info->minor);
665
666 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
165e4a11
AK
667 log_error("Table clear dm_task creation failed for %s", name);
668 return 0;
669 }
670
671 if (!dm_task_set_major(dmt, info->major) ||
672 !dm_task_set_minor(dmt, info->minor)) {
673 log_error("Failed to set device number for %s table clear", name);
674 dm_task_destroy(dmt);
675 return 0;
676 }
677
678 r = dm_task_run(dmt);
679
680 if (!dm_task_get_info(dmt, info)) {
b4f1578f 681 log_error("_node_clear_table failed: info missing after running task for %s", name);
165e4a11
AK
682 r = 0;
683 }
684
685 dm_task_destroy(dmt);
686
3d0480ed
AK
687 return r;
688}
689
b4f1578f 690struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
165e4a11
AK
691 const char *name,
692 const char *uuid,
693 uint32_t major, uint32_t minor,
694 int read_only,
695 int clear_inactive,
696 void *context)
697{
b4f1578f 698 struct dm_tree_node *dnode;
165e4a11
AK
699 struct dm_info info;
700 const char *name2;
701 const char *uuid2;
702
703 /* Do we need to add node to tree? */
b4f1578f
AK
704 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
705 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
706 log_error("name pool_strdup failed");
707 return NULL;
708 }
b4f1578f 709 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
165e4a11
AK
710 log_error("uuid pool_strdup failed");
711 return NULL;
712 }
713
714 info.major = 0;
715 info.minor = 0;
716 info.exists = 0;
717 info.live_table = 0;
718 info.inactive_table = 0;
719 info.read_only = 0;
720
f16aea9e
PR
721 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
722 context, 0)))
b4f1578f 723 return_NULL;
165e4a11
AK
724
725 /* Attach to root node until a table is supplied */
b4f1578f
AK
726 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
727 return_NULL;
165e4a11
AK
728
729 dnode->props.major = major;
730 dnode->props.minor = minor;
731 dnode->props.new_name = NULL;
bb875bb9 732 dnode->props.size_changed = 0;
165e4a11
AK
733 } else if (strcmp(name, dnode->name)) {
734 /* Do we need to rename node? */
b4f1578f 735 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
736 log_error("name pool_strdup failed");
737 return 0;
738 }
739 }
740
741 dnode->props.read_only = read_only ? 1 : 0;
52b84409
AK
742 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
743 dnode->props.read_ahead_flags = 0;
165e4a11 744
b4f1578f
AK
745 if (clear_inactive && !_node_clear_table(dnode))
746 return_NULL;
165e4a11
AK
747
748 dnode->context = context;
f16aea9e 749 dnode->udev_flags = 0;
165e4a11
AK
750
751 return dnode;
752}
753
f16aea9e
PR
754struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
755 const char *name,
756 const char *uuid,
757 uint32_t major,
758 uint32_t minor,
759 int read_only,
760 int clear_inactive,
761 void *context,
762 uint16_t udev_flags)
763{
764 struct dm_tree_node *node;
765
766 if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
767 clear_inactive, context)))
768 node->udev_flags = udev_flags;
769
770 return node;
771}
772
83c606ae
JEB
773void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
774
775{
776 struct dm_info *dinfo = &dnode->info;
777
778 if (udev_flags != dnode->udev_flags)
779 log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
780 ") udev_flags from 0x%x to 0x%x",
781 dnode->name, dinfo->major, dinfo->minor,
782 dnode->udev_flags, udev_flags);
783 dnode->udev_flags = udev_flags;
784}
f16aea9e 785
52b84409
AK
786void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
787 uint32_t read_ahead,
788 uint32_t read_ahead_flags)
08e64ce5 789{
52b84409
AK
790 dnode->props.read_ahead = read_ahead;
791 dnode->props.read_ahead_flags = read_ahead_flags;
792}
793
76d1aec8
ZK
794void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
795 struct dm_tree_node *presuspend_node)
796{
797 node->presuspend_node = presuspend_node;
798}
799
b4f1578f 800int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
3d0480ed 801{
cda69e17
PR
802 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
803}
804
805int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
806 uint32_t minor, uint16_t udev_flags)
807{
808 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
3d0480ed
AK
809}
810
04bde319 811const char *dm_tree_node_get_name(const struct dm_tree_node *node)
3d0480ed
AK
812{
813 return node->info.exists ? node->name : "";
814}
815
04bde319 816const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
3d0480ed
AK
817{
818 return node->info.exists ? node->uuid : "";
819}
820
04bde319 821const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
3d0480ed
AK
822{
823 return &node->info;
824}
825
04bde319 826void *dm_tree_node_get_context(const struct dm_tree_node *node)
165e4a11
AK
827{
828 return node->context;
829}
830
04bde319 831int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
eb91c4ee
MB
832{
833 return dnode->props.size_changed;
834}
835
04bde319 836int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
3d0480ed
AK
837{
838 if (inverted) {
b4f1578f 839 if (_nodes_are_linked(&node->dtree->root, node))
3d0480ed 840 return 0;
2c44337b 841 return dm_list_size(&node->used_by);
3d0480ed
AK
842 }
843
b4f1578f 844 if (_nodes_are_linked(node, &node->dtree->root))
3d0480ed
AK
845 return 0;
846
2c44337b 847 return dm_list_size(&node->uses);
3d0480ed
AK
848}
849
2b69db1f
AK
850/*
851 * Returns 1 if no prefix supplied
852 */
853static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
854{
855 if (!uuid_prefix)
856 return 1;
857
858 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
859 return 1;
860
861 /* Handle transition: active device uuids might be missing the prefix */
862 if (uuid_prefix_len <= 4)
863 return 0;
864
87f98002 865 if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
872dea04
AK
866 return 0;
867
87f98002 868 if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
2b69db1f
AK
869 return 0;
870
87f98002 871 if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
2b69db1f
AK
872 return 1;
873
874 return 0;
875}
876
690a5da2
AK
877/*
878 * Returns 1 if no children.
879 */
b4f1578f 880static int _children_suspended(struct dm_tree_node *node,
690a5da2
AK
881 uint32_t inverted,
882 const char *uuid_prefix,
883 size_t uuid_prefix_len)
884{
2c44337b 885 struct dm_list *list;
b4f1578f 886 struct dm_tree_link *dlink;
690a5da2
AK
887 const struct dm_info *dinfo;
888 const char *uuid;
889
890 if (inverted) {
b4f1578f 891 if (_nodes_are_linked(&node->dtree->root, node))
690a5da2
AK
892 return 1;
893 list = &node->used_by;
894 } else {
b4f1578f 895 if (_nodes_are_linked(node, &node->dtree->root))
690a5da2
AK
896 return 1;
897 list = &node->uses;
898 }
899
2c44337b 900 dm_list_iterate_items(dlink, list) {
b4f1578f 901 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
690a5da2
AK
902 stack;
903 continue;
904 }
905
906 /* Ignore if it doesn't belong to this VG */
2b69db1f 907 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2
AK
908 continue;
909
76d1aec8
ZK
910 /* Ignore if parent node wants to presuspend this node */
911 if (dlink->node->presuspend_node == node)
912 continue;
913
b4f1578f
AK
914 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
915 stack; /* FIXME Is this normal? */
690a5da2
AK
916 return 0;
917 }
918
919 if (!dinfo->suspended)
920 return 0;
921 }
922
923 return 1;
924}
925
3d0480ed
AK
926/*
927 * Set major and minor to zero for root of tree.
928 */
b4f1578f 929struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
3d0480ed
AK
930 uint32_t major,
931 uint32_t minor)
932{
933 if (!major && !minor)
b4f1578f 934 return &dtree->root;
3d0480ed 935
b4f1578f 936 return _find_dm_tree_node(dtree, major, minor);
3d0480ed
AK
937}
938
165e4a11
AK
939/*
940 * Set uuid to NULL for root of tree.
941 */
b4f1578f 942struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
943 const char *uuid)
944{
945 if (!uuid || !*uuid)
b4f1578f 946 return &dtree->root;
165e4a11 947
b4f1578f 948 return _find_dm_tree_node_by_uuid(dtree, uuid);
165e4a11
AK
949}
950
3d0480ed
AK
951/*
952 * First time set *handle to NULL.
953 * Set inverted to invert the tree.
954 */
b4f1578f 955struct dm_tree_node *dm_tree_next_child(void **handle,
04bde319
ZK
956 const struct dm_tree_node *parent,
957 uint32_t inverted)
3d0480ed 958{
2c44337b 959 struct dm_list **dlink = (struct dm_list **) handle;
04bde319 960 const struct dm_list *use_list;
3d0480ed
AK
961
962 if (inverted)
963 use_list = &parent->used_by;
964 else
965 use_list = &parent->uses;
966
967 if (!*dlink)
2c44337b 968 *dlink = dm_list_first(use_list);
3d0480ed 969 else
2c44337b 970 *dlink = dm_list_next(use_list, *dlink);
3d0480ed 971
2c44337b 972 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
3d0480ed
AK
973}
974
3e8c6b73 975/*
a6d97ede 976 * Deactivate a device with its dependencies if the uuid prefix matches.
3e8c6b73 977 */
db208f51
AK
978static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
979 struct dm_info *info)
3e8c6b73
AK
980{
981 struct dm_task *dmt;
982 int r;
983
984 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
985 log_error("_info_by_dev: dm_task creation failed");
986 return 0;
987 }
988
989 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
990 log_error("_info_by_dev: Failed to set device number");
991 dm_task_destroy(dmt);
992 return 0;
993 }
994
db208f51
AK
995 if (!with_open_count && !dm_task_no_open_count(dmt))
996 log_error("Failed to disable open_count");
997
3e8c6b73
AK
998 if ((r = dm_task_run(dmt)))
999 r = dm_task_get_info(dmt, info);
1000
1001 dm_task_destroy(dmt);
1002
1003 return r;
1004}
1005
4ce43894 1006static int _check_device_not_in_use(const char *name, struct dm_info *info)
125712be
PR
1007{
1008 if (!info->exists)
1009 return 1;
1010
1011 /* If sysfs is not used, use open_count information only. */
c3e5b497
PR
1012 if (!*dm_sysfs_dir()) {
1013 if (info->open_count) {
4ce43894
ZK
1014 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
1015 name, info->major, info->minor);
c3e5b497
PR
1016 return 0;
1017 }
1018
1019 return 1;
1020 }
125712be
PR
1021
1022 if (dm_device_has_holders(info->major, info->minor)) {
4ce43894
ZK
1023 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
1024 "by another device.", name, info->major, info->minor);
125712be
PR
1025 return 0;
1026 }
1027
1028 if (dm_device_has_mounted_fs(info->major, info->minor)) {
4ce43894
ZK
1029 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
1030 "a filesystem in use.", name, info->major, info->minor);
125712be
PR
1031 return 0;
1032 }
1033
1034 return 1;
1035}
1036
f3ef15ef
ZK
1037/* Check if all parent nodes of given node have open_count == 0 */
1038static int _node_has_closed_parents(struct dm_tree_node *node,
1039 const char *uuid_prefix,
1040 size_t uuid_prefix_len)
1041{
1042 struct dm_tree_link *dlink;
1043 const struct dm_info *dinfo;
1044 struct dm_info info;
1045 const char *uuid;
1046
1047 /* Iterate through parents of this node */
1048 dm_list_iterate_items(dlink, &node->used_by) {
1049 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
1050 stack;
1051 continue;
1052 }
1053
1054 /* Ignore if it doesn't belong to this VG */
1055 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1056 continue;
1057
1058 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
1059 stack; /* FIXME Is this normal? */
1060 return 0;
1061 }
1062
1063 /* Refresh open_count */
1064 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
1065 !info.exists)
1066 continue;
1067
eb418883
ZK
1068 if (info.open_count) {
1069 log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
1070 dinfo->major, dinfo->minor, info.open_count);
f3ef15ef 1071 return 0;
eb418883 1072 }
f3ef15ef
ZK
1073 }
1074
1075 return 1;
1076}
1077
/*
 * Remove (deactivate) a device-mapper device identified by major:minor.
 *
 * cookie/udev_flags are passed through to udev synchronisation; when
 * 'retry' is set the kernel is asked to retry the remove if the device
 * is briefly busy.  Returns 1 on success, 0 on failure.
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	/* Remove the /dev node ourselves unless udev rules are responsible;
	 * reaches into dmt->cookie_set, which is internal dm_task state. */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
1119
bd90c6b2 1120static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
f16aea9e 1121 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
165e4a11
AK
1122{
1123 struct dm_task *dmt;
1124 int r = 0;
1125
1126 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
1127
1128 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
1129 log_error("Rename dm_task creation failed for %s", old_name);
1130 return 0;
1131 }
1132
1133 if (!dm_task_set_name(dmt, old_name)) {
1134 log_error("Failed to set name for %s rename.", old_name);
1135 goto out;
1136 }
1137
b4f1578f 1138 if (!dm_task_set_newname(dmt, new_name))
40e5fd8b 1139 goto_out;
165e4a11
AK
1140
1141 if (!dm_task_no_open_count(dmt))
1142 log_error("Failed to disable open_count");
1143
f16aea9e 1144 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1145 goto out;
1146
165e4a11
AK
1147 r = dm_task_run(dmt);
1148
1149out:
1150 dm_task_destroy(dmt);
1151
1152 return r;
1153}
1154
165e4a11
AK
1155/* FIXME Merge with _suspend_node? */
1156static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1157 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1158 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1159 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1160{
1161 struct dm_task *dmt;
bd90c6b2 1162 int r = 0;
165e4a11
AK
1163
1164 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1165
1166 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1167 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1168 return 0;
1169 }
1170
0b7d16bc
AK
1171 /* FIXME Kernel should fill in name on return instead */
1172 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1173 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1174 goto out;
0b7d16bc
AK
1175 }
1176
165e4a11
AK
1177 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1178 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1179 goto out;
165e4a11
AK
1180 }
1181
1182 if (!dm_task_no_open_count(dmt))
1183 log_error("Failed to disable open_count");
1184
52b84409
AK
1185 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1186 log_error("Failed to set read ahead");
1187
f16aea9e 1188 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1189 goto_out;
bd90c6b2 1190
9a8f192a
ZK
1191 if (!(r = dm_task_run(dmt)))
1192 goto_out;
1193
1194 if (already_suspended)
1195 dec_suspended();
1196
1197 if (!(r = dm_task_get_info(dmt, newinfo)))
1198 stack;
165e4a11 1199
bd90c6b2 1200out:
165e4a11
AK
1201 dm_task_destroy(dmt);
1202
1203 return r;
1204}
1205
db208f51 1206static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
b9ffd32c 1207 int skip_lockfs, int no_flush, struct dm_info *newinfo)
db208f51
AK
1208{
1209 struct dm_task *dmt;
1210 int r;
1211
b9ffd32c
AK
1212 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1213 name, major, minor,
1214 skip_lockfs ? "" : " with filesystem sync",
6e1898a5 1215 no_flush ? "" : " with device flush");
db208f51
AK
1216
1217 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1218 log_error("Suspend dm_task creation failed for %s", name);
1219 return 0;
1220 }
1221
1222 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1223 log_error("Failed to set device number for %s suspension.", name);
1224 dm_task_destroy(dmt);
1225 return 0;
1226 }
1227
1228 if (!dm_task_no_open_count(dmt))
1229 log_error("Failed to disable open_count");
1230
c55b1410
AK
1231 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1232 log_error("Failed to set skip_lockfs flag.");
1233
b9ffd32c
AK
1234 if (no_flush && !dm_task_no_flush(dmt))
1235 log_error("Failed to set no_flush flag.");
1236
1840aa09
AK
1237 if ((r = dm_task_run(dmt))) {
1238 inc_suspended();
db208f51 1239 r = dm_task_get_info(dmt, newinfo);
1840aa09 1240 }
db208f51 1241
3e8c6b73
AK
1242 dm_task_destroy(dmt);
1243
1244 return r;
1245}
1246
25e6ab87 1247static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
e0ea24be
ZK
1248{
1249 struct dm_task *dmt;
1250 int r = 0;
1251 uint64_t start, length;
1252 char *type = NULL;
1253 char *params = NULL;
e0ea24be 1254
25e6ab87
ZK
1255 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1256 return_0;
e0ea24be 1257
25e6ab87
ZK
1258 if (!dm_task_set_major(dmt, dnode->info.major) ||
1259 !dm_task_set_minor(dmt, dnode->info.minor)) {
1260 log_error("Failed to set major minor.");
1261 goto out;
e0ea24be
ZK
1262 }
1263
25e6ab87
ZK
1264 if (!dm_task_run(dmt))
1265 goto_out;
1266
1267 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1268
1269 if (type && (strcmp(type, "thin-pool") != 0)) {
c590a9cd 1270 log_error("Expected thin-pool target for %d:%d and got %s.",
25e6ab87 1271 dnode->info.major, dnode->info.minor, type);
e0ea24be
ZK
1272 goto out;
1273 }
1274
25e6ab87 1275 if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
c590a9cd 1276 log_error("Failed to parse transaction_id from %s.", params);
e0ea24be
ZK
1277 goto out;
1278 }
1279
25e6ab87 1280 log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);
e0ea24be 1281
25e6ab87
ZK
1282 r = 1;
1283out:
1284 dm_task_destroy(dmt);
e0ea24be 1285
25e6ab87
ZK
1286 return r;
1287}
e0ea24be 1288
/*
 * Send one queued thin-pool message (create_thin/create_snap/delete/trim/
 * set_transaction_id) to the kernel via a target message ioctl.
 * Returns 1 on success, 0 on failure.
 */
static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];	/* Longest message comfortably fits (two 64-bit ints). */
	int r;

	/* Render the message into its textual kernel form. */
	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	default:
		r = -1;	/* Unknown message type - treated as format failure. */
	}

	if (r < 0) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task */
	/* Some messages legitimately fail with a known errno; pass it on. */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
1356
/*
 * Bring a thin-pool node's kernel transaction id up to date by sending its
 * queued thin messages.
 *
 * Only acts on existing, single-segment thin-pool nodes whose uuid matches
 * uuid_prefix.  The messages are sent only when the kernel id is exactly
 * one behind the expected id; any other mismatch, or a message failure,
 * deactivates the node and returns 0.  Returns 1 otherwise.
 */
static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	/* Only single-segment, existing devices qualify. */
	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		goto_bad;

	if (trans_id == seg->transaction_id)
		return 1; /* In sync - skip messages */

	/* Messages can only advance the id by one; anything else is fatal. */
	if (trans_id != (seg->transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, seg->transaction_id - 1);
		goto bad; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			goto_bad;

	return 1;
bad:
	/* Try to deactivate */
	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
		log_error("Failed to deactivate %s", dnode->name);

	return 0;
}
1405
/*
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
/*
 * Recursively deactivate every child of dnode whose uuid matches
 * uuid_prefix.  'level' is 0 for the top-level call; internal (deeper)
 * nodes that are still open are silently skipped, while open top-level
 * nodes either fail or are retried depending on dtree->retry_remove.
 * Returns 1 if everything selected was deactivated, 0 otherwise.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			/* Skip internal non-toplevel opened nodes */
			if (level)
				continue;

			/* When retry is not allowed, error */
			if (!child->dtree->retry_remove) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major, info.minor);
				r = 0;
				continue;
			}

			/* Check toplevel node for holders/mounted fs */
			if (!_check_device_not_in_use(name, &info)) {
				stack;
				r = 0;
				continue;
			}
			/* Go on with retry */
		}

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		/* Retry the remove only for top-level nodes. */
		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      (level == 0) ? child->dtree->retry_remove : 0)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			dec_suspended();

		/* Recurse into this child's own children. */
		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}
db208f51 1507
/*
 * Public wrapper: deactivate matching children of dnode, starting the
 * recursion at the top level.
 */
int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	const unsigned top_level = 0;

	return _dm_tree_deactivate_children(dnode, uuid_prefix,
					    uuid_prefix_len, top_level);
}
1514
c55b1410
AK
1515void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1516{
1517 dnode->dtree->skip_lockfs = 1;
1518}
1519
b9ffd32c
AK
1520void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1521{
1522 dnode->dtree->no_flush = 1;
1523}
1524
787200ef
PR
1525void dm_tree_retry_remove(struct dm_tree_node *dnode)
1526{
1527 dnode->dtree->retry_remove = 1;
1528}
1529
/*
 * Suspend every child of dnode whose uuid matches uuid_prefix, then
 * recurse into grandchildren.  Nodes are only suspended once their
 * immediate parents are suspended.  Returns 1 if every selected node
 * was suspended, 0 if any suspension failed.
 */
int dm_tree_suspend_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		/* Skip nonexistent or already-suspended devices. */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}
1605
/*
 * Activate (resume) every child of dnode whose uuid matches uuid_prefix.
 *
 * Deepest children are activated first (recursion), then nodes at this
 * level are processed in three activation-priority passes (0..2).
 * Pending renames are applied before resuming.  Returns 1 on success,
 * 0 if a rename or resume failed.
 */
int dm_tree_activate_children(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info newinfo;
	const char *name;
	const char *uuid;
	int priority;

	/* Activate children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	handle = NULL;

	/* Three passes: lower activation_priority values resume first. */
	for (priority = 0; priority < 3; priority++) {
		while ((child = dm_tree_next_child(&handle, dnode, 0))) {
			if (priority != child->activation_priority)
				continue;

			if (!(uuid = dm_tree_node_get_uuid(child))) {
				stack;
				continue;
			}

			if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
				continue;

			if (!(name = dm_tree_node_get_name(child))) {
				stack;
				continue;
			}

			/* Rename? */
			if (child->props.new_name) {
				if (!_rename_node(name, child->props.new_name, child->info.major,
						  child->info.minor, &child->dtree->cookie,
						  child->udev_flags)) {
					log_error("Failed to rename %s (%" PRIu32
						  ":%" PRIu32 ") to %s", name, child->info.major,
						  child->info.minor, child->props.new_name);
					return 0;
				}
				child->name = child->props.new_name;
				child->props.new_name = NULL;
			}

			/* Nothing to resume if no inactive table and not suspended. */
			if (!child->info.inactive_table && !child->info.suspended)
				continue;

			if (!_resume_node(child->name, child->info.major, child->info.minor,
					  child->props.read_ahead, child->props.read_ahead_flags,
					  &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
				log_error("Unable to resume %s (%" PRIu32
					  ":%" PRIu32 ")", child->name, child->info.major,
					  child->info.minor);
				r = 0;
				continue;
			}

			/* Update cached info */
			child->info = newinfo;
		}
	}

	handle = NULL;

	return r;
}
1689
b4f1578f 1690static int _create_node(struct dm_tree_node *dnode)
165e4a11
AK
1691{
1692 int r = 0;
1693 struct dm_task *dmt;
1694
1695 log_verbose("Creating %s", dnode->name);
1696
1697 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1698 log_error("Create dm_task creation failed for %s", dnode->name);
1699 return 0;
1700 }
1701
1702 if (!dm_task_set_name(dmt, dnode->name)) {
1703 log_error("Failed to set device name for %s", dnode->name);
1704 goto out;
1705 }
1706
1707 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1708 log_error("Failed to set uuid for %s", dnode->name);
1709 goto out;
1710 }
1711
1712 if (dnode->props.major &&
1713 (!dm_task_set_major(dmt, dnode->props.major) ||
1714 !dm_task_set_minor(dmt, dnode->props.minor))) {
1715 log_error("Failed to set device number for %s creation.", dnode->name);
1716 goto out;
1717 }
1718
1719 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1720 log_error("Failed to set read only flag for %s", dnode->name);
1721 goto out;
1722 }
1723
1724 if (!dm_task_no_open_count(dmt))
1725 log_error("Failed to disable open_count");
1726
1727 if ((r = dm_task_run(dmt)))
1728 r = dm_task_get_info(dmt, &dnode->info);
1729
1730out:
1731 dm_task_destroy(dmt);
1732
1733 return r;
1734}
1735
1736
b4f1578f 1737static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1738{
1739 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1740 log_error("Failed to format %s device number for %s as dm "
1741 "target (%u,%u)",
1742 node->name, node->uuid, node->info.major, node->info.minor);
1743 return 0;
165e4a11
AK
1744 }
1745
1746 return 1;
1747}
1748
/*
 * Emit a formatted string into the 'params' buffer at offset p, advancing
 * p past what was written.  On overflow of 'paramsize' it logs the call
 * stack and makes the enclosing function return -1.
 */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
ffa9b6a5 1759
/*
 * _emit_areas_line
 *
 * Append the per-area (device) portion of a target table line to 'params',
 * formatted according to the segment type: replicator-dev areas carry site
 * index and log parameters, RAID areas are bare device names (or "-" for a
 * missing device), and the default form is "device offset".
 *
 * Returns: 1 on success, 0 on failure
 */
static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;
	const char *logtype, *synctype;
	unsigned log_parm_count;

	dm_list_iterate_items(area, &seg->areas) {
		switch (seg->type) {
		case SEG_REPLICATOR_DEV:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
			if (first_time)
				EMIT_PARAMS(*pos, " nolog 0");
			else {
				/* Remote devices */
				log_parm_count = (area->flags &
						  (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;

				if (!area->slog) {
					devbuf[0] = 0;		/* Only core log parameters */
					logtype = "core";
				} else {
					devbuf[0] = ' ';	/* Extra space before device name */
					if (!_build_dev_string(devbuf + 1,
							       sizeof(devbuf) - 1,
							       area->slog))
						return_0;
					logtype = "disk";
					log_parm_count++;	/* Extra sync log device name parameter */
				}

				EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
					    log_parm_count, devbuf, area->region_size);

				synctype = (area->flags & DM_NOSYNC) ?
					   " nosync" : (area->flags & DM_FORCESYNC) ?
					   " sync" : NULL;

				if (synctype)
					EMIT_PARAMS(*pos, "%s", synctype);
			}
			break;
		case SEG_RAID1:
		case SEG_RAID4:
		case SEG_RAID5_LA:
		case SEG_RAID5_RA:
		case SEG_RAID5_LS:
		case SEG_RAID5_RS:
		case SEG_RAID6_ZR:
		case SEG_RAID6_NR:
		case SEG_RAID6_NC:
			/* Missing RAID areas are emitted as "-". */
			if (!area->dev_node) {
				EMIT_PARAMS(*pos, " -");
				break;
			}
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %s", devbuf);
			break;
		default:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			/* "device offset"; no leading space before the first area. */
			EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
				    devbuf, area->offset);
		}

		first_time = 0;
	}

	return 1;
}
1844
/*
 * Build the table-line parameters for a replicator control segment:
 * the replicator log (type, device, size) followed by one entry per
 * remote site with its mode and optional fall-behind/timeout argument.
 * Returns 1 on success, 0 on failure.
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* Size of the replicator log comes from its own (single) segment. */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 base args, +2 when any fall-behind/timeout arg is present. */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		/* At most one of the three constraints is emitted. */
		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1880
/*
 * Build the table-line parameters for a mirror segment, including the
 * mirror log specification and feature arguments.  Which keywords are
 * used (block_on_error vs handle_errors, clustered-* vs userspace log)
 * depends on the running kernel version.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table. It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types. Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	/* Emit the mirror leg devices themselves. */
	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
2008
cac52ca4
JEB
2009static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
2010 uint32_t minor, struct load_segment *seg,
2011 uint64_t *seg_start, char *params,
2012 size_t paramsize)
2013{
ad2432dc 2014 uint32_t i;
cac52ca4
JEB
2015 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
2016 int pos = 0;
2017
2018 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
2019 param_count++;
2020
2021 if (seg->region_size)
2022 param_count += 2;
2023
ad2432dc
MB
2024 /* rebuilds is 64-bit */
2025 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
2026 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 2027
cac52ca4
JEB
2028 if ((seg->type == SEG_RAID1) && seg->stripe_size)
2029 log_error("WARNING: Ignoring RAID1 stripe size");
2030
2031 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
2032 param_count, seg->stripe_size);
2033
2034 if (seg->flags & DM_NOSYNC)
2035 EMIT_PARAMS(pos, " nosync");
2036 else if (seg->flags & DM_FORCESYNC)
2037 EMIT_PARAMS(pos, " sync");
2038
2039 if (seg->region_size)
2040 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2041
f439e65b
JEB
2042 for (i = 0; i < (seg->area_count / 2); i++)
2043 if (seg->rebuilds & (1 << i))
2044 EMIT_PARAMS(pos, " rebuild %u", i);
2045
cac52ca4
JEB
2046 /* Print number of metadata/data device pairs */
2047 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2048
2049 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2050 return_0;
2051
2052 return 1;
2053}
2054
8f26e18c
JEB
2055static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
2056 uint32_t minor, struct load_segment *seg,
2057 uint64_t *seg_start, char *params,
2058 size_t paramsize)
2059{
2060 int pos = 0;
2061 int r;
cac52ca4 2062 int target_type_is_raid = 0;
8f26e18c 2063 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
4251236e 2064 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 2065
8f26e18c
JEB
2066 switch(seg->type) {
2067 case SEG_ERROR:
2068 case SEG_ZERO:
2069 case SEG_LINEAR:
2070 break;
2071 case SEG_MIRRORED:
2072 /* Mirrors are pretty complicated - now in separate function */
beecb1e1 2073 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
3c74075f
JEB
2074 if (!r)
2075 return_0;
165e4a11 2076 break;
b262f3e1
ZK
2077 case SEG_REPLICATOR:
2078 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
2079 &pos)) <= 0) {
2080 stack;
2081 return r;
2082 }
2083 break;
2084 case SEG_REPLICATOR_DEV:
2085 if (!seg->replicator || !_build_dev_string(originbuf,
2086 sizeof(originbuf),
2087 seg->replicator))
2088 return_0;
2089
2090 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
2091 break;
165e4a11 2092 case SEG_SNAPSHOT:
aa6f4e51 2093 case SEG_SNAPSHOT_MERGE:
b4f1578f
AK
2094 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2095 return_0;
2096 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
2097 return_0;
ffa9b6a5
ZK
2098 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
2099 seg->persistent ? 'P' : 'N', seg->chunk_size);
165e4a11
AK
2100 break;
2101 case SEG_SNAPSHOT_ORIGIN:
b4f1578f
AK
2102 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2103 return_0;
ffa9b6a5 2104 EMIT_PARAMS(pos, "%s", originbuf);
165e4a11
AK
2105 break;
2106 case SEG_STRIPED:
609faae9 2107 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
165e4a11 2108 break;
12ca060e 2109 case SEG_CRYPT:
609faae9 2110 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
12ca060e
MB
2111 seg->chainmode ? "-" : "", seg->chainmode ?: "",
2112 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
2113 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
2114 seg->iv_offset : *seg_start);
2115 break;
cac52ca4
JEB
2116 case SEG_RAID1:
2117 case SEG_RAID4:
2118 case SEG_RAID5_LA:
2119 case SEG_RAID5_RA:
2120 case SEG_RAID5_LS:
2121 case SEG_RAID5_RS:
2122 case SEG_RAID6_ZR:
2123 case SEG_RAID6_NR:
2124 case SEG_RAID6_NC:
2125 target_type_is_raid = 1;
2126 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
2127 params, paramsize);
2128 if (!r)
2129 return_0;
2130
2131 break;
4251236e
ZK
2132 case SEG_THIN_POOL:
2133 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
2134 return_0;
2135 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2136 return_0;
2137 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
e9156c2b 2138 seg->data_block_size, seg->low_water_mark,
ac08d9c0 2139 seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
4251236e
ZK
2140 break;
2141 case SEG_THIN:
2142 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2143 return_0;
2144 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
2145 break;
165e4a11
AK
2146 }
2147
2148 switch(seg->type) {
2149 case SEG_ERROR:
b262f3e1 2150 case SEG_REPLICATOR:
165e4a11
AK
2151 case SEG_SNAPSHOT:
2152 case SEG_SNAPSHOT_ORIGIN:
aa6f4e51 2153 case SEG_SNAPSHOT_MERGE:
165e4a11 2154 case SEG_ZERO:
4251236e
ZK
2155 case SEG_THIN_POOL:
2156 case SEG_THIN:
165e4a11 2157 break;
12ca060e 2158 case SEG_CRYPT:
165e4a11 2159 case SEG_LINEAR:
b262f3e1 2160 case SEG_REPLICATOR_DEV:
165e4a11
AK
2161 case SEG_STRIPED:
2162 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
2163 stack;
2164 return r;
2165 }
b6793963
AK
2166 if (!params[0]) {
2167 log_error("No parameters supplied for %s target "
2168 "%u:%u.", dm_segtypes[seg->type].target,
812e10ac 2169 major, minor);
b6793963
AK
2170 return 0;
2171 }
165e4a11
AK
2172 break;
2173 }
2174
4b2cae46
AK
2175 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
2176 " %" PRIu64 " %s %s", major, minor,
f439e65b
JEB
2177 *seg_start, seg->size, target_type_is_raid ? "raid" :
2178 dm_segtypes[seg->type].target, params);
165e4a11 2179
cac52ca4
JEB
2180 if (!dm_task_add_target(dmt, *seg_start, seg->size,
2181 target_type_is_raid ? "raid" :
2182 dm_segtypes[seg->type].target, params))
b4f1578f 2183 return_0;
165e4a11
AK
2184
2185 *seg_start += seg->size;
2186
2187 return 1;
2188}
2189
ffa9b6a5
ZK
2190#undef EMIT_PARAMS
2191
4b2cae46
AK
2192static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2193 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2194{
2195 char *params;
2196 size_t paramsize = 4096;
2197 int ret;
2198
2199 do {
2200 if (!(params = dm_malloc(paramsize))) {
2201 log_error("Insufficient space for target parameters.");
2202 return 0;
2203 }
2204
12ea7cb1 2205 params[0] = '\0';
4b2cae46
AK
2206 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2207 params, paramsize);
165e4a11
AK
2208 dm_free(params);
2209
2210 if (!ret)
2211 stack;
2212
2213 if (ret >= 0)
2214 return ret;
2215
2216 log_debug("Insufficient space in params[%" PRIsize_t
2217 "] for target parameters.", paramsize);
2218
2219 paramsize *= 2;
2220 } while (paramsize < MAX_TARGET_PARAMSIZE);
2221
2222 log_error("Target parameter size too big. Aborting.");
2223 return 0;
2224}
2225
b4f1578f 2226static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2227{
2228 int r = 0;
2229 struct dm_task *dmt;
2230 struct load_segment *seg;
df390f17 2231 uint64_t seg_start = 0, existing_table_size;
165e4a11 2232
4b2cae46
AK
2233 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2234 dnode->info.major, dnode->info.minor);
165e4a11
AK
2235
2236 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2237 log_error("Reload dm_task creation failed for %s", dnode->name);
2238 return 0;
2239 }
2240
2241 if (!dm_task_set_major(dmt, dnode->info.major) ||
2242 !dm_task_set_minor(dmt, dnode->info.minor)) {
2243 log_error("Failed to set device number for %s reload.", dnode->name);
2244 goto out;
2245 }
2246
2247 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2248 log_error("Failed to set read only flag for %s", dnode->name);
2249 goto out;
2250 }
2251
2252 if (!dm_task_no_open_count(dmt))
2253 log_error("Failed to disable open_count");
2254
2c44337b 2255 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2256 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2257 seg, &seg_start))
b4f1578f 2258 goto_out;
165e4a11 2259
ec289b64
AK
2260 if (!dm_task_suppress_identical_reload(dmt))
2261 log_error("Failed to suppress reload of identical tables.");
2262
2263 if ((r = dm_task_run(dmt))) {
165e4a11 2264 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2265 if (r && !dnode->info.inactive_table)
2266 log_verbose("Suppressed %s identical table reload.",
2267 dnode->name);
bb875bb9 2268
df390f17 2269 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2270 if ((dnode->props.size_changed =
df390f17 2271 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2272 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2273 PRIu64 " for %s", existing_table_size,
bb875bb9 2274 seg_start, dnode->name);
df390f17
AK
2275 /*
2276 * Kernel usually skips size validation on zero-length devices
2277 * now so no need to preload them.
2278 */
2279 /* FIXME In which kernel version did this begin? */
2280 if (!existing_table_size && dnode->props.delay_resume_if_new)
2281 dnode->props.size_changed = 0;
2282 }
ec289b64 2283 }
165e4a11
AK
2284
2285 dnode->props.segment_count = 0;
2286
2287out:
2288 dm_task_destroy(dmt);
2289
2290 return r;
165e4a11
AK
2291}
2292
b4f1578f 2293int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2294 const char *uuid_prefix,
2295 size_t uuid_prefix_len)
165e4a11 2296{
2ca6b865 2297 int r = 1;
165e4a11 2298 void *handle = NULL;
b4f1578f 2299 struct dm_tree_node *child;
165e4a11 2300 struct dm_info newinfo;
566515c0 2301 int update_devs_flag = 0;
165e4a11
AK
2302
2303 /* Preload children first */
b4f1578f 2304 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2305 /* Skip existing non-device-mapper devices */
2306 if (!child->info.exists && child->info.major)
2307 continue;
2308
2309 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2310 if (child->info.exists &&
2311 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2312 continue;
2313
b4f1578f 2314 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2315 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2316 return_0;
165e4a11 2317
165e4a11 2318 /* FIXME Cope if name exists with no uuid? */
3d6782b3
ZK
2319 if (!child->info.exists && !_create_node(child))
2320 return_0;
165e4a11 2321
3d6782b3
ZK
2322 if (!child->info.inactive_table &&
2323 child->props.segment_count &&
2324 !_load_node(child))
2325 return_0;
165e4a11 2326
eb91c4ee
MB
2327 /* Propagate device size change change */
2328 if (child->props.size_changed)
2329 dnode->props.size_changed = 1;
2330
bb875bb9 2331 /* Resume device immediately if it has parents and its size changed */
3776c494 2332 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2333 continue;
2334
7707ea90
AK
2335 if (!child->info.inactive_table && !child->info.suspended)
2336 continue;
2337
fc795d87 2338 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2339 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2340 &newinfo, &child->dtree->cookie, child->udev_flags,
2341 child->info.suspended)) {
165e4a11 2342 log_error("Unable to resume %s (%" PRIu32
fc795d87 2343 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2344 child->info.minor);
2ca6b865 2345 r = 0;
165e4a11
AK
2346 continue;
2347 }
2348
2349 /* Update cached info */
2350 child->info = newinfo;
bbcd37e4
ZK
2351 if (child->props.send_messages &&
2352 !(r = _node_send_messages(child, uuid_prefix, uuid_prefix_len))) {
2353 stack;
2354 continue;
2355 }
566515c0
PR
2356 /*
2357 * Prepare for immediate synchronization with udev and flush all stacked
2358 * dev node operations if requested by immediate_dev_node property. But
2359 * finish processing current level in the tree first.
2360 */
2361 if (child->props.immediate_dev_node)
2362 update_devs_flag = 1;
165e4a11
AK
2363 }
2364
bbcd37e4
ZK
2365 if (r && dnode->props.send_messages &&
2366 !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
2367 stack;
165e4a11 2368
566515c0
PR
2369 if (update_devs_flag) {
2370 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2371 stack;
2372 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2373 }
2374
11f64f0a 2375 if (r && !_node_send_messages(dnode, uuid_prefix, uuid_prefix_len)) {
25e6ab87
ZK
2376 stack;
2377 if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
2378 log_error("Failed to deactivate %s", dnode->name);
2379 r = 0;
2380 }
2381
2ca6b865 2382 return r;
165e4a11
AK
2383}
2384
165e4a11
AK
2385/*
2386 * Returns 1 if unsure.
2387 */
b4f1578f 2388int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2389 const char *uuid_prefix,
2390 size_t uuid_prefix_len)
2391{
2392 void *handle = NULL;
b4f1578f 2393 struct dm_tree_node *child = dnode;
165e4a11
AK
2394 const char *uuid;
2395
b4f1578f
AK
2396 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2397 if (!(uuid = dm_tree_node_get_uuid(child))) {
2398 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2399 return 1;
2400 }
2401
87f98002 2402 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2403 return 1;
2404
b4f1578f
AK
2405 if (dm_tree_node_num_children(child, 0))
2406 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2407 }
2408
2409 return 0;
2410}
2411
2412/*
2413 * Target functions
2414 */
b4f1578f 2415static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2416{
2417 struct load_segment *seg;
2418
b4f1578f
AK
2419 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2420 log_error("dtree node segment allocation failed");
165e4a11
AK
2421 return NULL;
2422 }
2423
2424 seg->type = type;
2425 seg->size = size;
2426 seg->area_count = 0;
2c44337b 2427 dm_list_init(&seg->areas);
165e4a11
AK
2428 seg->stripe_size = 0;
2429 seg->persistent = 0;
2430 seg->chunk_size = 0;
2431 seg->cow = NULL;
2432 seg->origin = NULL;
aa6f4e51 2433 seg->merge = NULL;
165e4a11 2434
2c44337b 2435 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2436 dnode->props.segment_count++;
2437
2438 return seg;
2439}
2440
b4f1578f 2441int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2442 uint64_t size,
2443 const char *origin_uuid)
165e4a11
AK
2444{
2445 struct load_segment *seg;
b4f1578f 2446 struct dm_tree_node *origin_node;
165e4a11 2447
b4f1578f
AK
2448 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2449 return_0;
165e4a11 2450
b4f1578f 2451 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2452 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2453 return 0;
2454 }
2455
2456 seg->origin = origin_node;
b4f1578f
AK
2457 if (!_link_tree_nodes(dnode, origin_node))
2458 return_0;
165e4a11 2459
56c28292
AK
2460 /* Resume snapshot origins after new snapshots */
2461 dnode->activation_priority = 1;
2462
165e4a11
AK
2463 return 1;
2464}
2465
aa6f4e51
MS
2466static int _add_snapshot_target(struct dm_tree_node *node,
2467 uint64_t size,
2468 const char *origin_uuid,
2469 const char *cow_uuid,
2470 const char *merge_uuid,
2471 int persistent,
2472 uint32_t chunk_size)
165e4a11
AK
2473{
2474 struct load_segment *seg;
aa6f4e51
MS
2475 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2476 unsigned seg_type;
2477
2478 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
165e4a11 2479
aa6f4e51 2480 if (!(seg = _add_segment(node, seg_type, size)))
b4f1578f 2481 return_0;
165e4a11 2482
b4f1578f 2483 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
165e4a11
AK
2484 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2485 return 0;
2486 }
2487
2488 seg->origin = origin_node;
b4f1578f
AK
2489 if (!_link_tree_nodes(node, origin_node))
2490 return_0;
165e4a11 2491
b4f1578f 2492 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
aa6f4e51 2493 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
165e4a11
AK
2494 return 0;
2495 }
2496
2497 seg->cow = cow_node;
b4f1578f
AK
2498 if (!_link_tree_nodes(node, cow_node))
2499 return_0;
165e4a11
AK
2500
2501 seg->persistent = persistent ? 1 : 0;
2502 seg->chunk_size = chunk_size;
2503
aa6f4e51
MS
2504 if (merge_uuid) {
2505 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2506 /* not a pure error, merging snapshot may have been deactivated */
2507 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2508 } else {
2509 seg->merge = merge_node;
2510 /* must not link merging snapshot, would undermine activation_priority below */
2511 }
2512
2513 /* Resume snapshot-merge (acting origin) after other snapshots */
2514 node->activation_priority = 1;
2515 if (seg->merge) {
2516 /* Resume merging snapshot after snapshot-merge */
2517 seg->merge->activation_priority = 2;
2518 }
2519 }
2520
165e4a11
AK
2521 return 1;
2522}
2523
aa6f4e51
MS
2524
2525int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2526 uint64_t size,
2527 const char *origin_uuid,
2528 const char *cow_uuid,
2529 int persistent,
2530 uint32_t chunk_size)
2531{
2532 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2533 NULL, persistent, chunk_size);
2534}
2535
2536int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2537 uint64_t size,
2538 const char *origin_uuid,
2539 const char *cow_uuid,
2540 const char *merge_uuid,
2541 uint32_t chunk_size)
2542{
2543 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2544 merge_uuid, 1, chunk_size);
2545}
2546
b4f1578f 2547int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2548 uint64_t size)
165e4a11 2549{
b4f1578f
AK
2550 if (!_add_segment(node, SEG_ERROR, size))
2551 return_0;
165e4a11
AK
2552
2553 return 1;
2554}
2555
b4f1578f 2556int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2557 uint64_t size)
165e4a11 2558{
b4f1578f
AK
2559 if (!_add_segment(node, SEG_ZERO, size))
2560 return_0;
165e4a11
AK
2561
2562 return 1;
2563}
2564
b4f1578f 2565int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2566 uint64_t size)
165e4a11 2567{
b4f1578f
AK
2568 if (!_add_segment(node, SEG_LINEAR, size))
2569 return_0;
165e4a11
AK
2570
2571 return 1;
2572}
2573
b4f1578f 2574int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2575 uint64_t size,
2576 uint32_t stripe_size)
165e4a11
AK
2577{
2578 struct load_segment *seg;
2579
b4f1578f
AK
2580 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2581 return_0;
165e4a11
AK
2582
2583 seg->stripe_size = stripe_size;
2584
2585 return 1;
2586}
2587
12ca060e
MB
2588int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2589 uint64_t size,
2590 const char *cipher,
2591 const char *chainmode,
2592 const char *iv,
2593 uint64_t iv_offset,
2594 const char *key)
2595{
2596 struct load_segment *seg;
2597
2598 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2599 return_0;
2600
2601 seg->cipher = cipher;
2602 seg->chainmode = chainmode;
2603 seg->iv = iv;
2604 seg->iv_offset = iv_offset;
2605 seg->key = key;
2606
2607 return 1;
2608}
2609
b4f1578f 2610int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2611 uint32_t region_size,
08e64ce5 2612 unsigned clustered,
165e4a11 2613 const char *log_uuid,
ce7ed2c0
AK
2614 unsigned area_count,
2615 uint32_t flags)
165e4a11 2616{
908db078 2617 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2618 struct load_segment *seg;
2619
2620 if (!node->props.segment_count) {
b8175c33 2621 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2622 return 0;
2623 }
2624
2c44337b 2625 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2626
24b026e3 2627 if (log_uuid) {
67b25ed4
AK
2628 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2629 log_error("log uuid pool_strdup failed");
2630 return 0;
2631 }
df390f17
AK
2632 if ((flags & DM_CORELOG))
2633 /* For pvmove: immediate resume (for size validation) isn't needed. */
2634 node->props.delay_resume_if_new = 1;
2635 else {
9723090c
AK
2636 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2637 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2638 return 0;
2639 }
2640
566515c0
PR
2641 if (clustered)
2642 log_node->props.immediate_dev_node = 1;
2643
0a99713e
AK
2644 /* The kernel validates the size of disk logs. */
2645 /* FIXME Propagate to any devices below */
2646 log_node->props.delay_resume_if_new = 0;
2647
9723090c
AK
2648 if (!_link_tree_nodes(node, log_node))
2649 return_0;
2650 }
165e4a11
AK
2651 }
2652
2653 seg->log = log_node;
165e4a11
AK
2654 seg->region_size = region_size;
2655 seg->clustered = clustered;
2656 seg->mirror_area_count = area_count;
dbcb64b8 2657 seg->flags = flags;
165e4a11
AK
2658
2659 return 1;
2660}
2661
b4f1578f 2662int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2663 uint64_t size)
165e4a11 2664{
cbecd3cd 2665 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2666 return_0;
165e4a11
AK
2667
2668 return 1;
2669}
2670
cac52ca4
JEB
2671int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2672 uint64_t size,
2673 const char *raid_type,
2674 uint32_t region_size,
2675 uint32_t stripe_size,
f439e65b 2676 uint64_t rebuilds,
cac52ca4
JEB
2677 uint64_t reserved2)
2678{
2679 int i;
2680 struct load_segment *seg = NULL;
2681
2682 for (i = 0; dm_segtypes[i].target && !seg; i++)
2683 if (!strcmp(raid_type, dm_segtypes[i].target))
2684 if (!(seg = _add_segment(node,
2685 dm_segtypes[i].type, size)))
2686 return_0;
2687
b2fa9b43
JEB
2688 if (!seg)
2689 return_0;
2690
cac52ca4
JEB
2691 seg->region_size = region_size;
2692 seg->stripe_size = stripe_size;
2693 seg->area_count = 0;
f439e65b 2694 seg->rebuilds = rebuilds;
cac52ca4
JEB
2695
2696 return 1;
2697}
2698
b262f3e1
ZK
2699int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2700 uint64_t size,
2701 const char *rlog_uuid,
2702 const char *rlog_type,
2703 unsigned rsite_index,
2704 dm_replicator_mode_t mode,
2705 uint32_t async_timeout,
2706 uint64_t fall_behind_data,
2707 uint32_t fall_behind_ios)
2708{
2709 struct load_segment *rseg;
2710 struct replicator_site *rsite;
2711
2712 /* Local site0 - adds replicator segment and links rlog device */
2713 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2714 if (node->props.segment_count) {
2715 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2716 return 0;
2717 }
2718
2719 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2720 return_0;
2721
2722 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2723 log_error("Missing replicator log uuid %s.", rlog_uuid);
2724 return 0;
2725 }
2726
2727 if (!_link_tree_nodes(node, rseg->log))
2728 return_0;
2729
2730 if (strcmp(rlog_type, "ringbuffer") != 0) {
2731 log_error("Unsupported replicator log type %s.", rlog_type);
2732 return 0;
2733 }
2734
2735 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2736 return_0;
2737
2738 dm_list_init(&rseg->rsites);
2739 rseg->rdevice_count = 0;
2740 node->activation_priority = 1;
2741 }
2742
2743 /* Add site to segment */
2744 if (mode == DM_REPLICATOR_SYNC
2745 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2746 log_error("Async parameters passed for synchronnous replicator.");
2747 return 0;
2748 }
2749
2750 if (node->props.segment_count != 1) {
2751 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2752 return 0;
2753 }
2754
2755 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2756 if (rseg->type != SEG_REPLICATOR) {
2757 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2758 dm_segtypes[rseg->type].target);
2759 return 0;
2760 }
2761
2762 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2763 log_error("Failed to allocate remote site segment.");
2764 return 0;
2765 }
2766
2767 dm_list_add(&rseg->rsites, &rsite->list);
2768 rseg->rsite_count++;
2769
2770 rsite->mode = mode;
2771 rsite->async_timeout = async_timeout;
2772 rsite->fall_behind_data = fall_behind_data;
2773 rsite->fall_behind_ios = fall_behind_ios;
2774 rsite->rsite_index = rsite_index;
2775
2776 return 1;
2777}
2778
2779/* Appends device node to Replicator */
2780int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2781 uint64_t size,
2782 const char *replicator_uuid,
2783 uint64_t rdevice_index,
2784 const char *rdev_uuid,
2785 unsigned rsite_index,
2786 const char *slog_uuid,
2787 uint32_t slog_flags,
2788 uint32_t slog_region_size)
2789{
2790 struct seg_area *area;
2791 struct load_segment *rseg;
2792 struct load_segment *rep_seg;
2793
2794 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2795 /* Site index for local target */
2796 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2797 return_0;
2798
2799 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2800 log_error("Missing replicator uuid %s.", replicator_uuid);
2801 return 0;
2802 }
2803
2804 /* Local slink0 for replicator must be always initialized first */
2805 if (rseg->replicator->props.segment_count != 1) {
2806 log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
2807 return 0;
2808 }
2809
2810 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2811 if (rep_seg->type != SEG_REPLICATOR) {
2812 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2813 dm_segtypes[rep_seg->type].target);
2814 return 0;
2815 }
2816 rep_seg->rdevice_count++;
2817
2818 if (!_link_tree_nodes(node, rseg->replicator))
2819 return_0;
2820
2821 rseg->rdevice_index = rdevice_index;
2822 } else {
2823 /* Local slink0 for replicator must be always initialized first */
2824 if (node->props.segment_count != 1) {
2825 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
2826 return 0;
2827 }
2828
2829 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2830 if (rseg->type != SEG_REPLICATOR_DEV) {
2831 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
2832 dm_segtypes[rseg->type].target);
2833 return 0;
2834 }
2835 }
2836
2837 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2838 log_error("Unspecified sync log uuid.");
2839 return 0;
2840 }
2841
2842 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2843 return_0;
2844
2845 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2846
2847 if (!(slog_flags & DM_CORELOG)) {
2848 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2849 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2850 return 0;
2851 }
2852
2853 if (!_link_tree_nodes(node, area->slog))
2854 return_0;
2855 }
2856
2857 area->flags = slog_flags;
2858 area->region_size = slog_region_size;
2859 area->rsite_index = rsite_index;
2860
2861 return 1;
2862}
2863
5668fe04
ZK
2864static int _thin_validate_device_id(uint32_t device_id)
2865{
2866 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2867 log_error("Device id %u is higher then %u.",
2868 device_id, DM_THIN_MAX_DEVICE_ID);
2869 return 0;
2870 }
2871
2872 return 1;
2873}
2874
4251236e
ZK
2875int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2876 uint64_t size,
e0ea24be 2877 uint64_t transaction_id,
4251236e 2878 const char *metadata_uuid,
5668fd6a 2879 const char *pool_uuid,
4251236e 2880 uint32_t data_block_size,
e9156c2b 2881 uint64_t low_water_mark,
460c5991 2882 unsigned skip_block_zeroing)
4251236e
ZK
2883{
2884 struct load_segment *seg;
2885
3f53c059 2886 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2887 log_error("Data block size %u is lower then %u sectors.",
3f53c059 2888 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2889 return 0;
2890 }
2891
3f53c059 2892 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2893 log_error("Data block size %u is higher then %u sectors.",
3f53c059 2894 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2895 return 0;
2896 }
2897
2898 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2899 return_0;
2900
2901 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2902 log_error("Missing metadata uuid %s.", metadata_uuid);
2903 return 0;
2904 }
2905
2906 if (!_link_tree_nodes(node, seg->metadata))
2907 return_0;
2908
2909 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2910 log_error("Missing pool uuid %s.", pool_uuid);
2911 return 0;
2912 }
2913
2914 if (!_link_tree_nodes(node, seg->pool))
2915 return_0;
2916
bbcd37e4
ZK
2917 node->props.send_messages = 1;
2918 seg->transaction_id = transaction_id;
e9156c2b 2919 seg->low_water_mark = low_water_mark;
e0ea24be 2920 seg->data_block_size = data_block_size;
460c5991 2921 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
2922 dm_list_init(&seg->thin_messages);
2923
2924 return 1;
2925}
2926
2927int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
2e732e96
ZK
2928 dm_thin_message_t type,
2929 uint64_t id1, uint64_t id2)
25e6ab87
ZK
2930{
2931 struct load_segment *seg;
2932 struct thin_message *tm;
2933
2934 if (node->props.segment_count != 1) {
759b9592 2935 log_error("Thin pool node must have only one segment.");
25e6ab87
ZK
2936 return 0;
2937 }
2938
2939 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
25e6ab87 2940 if (seg->type != SEG_THIN_POOL) {
759b9592 2941 log_error("Thin pool node has segment type %s.",
25e6ab87
ZK
2942 dm_segtypes[seg->type].target);
2943 return 0;
2944 }
2945
2946 if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
2947 log_error("Failed to allocate thin message.");
2948 return 0;
2949 }
2950
2e732e96 2951 switch (type) {
25e6ab87 2952 case DM_THIN_MESSAGE_CREATE_SNAP:
759b9592 2953 /* If the thin origin is active, it must be suspend first! */
2e732e96 2954 if (id1 == id2) {
759b9592 2955 log_error("Cannot use same device id for origin and its snapshot.");
25e6ab87
ZK
2956 return 0;
2957 }
2e732e96
ZK
2958 if (!_thin_validate_device_id(id1) ||
2959 !_thin_validate_device_id(id2))
25e6ab87 2960 return_0;
2e732e96
ZK
2961 tm->message.u.m_create_snap.device_id = id1;
2962 tm->message.u.m_create_snap.origin_id = id2;
25e6ab87
ZK
2963 break;
2964 case DM_THIN_MESSAGE_CREATE_THIN:
2e732e96 2965 if (!_thin_validate_device_id(id1))
25e6ab87 2966 return_0;
2e732e96 2967 tm->message.u.m_create_thin.device_id = id1;
660a42bc 2968 tm->expected_errno = EEXIST;
25e6ab87
ZK
2969 break;
2970 case DM_THIN_MESSAGE_DELETE:
2e732e96 2971 if (!_thin_validate_device_id(id1))
25e6ab87 2972 return_0;
2e732e96 2973 tm->message.u.m_delete.device_id = id1;
660a42bc 2974 tm->expected_errno = ENODATA;
25e6ab87
ZK
2975 break;
2976 case DM_THIN_MESSAGE_TRIM:
2e732e96 2977 if (!_thin_validate_device_id(id1))
25e6ab87 2978 return_0;
2e732e96
ZK
2979 tm->message.u.m_trim.device_id = id1;
2980 tm->message.u.m_trim.new_size = id2;
25e6ab87
ZK
2981 break;
2982 case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
2e732e96
ZK
2983 if ((id1 + 1) != id2) {
2984 log_error("New transaction id must be sequential.");
2985 return 0; /* FIXME: Maybe too strict here? */
2986 }
2987 if (id1 != seg->transaction_id) {
2988 log_error("Current transaction id is different from thin pool.");
25e6ab87
ZK
2989 return 0; /* FIXME: Maybe too strict here? */
2990 }
2e732e96
ZK
2991 tm->message.u.m_set_transaction_id.current_id = id1;
2992 tm->message.u.m_set_transaction_id.new_id = id2;
25e6ab87
ZK
2993 break;
2994 default:
2e732e96 2995 log_error("Unsupported message type %d.", (int) type);
25e6ab87
ZK
2996 return 0;
2997 }
2998
2e732e96 2999 tm->message.type = type;
25e6ab87 3000 dm_list_add(&seg->thin_messages, &tm->list);
4251236e
ZK
3001
3002 return 1;
3003}
3004
3005int dm_tree_node_add_thin_target(struct dm_tree_node *node,
3006 uint64_t size,
4d25c81b 3007 const char *pool_uuid,
4251236e
ZK
3008 uint32_t device_id)
3009{
4d25c81b 3010 struct dm_tree_node *pool;
4251236e
ZK
3011 struct load_segment *seg;
3012
4d25c81b
ZK
3013 if (!(pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
3014 log_error("Missing thin pool uuid %s.", pool_uuid);
4251236e
ZK
3015 return 0;
3016 }
3017
4d25c81b 3018 if (!_link_tree_nodes(node, pool))
4251236e
ZK
3019 return_0;
3020
4d25c81b
ZK
3021 if (device_id == DM_THIN_ERROR_DEVICE_ID) {
3022 if (!dm_tree_node_add_error_target(node, size))
3023 return_0;
3024 } else {
3025 if (!_thin_validate_device_id(device_id))
3026 return_0;
3027
3028 if (!(seg = _add_segment(node, SEG_THIN, size)))
3029 return_0;
3030
3031 seg->pool = pool;
3032 seg->device_id = device_id;
3033 }
1419bf1c 3034
4251236e
ZK
3035 return 1;
3036}
3037
b4f1578f 3038static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
3039{
3040 struct seg_area *area;
3041
b4f1578f 3042 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
3043 log_error("Failed to allocate target segment area.");
3044 return 0;
3045 }
3046
3047 area->dev_node = dev_node;
3048 area->offset = offset;
3049
2c44337b 3050 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
3051 seg->area_count++;
3052
3053 return 1;
3054}
3055
b4f1578f 3056int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
3057 const char *dev_name,
3058 const char *uuid,
3059 uint64_t offset)
165e4a11
AK
3060{
3061 struct load_segment *seg;
3062 struct stat info;
b4f1578f 3063 struct dm_tree_node *dev_node;
165e4a11
AK
3064
3065 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3066 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3067 return 0;
3068 }
3069
3070 if (uuid) {
b4f1578f 3071 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3072 log_error("Couldn't find area uuid %s.", uuid);
3073 return 0;
3074 }
b4f1578f
AK
3075 if (!_link_tree_nodes(node, dev_node))
3076 return_0;
165e4a11 3077 } else {
6d04311e 3078 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3079 log_error("Device %s not found.", dev_name);
3080 return 0;
3081 }
3082
40e5fd8b 3083 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3084 log_error("Device %s is not a block device.", dev_name);
3085 return 0;
3086 }
3087
3088 /* FIXME Check correct macro use */
cda69e17
PR
3089 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3090 MINOR(info.st_rdev), 0)))
b4f1578f 3091 return_0;
165e4a11
AK
3092 }
3093
3094 if (!node->props.segment_count) {
b8175c33 3095 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3096 return 0;
3097 }
3098
2c44337b 3099 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3100
b4f1578f
AK
3101 if (!_add_area(node, seg, dev_node, offset))
3102 return_0;
165e4a11
AK
3103
3104 return 1;
db208f51 3105}
bd90c6b2 3106
6d04311e
JEB
3107int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3108{
3109 struct load_segment *seg;
3110
3111 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3112
415c0690
AK
3113 switch (seg->type) {
3114 case SEG_RAID1:
3115 case SEG_RAID4:
3116 case SEG_RAID5_LA:
3117 case SEG_RAID5_RA:
3118 case SEG_RAID5_LS:
3119 case SEG_RAID5_RS:
3120 case SEG_RAID6_ZR:
3121 case SEG_RAID6_NR:
3122 case SEG_RAID6_NC:
3123 break;
3124 default:
3125 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3126 return 0;
3127 }
3128
6d04311e
JEB
3129 if (!_add_area(node, seg, NULL, offset))
3130 return_0;
3131
3132 return 1;
3133}
3134
bd90c6b2
AK
3135void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
3136{
3137 node->dtree->cookie = cookie;
3138}
3139
3140uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
3141{
3142 return node->dtree->cookie;
3143}
This page took 0.493795 seconds and 5 git commands to generate.