]> sourceware.org Git - lvm2.git/blame - libdm/libdm-deptree.c
Caller is still entitled to reference an LV that's unlinked, so don't
[lvm2.git] / libdm / libdm-deptree.c
CommitLineData
3d0480ed 1/*
4251236e 2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3d0480ed
AK
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
3e5b6ed2 15#include "dmlib.h"
3d0480ed
AK
16#include "libdm-targets.h"
17#include "libdm-common.h"
3d0480ed 18#include "kdev_t.h"
0782ad50 19#include "dm-ioctl.h"
3d0480ed
AK
20
21#include <stdarg.h>
22#include <sys/param.h>
8f26e18c 23#include <sys/utsname.h>
3d0480ed 24
165e4a11
AK
25#define MAX_TARGET_PARAMSIZE 500000
26
b262f3e1
ZK
27#define REPLICATOR_LOCAL_SITE 0
28
165e4a11
AK
29/* Supported segment types */
30enum {
12ca060e
MB
31 SEG_CRYPT,
32 SEG_ERROR,
165e4a11
AK
33 SEG_LINEAR,
34 SEG_MIRRORED,
b262f3e1
ZK
35 SEG_REPLICATOR,
36 SEG_REPLICATOR_DEV,
165e4a11
AK
37 SEG_SNAPSHOT,
38 SEG_SNAPSHOT_ORIGIN,
aa6f4e51 39 SEG_SNAPSHOT_MERGE,
165e4a11
AK
40 SEG_STRIPED,
41 SEG_ZERO,
4251236e
ZK
42 SEG_THIN_POOL,
43 SEG_THIN,
cac52ca4
JEB
44 SEG_RAID1,
45 SEG_RAID4,
46 SEG_RAID5_LA,
47 SEG_RAID5_RA,
48 SEG_RAID5_LS,
49 SEG_RAID5_RS,
50 SEG_RAID6_ZR,
51 SEG_RAID6_NR,
52 SEG_RAID6_NC,
53 SEG_LAST,
165e4a11 54};
b4f1578f 55
165e4a11
AK
56/* FIXME Add crypt and multipath support */
57
58struct {
59 unsigned type;
60 const char *target;
61} dm_segtypes[] = {
12ca060e 62 { SEG_CRYPT, "crypt" },
165e4a11
AK
63 { SEG_ERROR, "error" },
64 { SEG_LINEAR, "linear" },
65 { SEG_MIRRORED, "mirror" },
b262f3e1
ZK
66 { SEG_REPLICATOR, "replicator" },
67 { SEG_REPLICATOR_DEV, "replicator-dev" },
165e4a11
AK
68 { SEG_SNAPSHOT, "snapshot" },
69 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
aa6f4e51 70 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
165e4a11
AK
71 { SEG_STRIPED, "striped" },
72 { SEG_ZERO, "zero"},
4251236e
ZK
73 { SEG_THIN_POOL, "thin-pool"},
74 { SEG_THIN, "thin"},
cac52ca4
JEB
75 { SEG_RAID1, "raid1"},
76 { SEG_RAID4, "raid4"},
77 { SEG_RAID5_LA, "raid5_la"},
78 { SEG_RAID5_RA, "raid5_ra"},
79 { SEG_RAID5_LS, "raid5_ls"},
80 { SEG_RAID5_RS, "raid5_rs"},
81 { SEG_RAID6_ZR, "raid6_zr"},
82 { SEG_RAID6_NR, "raid6_nr"},
83 { SEG_RAID6_NC, "raid6_nc"},
ee05be08
ZK
84
85 /*
86 *WARNING: Since 'raid' target overloads this 1:1 mapping table
87 * for search do not add new enum elements past them!
88 */
cac52ca4
JEB
89 { SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
90 { SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
91 { SEG_LAST, NULL },
165e4a11
AK
92};
93
94/* Some segment types have a list of areas of other devices attached */
95struct seg_area {
2c44337b 96 struct dm_list list;
165e4a11 97
b4f1578f 98 struct dm_tree_node *dev_node;
165e4a11
AK
99
100 uint64_t offset;
b262f3e1
ZK
101
102 unsigned rsite_index; /* Replicator site index */
103 struct dm_tree_node *slog; /* Replicator sync log node */
104 uint64_t region_size; /* Replicator sync log size */
105 uint32_t flags; /* Replicator sync log flags */
106};
107
2e732e96
ZK
108struct dm_thin_message {
109 dm_thin_message_t type;
110 union {
111 struct {
112 uint32_t device_id;
113 uint32_t origin_id;
114 } m_create_snap;
115 struct {
116 uint32_t device_id;
117 } m_create_thin;
118 struct {
119 uint32_t device_id;
120 } m_delete;
121 struct {
122 uint64_t current_id;
123 uint64_t new_id;
124 } m_set_transaction_id;
125 struct {
126 uint32_t device_id;
127 uint64_t new_size;
128 } m_trim;
129 } u;
130};
131
25e6ab87
ZK
132struct thin_message {
133 struct dm_list list;
134 struct dm_thin_message message;
660a42bc 135 int expected_errno;
25e6ab87
ZK
136};
137
b262f3e1
ZK
138/* Replicator-log has a list of sites */
139/* FIXME: maybe move to seg_area too? */
140struct replicator_site {
141 struct dm_list list;
142
143 unsigned rsite_index;
144 dm_replicator_mode_t mode;
145 uint32_t async_timeout;
146 uint32_t fall_behind_ios;
147 uint64_t fall_behind_data;
165e4a11
AK
148};
149
150/* Per-segment properties */
151struct load_segment {
2c44337b 152 struct dm_list list;
165e4a11
AK
153
154 unsigned type;
155
156 uint64_t size;
157
b262f3e1
ZK
158 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
159 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
165e4a11 160
cac52ca4 161 uint32_t stripe_size; /* Striped + raid */
165e4a11
AK
162
163 int persistent; /* Snapshot */
164 uint32_t chunk_size; /* Snapshot */
b4f1578f
AK
165 struct dm_tree_node *cow; /* Snapshot */
166 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
aa6f4e51 167 struct dm_tree_node *merge; /* Snapshot */
165e4a11 168
b262f3e1 169 struct dm_tree_node *log; /* Mirror + Replicator */
cac52ca4 170 uint32_t region_size; /* Mirror + raid */
165e4a11
AK
171 unsigned clustered; /* Mirror */
172 unsigned mirror_area_count; /* Mirror */
dbcb64b8 173 uint32_t flags; /* Mirror log */
67b25ed4 174 char *uuid; /* Clustered mirror log */
12ca060e
MB
175
176 const char *cipher; /* Crypt */
177 const char *chainmode; /* Crypt */
178 const char *iv; /* Crypt */
179 uint64_t iv_offset; /* Crypt */
180 const char *key; /* Crypt */
b262f3e1
ZK
181
182 const char *rlog_type; /* Replicator */
183 struct dm_list rsites; /* Replicator */
184 unsigned rsite_count; /* Replicator */
185 unsigned rdevice_count; /* Replicator */
186 struct dm_tree_node *replicator;/* Replicator-dev */
187 uint64_t rdevice_index; /* Replicator-dev */
f439e65b 188
40e5fd8b 189 uint64_t rebuilds; /* raid */
4251236e
ZK
190
191 struct dm_tree_node *metadata; /* Thin_pool */
192 struct dm_tree_node *pool; /* Thin_pool, Thin */
25e6ab87 193 struct dm_list thin_messages; /* Thin_pool */
bbcd37e4 194 uint64_t transaction_id; /* Thin_pool */
e9156c2b 195 uint64_t low_water_mark; /* Thin_pool */
e0ea24be 196 uint32_t data_block_size; /* Thin_pool */
460c5991 197 unsigned skip_block_zeroing; /* Thin_pool */
4251236e
ZK
198 uint32_t device_id; /* Thin */
199
165e4a11
AK
200};
201
202/* Per-device properties */
203struct load_properties {
204 int read_only;
205 uint32_t major;
206 uint32_t minor;
207
52b84409
AK
208 uint32_t read_ahead;
209 uint32_t read_ahead_flags;
210
165e4a11 211 unsigned segment_count;
bb875bb9 212 unsigned size_changed;
2c44337b 213 struct dm_list segs;
165e4a11
AK
214
215 const char *new_name;
566515c0
PR
216
217 /* If immediate_dev_node is set to 1, try to create the dev node
218 * as soon as possible (e.g. in preload stage even during traversal
219 * and processing of dm tree). This will also flush all stacked dev
220 * node operations, synchronizing with udev.
221 */
df390f17
AK
222 unsigned immediate_dev_node;
223
224 /*
225 * If the device size changed from zero and this is set,
226 * don't resume the device immediately, even if the device
227 * has parents. This works provided the parents do not
228 * validate the device size and is required by pvmove to
229 * avoid starting the mirror resync operation too early.
230 */
231 unsigned delay_resume_if_new;
bbcd37e4
ZK
232
233 /* Send messages for this node in preload */
234 unsigned send_messages;
165e4a11
AK
235};
236
237/* Two of these used to join two nodes with uses and used_by. */
b4f1578f 238struct dm_tree_link {
2c44337b 239 struct dm_list list;
b4f1578f 240 struct dm_tree_node *node;
165e4a11
AK
241};
242
b4f1578f
AK
243struct dm_tree_node {
244 struct dm_tree *dtree;
3d0480ed 245
40e5fd8b
AK
246 const char *name;
247 const char *uuid;
248 struct dm_info info;
3d0480ed 249
40e5fd8b
AK
250 struct dm_list uses; /* Nodes this node uses */
251 struct dm_list used_by; /* Nodes that use this node */
165e4a11 252
56c28292
AK
253 int activation_priority; /* 0 gets activated first */
254
f16aea9e
PR
255 uint16_t udev_flags; /* Udev control flags */
256
165e4a11
AK
257 void *context; /* External supplied context */
258
259 struct load_properties props; /* For creation/table (re)load */
76d1aec8
ZK
260
261 /*
262 * If presuspend of child node is needed
263 * Note: only direct child is allowed
264 */
265 struct dm_tree_node *presuspend_node;
3d0480ed
AK
266};
267
b4f1578f 268struct dm_tree {
a3f6b2ce
AK
269 struct dm_pool *mem;
270 struct dm_hash_table *devs;
165e4a11 271 struct dm_hash_table *uuids;
b4f1578f 272 struct dm_tree_node root;
c55b1410 273 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
787200ef
PR
274 int no_flush; /* 1 sets noflush (mirrors/multipath) */
275 int retry_remove; /* 1 retries remove if not successful */
bd90c6b2 276 uint32_t cookie;
3d0480ed
AK
277};
278
5c9eae96
AK
279/*
280 * Tree functions.
281 */
b4f1578f 282struct dm_tree *dm_tree_create(void)
3d0480ed 283{
0395dd22 284 struct dm_pool *dmem;
b4f1578f 285 struct dm_tree *dtree;
3d0480ed 286
0395dd22
ZK
287 if (!(dmem = dm_pool_create("dtree", 1024)) ||
288 !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
289 log_error("Failed to allocate dtree.");
290 if (dmem)
291 dm_pool_destroy(dmem);
3d0480ed
AK
292 return NULL;
293 }
294
b4f1578f 295 dtree->root.dtree = dtree;
2c44337b
AK
296 dm_list_init(&dtree->root.uses);
297 dm_list_init(&dtree->root.used_by);
c55b1410 298 dtree->skip_lockfs = 0;
b9ffd32c 299 dtree->no_flush = 0;
0395dd22 300 dtree->mem = dmem;
3d0480ed 301
b4f1578f
AK
302 if (!(dtree->devs = dm_hash_create(8))) {
303 log_error("dtree hash creation failed");
304 dm_pool_destroy(dtree->mem);
3d0480ed
AK
305 return NULL;
306 }
307
b4f1578f
AK
308 if (!(dtree->uuids = dm_hash_create(32))) {
309 log_error("dtree uuid hash creation failed");
310 dm_hash_destroy(dtree->devs);
311 dm_pool_destroy(dtree->mem);
165e4a11
AK
312 return NULL;
313 }
314
b4f1578f 315 return dtree;
3d0480ed
AK
316}
317
b4f1578f 318void dm_tree_free(struct dm_tree *dtree)
3d0480ed 319{
b4f1578f 320 if (!dtree)
3d0480ed
AK
321 return;
322
b4f1578f
AK
323 dm_hash_destroy(dtree->uuids);
324 dm_hash_destroy(dtree->devs);
325 dm_pool_destroy(dtree->mem);
3d0480ed
AK
326}
327
5c9eae96
AK
328void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
329{
330 node->dtree->cookie = cookie;
331}
332
333uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
334{
335 return node->dtree->cookie;
336}
337
338void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
339{
340 dnode->dtree->skip_lockfs = 1;
341}
342
343void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
344{
345 dnode->dtree->no_flush = 1;
346}
347
348void dm_tree_retry_remove(struct dm_tree_node *dnode)
349{
350 dnode->dtree->retry_remove = 1;
351}
352
353/*
354 * Node functions.
355 */
04bde319
ZK
356static int _nodes_are_linked(const struct dm_tree_node *parent,
357 const struct dm_tree_node *child)
3d0480ed 358{
b4f1578f 359 struct dm_tree_link *dlink;
3d0480ed 360
2c44337b 361 dm_list_iterate_items(dlink, &parent->uses)
3d0480ed
AK
362 if (dlink->node == child)
363 return 1;
3d0480ed
AK
364
365 return 0;
366}
367
2c44337b 368static int _link(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 369{
b4f1578f 370 struct dm_tree_link *dlink;
3d0480ed 371
b4f1578f
AK
372 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
373 log_error("dtree link allocation failed");
3d0480ed
AK
374 return 0;
375 }
376
377 dlink->node = node;
2c44337b 378 dm_list_add(list, &dlink->list);
3d0480ed
AK
379
380 return 1;
381}
382
b4f1578f
AK
383static int _link_nodes(struct dm_tree_node *parent,
384 struct dm_tree_node *child)
3d0480ed
AK
385{
386 if (_nodes_are_linked(parent, child))
387 return 1;
388
389 if (!_link(&parent->uses, child))
390 return 0;
391
392 if (!_link(&child->used_by, parent))
393 return 0;
394
395 return 1;
396}
397
2c44337b 398static void _unlink(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 399{
b4f1578f 400 struct dm_tree_link *dlink;
3d0480ed 401
2c44337b 402 dm_list_iterate_items(dlink, list)
3d0480ed 403 if (dlink->node == node) {
2c44337b 404 dm_list_del(&dlink->list);
3d0480ed
AK
405 break;
406 }
3d0480ed
AK
407}
408
b4f1578f
AK
409static void _unlink_nodes(struct dm_tree_node *parent,
410 struct dm_tree_node *child)
3d0480ed
AK
411{
412 if (!_nodes_are_linked(parent, child))
413 return;
414
415 _unlink(&parent->uses, child);
416 _unlink(&child->used_by, parent);
417}
418
b4f1578f 419static int _add_to_toplevel(struct dm_tree_node *node)
165e4a11 420{
b4f1578f 421 return _link_nodes(&node->dtree->root, node);
165e4a11
AK
422}
423
b4f1578f 424static void _remove_from_toplevel(struct dm_tree_node *node)
3d0480ed 425{
b1ebf028 426 _unlink_nodes(&node->dtree->root, node);
3d0480ed
AK
427}
428
b4f1578f 429static int _add_to_bottomlevel(struct dm_tree_node *node)
3d0480ed 430{
b4f1578f 431 return _link_nodes(node, &node->dtree->root);
3d0480ed
AK
432}
433
b4f1578f 434static void _remove_from_bottomlevel(struct dm_tree_node *node)
165e4a11 435{
b1ebf028 436 _unlink_nodes(node, &node->dtree->root);
165e4a11
AK
437}
438
b4f1578f 439static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
165e4a11
AK
440{
441 /* Don't link to root node if child already has a parent */
f77736ca 442 if (parent == &parent->dtree->root) {
b4f1578f 443 if (dm_tree_node_num_children(child, 1))
165e4a11
AK
444 return 1;
445 } else
446 _remove_from_toplevel(child);
447
f77736ca 448 if (child == &child->dtree->root) {
b4f1578f 449 if (dm_tree_node_num_children(parent, 0))
165e4a11
AK
450 return 1;
451 } else
452 _remove_from_bottomlevel(parent);
453
454 return _link_nodes(parent, child);
455}
456
b4f1578f 457static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
458 const char *name,
459 const char *uuid,
165e4a11 460 struct dm_info *info,
f16aea9e
PR
461 void *context,
462 uint16_t udev_flags)
3d0480ed 463{
b4f1578f 464 struct dm_tree_node *node;
3d0480ed
AK
465 uint64_t dev;
466
b4f1578f
AK
467 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
468 log_error("_create_dm_tree_node alloc failed");
3d0480ed
AK
469 return NULL;
470 }
471
b4f1578f 472 node->dtree = dtree;
3d0480ed
AK
473
474 node->name = name;
475 node->uuid = uuid;
476 node->info = *info;
165e4a11 477 node->context = context;
f16aea9e 478 node->udev_flags = udev_flags;
56c28292 479 node->activation_priority = 0;
3d0480ed 480
2c44337b
AK
481 dm_list_init(&node->uses);
482 dm_list_init(&node->used_by);
483 dm_list_init(&node->props.segs);
3d0480ed
AK
484
485 dev = MKDEV(info->major, info->minor);
486
b4f1578f 487 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
3d0480ed 488 sizeof(dev), node)) {
b4f1578f
AK
489 log_error("dtree node hash insertion failed");
490 dm_pool_free(dtree->mem, node);
3d0480ed
AK
491 return NULL;
492 }
493
165e4a11 494 if (uuid && *uuid &&
b4f1578f
AK
495 !dm_hash_insert(dtree->uuids, uuid, node)) {
496 log_error("dtree uuid hash insertion failed");
497 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
165e4a11 498 sizeof(dev));
b4f1578f 499 dm_pool_free(dtree->mem, node);
165e4a11
AK
500 return NULL;
501 }
502
3d0480ed
AK
503 return node;
504}
505
b4f1578f 506static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
507 uint32_t major, uint32_t minor)
508{
509 uint64_t dev = MKDEV(major, minor);
510
b4f1578f 511 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
3d0480ed
AK
512 sizeof(dev));
513}
514
b4f1578f 515static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
516 const char *uuid)
517{
87f98002 518 struct dm_tree_node *node;
2e5ff5d1
AK
519 const char *default_uuid_prefix;
520 size_t default_uuid_prefix_len;
87f98002
AK
521
522 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
523 return node;
524
2e5ff5d1
AK
525 default_uuid_prefix = dm_uuid_prefix();
526 default_uuid_prefix_len = strlen(default_uuid_prefix);
527
528 if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
87f98002
AK
529 return NULL;
530
2e5ff5d1 531 return dm_hash_lookup(dtree->uuids, uuid + default_uuid_prefix_len);
165e4a11
AK
532}
533
5c9eae96
AK
534void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
535
536{
537 struct dm_info *dinfo = &dnode->info;
538
539 if (udev_flags != dnode->udev_flags)
540 log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
541 ") udev_flags from 0x%x to 0x%x",
542 dnode->name, dinfo->major, dinfo->minor,
543 dnode->udev_flags, udev_flags);
544 dnode->udev_flags = udev_flags;
545}
546
547void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
548 uint32_t read_ahead,
549 uint32_t read_ahead_flags)
550{
551 dnode->props.read_ahead = read_ahead;
552 dnode->props.read_ahead_flags = read_ahead_flags;
553}
554
555void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
556 struct dm_tree_node *presuspend_node)
557{
558 node->presuspend_node = presuspend_node;
559}
560
561const char *dm_tree_node_get_name(const struct dm_tree_node *node)
562{
563 return node->info.exists ? node->name : "";
564}
565
566const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
567{
568 return node->info.exists ? node->uuid : "";
569}
570
571const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
572{
573 return &node->info;
574}
575
576void *dm_tree_node_get_context(const struct dm_tree_node *node)
577{
578 return node->context;
579}
580
581int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
582{
583 return dnode->props.size_changed;
584}
585
586int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
587{
588 if (inverted) {
589 if (_nodes_are_linked(&node->dtree->root, node))
590 return 0;
591 return dm_list_size(&node->used_by);
592 }
593
594 if (_nodes_are_linked(node, &node->dtree->root))
595 return 0;
596
597 return dm_list_size(&node->uses);
598}
599
600/*
601 * Returns 1 if no prefix supplied
602 */
603static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
604{
605 const char *default_uuid_prefix = dm_uuid_prefix();
606 size_t default_uuid_prefix_len = strlen(default_uuid_prefix);
607
608 if (!uuid_prefix)
609 return 1;
610
611 if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
612 return 1;
613
614 /* Handle transition: active device uuids might be missing the prefix */
615 if (uuid_prefix_len <= 4)
616 return 0;
617
618 if (!strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
619 return 0;
620
621 if (strncmp(uuid_prefix, default_uuid_prefix, default_uuid_prefix_len))
622 return 0;
623
624 if (!strncmp(uuid, uuid_prefix + default_uuid_prefix_len, uuid_prefix_len - default_uuid_prefix_len))
625 return 1;
626
627 return 0;
628}
629
630/*
631 * Returns 1 if no children.
632 */
633static int _children_suspended(struct dm_tree_node *node,
634 uint32_t inverted,
635 const char *uuid_prefix,
636 size_t uuid_prefix_len)
637{
638 struct dm_list *list;
639 struct dm_tree_link *dlink;
640 const struct dm_info *dinfo;
641 const char *uuid;
642
643 if (inverted) {
644 if (_nodes_are_linked(&node->dtree->root, node))
645 return 1;
646 list = &node->used_by;
647 } else {
648 if (_nodes_are_linked(node, &node->dtree->root))
649 return 1;
650 list = &node->uses;
651 }
652
653 dm_list_iterate_items(dlink, list) {
654 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
655 stack;
656 continue;
657 }
658
659 /* Ignore if it doesn't belong to this VG */
660 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
661 continue;
662
663 /* Ignore if parent node wants to presuspend this node */
664 if (dlink->node->presuspend_node == node)
665 continue;
666
667 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
668 stack; /* FIXME Is this normal? */
669 return 0;
670 }
671
672 if (!dinfo->suspended)
673 return 0;
674 }
675
676 return 1;
677}
678
679/*
680 * Set major and minor to zero for root of tree.
681 */
682struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
683 uint32_t major,
684 uint32_t minor)
685{
686 if (!major && !minor)
687 return &dtree->root;
688
689 return _find_dm_tree_node(dtree, major, minor);
690}
691
692/*
693 * Set uuid to NULL for root of tree.
694 */
695struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
696 const char *uuid)
697{
698 if (!uuid || !*uuid)
699 return &dtree->root;
700
701 return _find_dm_tree_node_by_uuid(dtree, uuid);
702}
703
704/*
705 * First time set *handle to NULL.
706 * Set inverted to invert the tree.
707 */
708struct dm_tree_node *dm_tree_next_child(void **handle,
709 const struct dm_tree_node *parent,
710 uint32_t inverted)
711{
712 struct dm_list **dlink = (struct dm_list **) handle;
713 const struct dm_list *use_list;
714
715 if (inverted)
716 use_list = &parent->used_by;
717 else
718 use_list = &parent->uses;
719
720 if (!*dlink)
721 *dlink = dm_list_first(use_list);
722 else
723 *dlink = dm_list_next(use_list, *dlink);
724
725 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
726}
727
a3f6b2ce 728static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
2e5ff5d1 729 const char **name, const char **uuid, unsigned inactive_table,
3d0480ed
AK
730 struct dm_info *info, struct dm_deps **deps)
731{
732 memset(info, 0, sizeof(*info));
733
734 if (!dm_is_dm_major(major)) {
2e5ff5d1
AK
735 if (name)
736 *name = "";
737 if (uuid)
738 *uuid = "";
3d0480ed
AK
739 *deps = NULL;
740 info->major = major;
741 info->minor = minor;
742 info->exists = 0;
165e4a11
AK
743 info->live_table = 0;
744 info->inactive_table = 0;
745 info->read_only = 0;
3d0480ed
AK
746 return 1;
747 }
748
749 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
750 log_error("deps dm_task creation failed");
751 return 0;
752 }
753
b4f1578f
AK
754 if (!dm_task_set_major(*dmt, major)) {
755 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
756 major, minor);
3d0480ed 757 goto failed;
b4f1578f 758 }
3d0480ed 759
b4f1578f
AK
760 if (!dm_task_set_minor(*dmt, minor)) {
761 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
762 major, minor);
3d0480ed 763 goto failed;
b4f1578f 764 }
3d0480ed 765
2e5ff5d1
AK
766 if (inactive_table && !dm_task_query_inactive_table(*dmt)) {
767 log_error("_deps: failed to set inactive table for (%" PRIu32 ":%" PRIu32 ")",
768 major, minor);
769 goto failed;
770 }
771
b4f1578f
AK
772 if (!dm_task_run(*dmt)) {
773 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
774 major, minor);
3d0480ed 775 goto failed;
b4f1578f 776 }
3d0480ed 777
b4f1578f
AK
778 if (!dm_task_get_info(*dmt, info)) {
779 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
780 major, minor);
3d0480ed 781 goto failed;
b4f1578f 782 }
3d0480ed
AK
783
784 if (!info->exists) {
2e5ff5d1
AK
785 if (name)
786 *name = "";
787 if (uuid)
788 *uuid = "";
3d0480ed
AK
789 *deps = NULL;
790 } else {
791 if (info->major != major) {
b4f1578f 792 log_error("Inconsistent dtree major number: %u != %u",
3d0480ed
AK
793 major, info->major);
794 goto failed;
795 }
796 if (info->minor != minor) {
b4f1578f 797 log_error("Inconsistent dtree minor number: %u != %u",
3d0480ed
AK
798 minor, info->minor);
799 goto failed;
800 }
2e5ff5d1 801 if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
3d0480ed
AK
802 log_error("name pool_strdup failed");
803 goto failed;
804 }
2e5ff5d1 805 if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
3d0480ed
AK
806 log_error("uuid pool_strdup failed");
807 goto failed;
808 }
809 *deps = dm_task_get_deps(*dmt);
810 }
811
812 return 1;
813
814failed:
815 dm_task_destroy(*dmt);
816 return 0;
817}
818
5c9eae96
AK
819/*
820 * Deactivate a device with its dependencies if the uuid prefix matches.
821 */
822static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
823 struct dm_info *info, struct dm_pool *mem,
824 const char **name, const char **uuid)
3d0480ed 825{
5c9eae96
AK
826 struct dm_task *dmt;
827 int r;
3d0480ed 828
5c9eae96
AK
829 if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
830 log_error("_info_by_dev: dm_task creation failed");
831 return 0;
3d0480ed
AK
832 }
833
5c9eae96
AK
834 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
835 log_error("_info_by_dev: Failed to set device number");
836 dm_task_destroy(dmt);
837 return 0;
838 }
839
840 if (!with_open_count && !dm_task_no_open_count(dmt))
841 log_error("Failed to disable open_count");
842
843 if (!(r = dm_task_run(dmt)))
844 goto_out;
845
846 if (!(r = dm_task_get_info(dmt, info)))
847 goto_out;
848
849 if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(dmt)))) {
850 log_error("name pool_strdup failed");
851 r = 0;
b4f1578f 852 goto_out;
165e4a11 853 }
3d0480ed 854
5c9eae96
AK
855 if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(dmt)))) {
856 log_error("uuid pool_strdup failed");
857 r = 0;
858 goto_out;
859 }
3d0480ed 860
5c9eae96
AK
861out:
862 dm_task_destroy(dmt);
863
864 return r;
865}
866
867static int _check_device_not_in_use(const char *name, struct dm_info *info)
868{
869 if (!info->exists)
870 return 1;
871
872 /* If sysfs is not used, use open_count information only. */
873 if (!*dm_sysfs_dir()) {
874 if (info->open_count) {
875 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
876 name, info->major, info->minor);
877 return 0;
878 }
879
880 return 1;
881 }
882
883 if (dm_device_has_holders(info->major, info->minor)) {
884 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
885 "by another device.", name, info->major, info->minor);
886 return 0;
887 }
888
889 if (dm_device_has_mounted_fs(info->major, info->minor)) {
890 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
891 "a filesystem in use.", name, info->major, info->minor);
892 return 0;
893 }
894
895 return 1;
896}
897
898/* Check if all parent nodes of given node have open_count == 0 */
899static int _node_has_closed_parents(struct dm_tree_node *node,
900 const char *uuid_prefix,
901 size_t uuid_prefix_len)
902{
903 struct dm_tree_link *dlink;
904 const struct dm_info *dinfo;
905 struct dm_info info;
906 const char *uuid;
907
908 /* Iterate through parents of this node */
909 dm_list_iterate_items(dlink, &node->used_by) {
910 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
b4f1578f 911 stack;
5c9eae96 912 continue;
b4f1578f 913 }
5c9eae96
AK
914
915 /* Ignore if it doesn't belong to this VG */
916 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
917 continue;
918
919 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
920 stack; /* FIXME Is this normal? */
921 return 0;
922 }
923
924 /* Refresh open_count */
925 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
926 !info.exists)
927 continue;
928
929 if (info.open_count) {
930 log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
931 dinfo->major, dinfo->minor, info.open_count);
932 return 0;
933 }
934 }
935
936 return 1;
937}
938
939static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
940 uint32_t *cookie, uint16_t udev_flags, int retry)
941{
942 struct dm_task *dmt;
943 int r = 0;
944
945 log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
946
947 if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
948 log_error("Deactivation dm_task creation failed for %s", name);
949 return 0;
950 }
951
952 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
953 log_error("Failed to set device number for %s deactivation", name);
165e4a11 954 goto out;
3d0480ed
AK
955 }
956
5c9eae96
AK
957 if (!dm_task_no_open_count(dmt))
958 log_error("Failed to disable open_count");
959
960 if (cookie)
961 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
962 goto out;
963
964 if (retry)
965 dm_task_retry_remove(dmt);
966
967 r = dm_task_run(dmt);
968
969 /* FIXME Until kernel returns actual name so dm-iface.c can handle it */
970 rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
971 dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));
972
973 /* FIXME Remove node from tree or mark invalid? */
3d0480ed 974
3d0480ed 975out:
5c9eae96 976 dm_task_destroy(dmt);
3d0480ed 977
5c9eae96 978 return r;
165e4a11
AK
979}
980
2e5ff5d1 981static int _node_clear_table(struct dm_tree_node *dnode, uint16_t udev_flags)
165e4a11 982{
2e5ff5d1
AK
983 struct dm_task *dmt = NULL, *deps_dmt = NULL;
984 struct dm_info *info, deps_info;
985 struct dm_deps *deps = NULL;
986 const char *name, *uuid;
987 const char *default_uuid_prefix;
988 size_t default_uuid_prefix_len;
989 uint32_t i;
990 int r = 0;
165e4a11
AK
991
992 if (!(info = &dnode->info)) {
b4f1578f 993 log_error("_node_clear_table failed: missing info");
165e4a11
AK
994 return 0;
995 }
996
b4f1578f
AK
997 if (!(name = dm_tree_node_get_name(dnode))) {
998 log_error("_node_clear_table failed: missing name");
165e4a11
AK
999 return 0;
1000 }
1001
1002 /* Is there a table? */
1003 if (!info->exists || !info->inactive_table)
1004 return 1;
1005
2e5ff5d1
AK
1006 /* Get devices used by inactive table that's about to be deleted. */
1007 if (!_deps(&deps_dmt, dnode->dtree->mem, info->major, info->minor, NULL, NULL, 1, info, &deps)) {
1008 log_error("Failed to obtain dependencies for %s before clearing table.", name);
1009 return 0;
1010 }
10d0d9c7 1011
165e4a11
AK
1012 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
1013 name, info->major, info->minor);
1014
1015 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
165e4a11 1016 log_error("Table clear dm_task creation failed for %s", name);
2e5ff5d1 1017 goto_out;
165e4a11
AK
1018 }
1019
1020 if (!dm_task_set_major(dmt, info->major) ||
1021 !dm_task_set_minor(dmt, info->minor)) {
1022 log_error("Failed to set device number for %s table clear", name);
2e5ff5d1 1023 goto_out;
165e4a11
AK
1024 }
1025
1026 r = dm_task_run(dmt);
1027
1028 if (!dm_task_get_info(dmt, info)) {
b4f1578f 1029 log_error("_node_clear_table failed: info missing after running task for %s", name);
165e4a11
AK
1030 r = 0;
1031 }
1032
2e5ff5d1
AK
1033 if (!r || !deps)
1034 goto_out;
1035
1036 /*
1037 * Remove (incomplete) devices that the inactive table referred to but
1038 * which are not in the tree, no longer referenced and don't have a live
1039 * table.
1040 */
1041 default_uuid_prefix = dm_uuid_prefix();
1042 default_uuid_prefix_len = strlen(default_uuid_prefix);
1043
1044 for (i = 0; i < deps->count; i++) {
1045 /* If already in tree, assume it's under control */
1046 if (_find_dm_tree_node(dnode->dtree, MAJOR(deps->device[i]), MINOR(deps->device[i])))
5c9eae96 1047 continue;
db208f51 1048
5c9eae96
AK
1049 if (!_info_by_dev(MAJOR(deps->device[i]), MINOR(deps->device[i]), 1,
1050 &deps_info, dnode->dtree->mem, &name, &uuid))
1051 continue;
2e5ff5d1 1052
5c9eae96
AK
1053 /* Proceed if device is an 'orphan' - unreferenced and without a live table. */
1054 if (!deps_info.exists || deps_info.live_table || deps_info.open_count)
1055 continue;
3e8c6b73 1056
5c9eae96
AK
1057 if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
1058 continue;
2e5ff5d1 1059
5c9eae96
AK
1060 /* Remove device. */
1061 if (!_deactivate_node(name, deps_info.major, deps_info.minor, &dnode->dtree->cookie, udev_flags, 0)) {
1062 log_error("Failed to deactivate no-longer-used device %s (%"
1063 PRIu32 ":%" PRIu32 ")", name, deps_info.major, deps_info.minor);
1064 } else if (deps_info.suspended)
1065 dec_suspended();
2e5ff5d1
AK
1066 }
1067
1068out:
5c9eae96
AK
1069 if (dmt)
1070 dm_task_destroy(dmt);
1071
1072 if (deps_dmt)
1073 dm_task_destroy(deps_dmt);
3e8c6b73
AK
1074
1075 return r;
1076}
1077
5c9eae96
AK
/*
 * Add (or update) a device node that is about to be created/loaded by the
 * caller.  If no node with this uuid exists yet, a new node is created and
 * attached to both the top and bottom of the tree until a table links it;
 * if a node already exists under a different name, a rename is scheduled
 * via props.new_name.  Returns the node, or NULL on allocation failure.
 */
struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
							 const char *name,
							 const char *uuid,
							 uint32_t major,
							 uint32_t minor,
							 int read_only,
							 int clear_inactive,
							 void *context,
							 uint16_t udev_flags)
{
	struct dm_tree_node *dnode;
	struct dm_info info;
	const char *name2;
	const char *uuid2;

	/* Do we need to add node to tree? */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
		/* Copy name/uuid into the tree's pool so they outlive the caller's buffers */
		if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
		if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
			log_error("uuid pool_strdup failed");
			return NULL;
		}

		/* Device does not exist in the kernel yet - blank info */
		info.major = 0;
		info.minor = 0;
		info.exists = 0;
		info.live_table = 0;
		info.inactive_table = 0;
		info.read_only = 0;

		if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
						   context, 0)))
			return_NULL;

		/* Attach to root node until a table is supplied */
		if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
			return_NULL;

		/* Requested device number (0 = let the kernel pick) */
		dnode->props.major = major;
		dnode->props.minor = minor;
		dnode->props.new_name = NULL;
		dnode->props.size_changed = 0;
	} else if (strcmp(name, dnode->name)) {
		/* Do we need to rename node? */
		if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
	}

	/* (Re)apply per-activation properties on both new and existing nodes */
	dnode->props.read_only = read_only ? 1 : 0;
	dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
	dnode->props.read_ahead_flags = 0;

	/* Optionally wipe any inactive table left over from a previous attempt */
	if (clear_inactive && !_node_clear_table(dnode, udev_flags))
		return_NULL;

	dnode->context = context;
	dnode->udev_flags = udev_flags;

	return dnode;
}
f3ef15ef 1143
5c9eae96
AK
/*
 * Convenience wrapper for dm_tree_add_new_dev_with_udev_flags() with no
 * udev flags set.  See that function for the full semantics.
 */
struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree, const char *name,
					 const char *uuid, uint32_t major, uint32_t minor,
					 int read_only, int clear_inactive, void *context)
{
	return dm_tree_add_new_dev_with_udev_flags(dtree, name, uuid, major, minor,
						   read_only, clear_inactive, context, 0);
}
1151
5c9eae96
AK
/*
 * Add the existing device (major:minor) and, recursively, all of its
 * dependencies to the tree, linking it beneath 'parent'.
 * Returns the (possibly pre-existing) node, or NULL on failure.
 * Nodes already present in the tree are linked but not re-scanned.
 */
static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;	/* Set when this call created the node */

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		/* Query the kernel for info and dependency list.
		 * NOTE(review): deps/name/uuid point into dmt, so dmt must stay
		 * alive until we are done with them (destroyed at 'out'). */
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, 0, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		/* Leaf node: anchor it at the bottom of the tree */
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}
db208f51 1209
5c9eae96
AK
/* Add an existing device and its dependencies beneath the tree root.
 * Returns 1 on success, 0 on failure. */
int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
{
	return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
}
db208f51 1214
5c9eae96
AK
/* As dm_tree_add_dev(), but records the given udev flags on the new node.
 * Returns 1 on success, 0 on failure. */
int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
				    uint32_t minor, uint16_t udev_flags)
{
	return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
}
1220
bd90c6b2 1221static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
f16aea9e 1222 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
165e4a11
AK
1223{
1224 struct dm_task *dmt;
1225 int r = 0;
1226
1227 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
1228
1229 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
1230 log_error("Rename dm_task creation failed for %s", old_name);
1231 return 0;
1232 }
1233
1234 if (!dm_task_set_name(dmt, old_name)) {
1235 log_error("Failed to set name for %s rename.", old_name);
1236 goto out;
1237 }
1238
b4f1578f 1239 if (!dm_task_set_newname(dmt, new_name))
40e5fd8b 1240 goto_out;
165e4a11
AK
1241
1242 if (!dm_task_no_open_count(dmt))
1243 log_error("Failed to disable open_count");
1244
f16aea9e 1245 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1246 goto out;
1247
165e4a11
AK
1248 r = dm_task_run(dmt);
1249
1250out:
1251 dm_task_destroy(dmt);
1252
1253 return r;
1254}
1255
165e4a11
AK
1256/* FIXME Merge with _suspend_node? */
1257static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1258 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1259 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1260 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1261{
1262 struct dm_task *dmt;
bd90c6b2 1263 int r = 0;
165e4a11
AK
1264
1265 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1266
1267 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1268 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1269 return 0;
1270 }
1271
0b7d16bc
AK
1272 /* FIXME Kernel should fill in name on return instead */
1273 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1274 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1275 goto out;
0b7d16bc
AK
1276 }
1277
165e4a11
AK
1278 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1279 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1280 goto out;
165e4a11
AK
1281 }
1282
1283 if (!dm_task_no_open_count(dmt))
1284 log_error("Failed to disable open_count");
1285
52b84409
AK
1286 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1287 log_error("Failed to set read ahead");
1288
f16aea9e 1289 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1290 goto_out;
bd90c6b2 1291
9a8f192a
ZK
1292 if (!(r = dm_task_run(dmt)))
1293 goto_out;
1294
1295 if (already_suspended)
1296 dec_suspended();
1297
1298 if (!(r = dm_task_get_info(dmt, newinfo)))
1299 stack;
165e4a11 1300
bd90c6b2 1301out:
165e4a11
AK
1302 dm_task_destroy(dmt);
1303
1304 return r;
1305}
1306
db208f51 1307static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
b9ffd32c 1308 int skip_lockfs, int no_flush, struct dm_info *newinfo)
db208f51
AK
1309{
1310 struct dm_task *dmt;
1311 int r;
1312
b9ffd32c
AK
1313 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1314 name, major, minor,
1315 skip_lockfs ? "" : " with filesystem sync",
6e1898a5 1316 no_flush ? "" : " with device flush");
db208f51
AK
1317
1318 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1319 log_error("Suspend dm_task creation failed for %s", name);
1320 return 0;
1321 }
1322
1323 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1324 log_error("Failed to set device number for %s suspension.", name);
1325 dm_task_destroy(dmt);
1326 return 0;
1327 }
1328
1329 if (!dm_task_no_open_count(dmt))
1330 log_error("Failed to disable open_count");
1331
c55b1410
AK
1332 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1333 log_error("Failed to set skip_lockfs flag.");
1334
b9ffd32c
AK
1335 if (no_flush && !dm_task_no_flush(dmt))
1336 log_error("Failed to set no_flush flag.");
1337
1840aa09
AK
1338 if ((r = dm_task_run(dmt))) {
1339 inc_suspended();
db208f51 1340 r = dm_task_get_info(dmt, newinfo);
1840aa09 1341 }
db208f51 1342
3e8c6b73
AK
1343 dm_task_destroy(dmt);
1344
1345 return r;
1346}
1347
25e6ab87 1348static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
e0ea24be
ZK
1349{
1350 struct dm_task *dmt;
1351 int r = 0;
1352 uint64_t start, length;
1353 char *type = NULL;
1354 char *params = NULL;
e0ea24be 1355
25e6ab87
ZK
1356 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1357 return_0;
e0ea24be 1358
25e6ab87
ZK
1359 if (!dm_task_set_major(dmt, dnode->info.major) ||
1360 !dm_task_set_minor(dmt, dnode->info.minor)) {
1361 log_error("Failed to set major minor.");
1362 goto out;
e0ea24be
ZK
1363 }
1364
25e6ab87
ZK
1365 if (!dm_task_run(dmt))
1366 goto_out;
1367
1368 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1369
1370 if (type && (strcmp(type, "thin-pool") != 0)) {
c590a9cd 1371 log_error("Expected thin-pool target for %d:%d and got %s.",
25e6ab87 1372 dnode->info.major, dnode->info.minor, type);
e0ea24be
ZK
1373 goto out;
1374 }
1375
25e6ab87 1376 if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
c590a9cd 1377 log_error("Failed to parse transaction_id from %s.", params);
e0ea24be
ZK
1378 goto out;
1379 }
1380
25e6ab87 1381 log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);
e0ea24be 1382
25e6ab87
ZK
1383 r = 1;
1384out:
1385 dm_task_destroy(dmt);
e0ea24be 1386
25e6ab87
ZK
1387 return r;
1388}
e0ea24be 1389
25e6ab87
ZK
/*
 * Format one queued thin-pool message (create_snap/create_thin/delete/
 * trim/set_transaction_id) and send it to the pool device via
 * DM_DEVICE_TARGET_MSG.  Returns 1 on success, 0 on failure.
 */
static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];	/* Large enough for the longest message variant */
	int r;

	/* Build the textual message for the kernel */
	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	default:
		r = -1;	/* Unknown message type */
	}

	if (r < 0) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task - some messages are allowed to
	 * fail with a specific errno (e.g. replayed messages) */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
1457
11f64f0a
ZK
/*
 * If dnode is a thin-pool with a matching uuid prefix, bring the pool's
 * kernel transaction_id up to date by sending all queued thin messages.
 * Messages are only sent when the kernel is exactly one transaction
 * behind; any other mismatch (or a send failure) deactivates the pool's
 * children and fails.  Returns 1 on success or nothing-to-do, 0 on error.
 */
static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	/* Only single-segment existing devices can be thin pools */
	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		goto_bad;

	if (trans_id == seg->transaction_id)
		return 1; /* In sync - skip messages */

	/* Kernel must be exactly one transaction behind to replay messages */
	if (trans_id != (seg->transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, seg->transaction_id - 1);
		goto bad; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			goto_bad;

	return 1;
bad:
	/* Try to deactivate */
	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
		log_error("Failed to deactivate %s", dnode->name);

	return 0;
}
1506
18e0f934
AK
1507/*
1508 * FIXME Don't attempt to deactivate known internal dependencies.
1509 */
1510static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1511 const char *uuid_prefix,
1512 size_t uuid_prefix_len,
1513 unsigned level)
3e8c6b73 1514{
b7eb2ad0 1515 int r = 1;
3e8c6b73 1516 void *handle = NULL;
b4f1578f 1517 struct dm_tree_node *child = dnode;
3e8c6b73
AK
1518 struct dm_info info;
1519 const struct dm_info *dinfo;
1520 const char *name;
1521 const char *uuid;
1522
b4f1578f
AK
1523 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1524 if (!(dinfo = dm_tree_node_get_info(child))) {
3e8c6b73
AK
1525 stack;
1526 continue;
1527 }
1528
b4f1578f 1529 if (!(name = dm_tree_node_get_name(child))) {
3e8c6b73
AK
1530 stack;
1531 continue;
1532 }
1533
b4f1578f 1534 if (!(uuid = dm_tree_node_get_uuid(child))) {
3e8c6b73
AK
1535 stack;
1536 continue;
1537 }
1538
1539 /* Ignore if it doesn't belong to this VG */
2b69db1f 1540 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
3e8c6b73 1541 continue;
3e8c6b73
AK
1542
1543 /* Refresh open_count */
2e5ff5d1 1544 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
f55021f4 1545 !info.exists)
3e8c6b73
AK
1546 continue;
1547
4ce43894
ZK
1548 if (info.open_count) {
1549 /* Skip internal non-toplevel opened nodes */
1550 if (level)
1551 continue;
1552
1553 /* When retry is not allowed, error */
1554 if (!child->dtree->retry_remove) {
1555 log_error("Unable to deactivate open %s (%" PRIu32
1556 ":%" PRIu32 ")", name, info.major, info.minor);
1557 r = 0;
1558 continue;
1559 }
1560
1561 /* Check toplevel node for holders/mounted fs */
1562 if (!_check_device_not_in_use(name, &info)) {
1563 stack;
1564 r = 0;
1565 continue;
1566 }
1567 /* Go on with retry */
1568 }
125712be 1569
f3ef15ef 1570 /* Also checking open_count in parent nodes of presuspend_node */
125712be 1571 if ((child->presuspend_node &&
f3ef15ef
ZK
1572 !_node_has_closed_parents(child->presuspend_node,
1573 uuid_prefix, uuid_prefix_len))) {
18e0f934
AK
1574 /* Only report error from (likely non-internal) dependency at top level */
1575 if (!level) {
1576 log_error("Unable to deactivate open %s (%" PRIu32
1577 ":%" PRIu32 ")", name, info.major,
1578 info.minor);
1579 r = 0;
1580 }
f55021f4
AK
1581 continue;
1582 }
1583
76d1aec8
ZK
1584 /* Suspend child node first if requested */
1585 if (child->presuspend_node &&
1586 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1587 continue;
1588
f16aea9e 1589 if (!_deactivate_node(name, info.major, info.minor,
787200ef 1590 &child->dtree->cookie, child->udev_flags,
4ce43894 1591 (level == 0) ? child->dtree->retry_remove : 0)) {
3e8c6b73
AK
1592 log_error("Unable to deactivate %s (%" PRIu32
1593 ":%" PRIu32 ")", name, info.major,
1594 info.minor);
b7eb2ad0 1595 r = 0;
3e8c6b73 1596 continue;
f4249251
AK
1597 } else if (info.suspended)
1598 dec_suspended();
3e8c6b73 1599
18e0f934
AK
1600 if (dm_tree_node_num_children(child, 0)) {
1601 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
b7eb2ad0 1602 return_0;
18e0f934 1603 }
3e8c6b73
AK
1604 }
1605
b7eb2ad0 1606 return r;
3e8c6b73 1607}
db208f51 1608
18e0f934
AK
/* Public entry point: deactivate all matching children starting at the
 * top level (level 0).  Returns 1 on full success, 0 otherwise. */
int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
}
1615
b4f1578f 1616int dm_tree_suspend_children(struct dm_tree_node *dnode,
08e64ce5
ZK
1617 const char *uuid_prefix,
1618 size_t uuid_prefix_len)
db208f51 1619{
68085c93 1620 int r = 1;
db208f51 1621 void *handle = NULL;
b4f1578f 1622 struct dm_tree_node *child = dnode;
db208f51
AK
1623 struct dm_info info, newinfo;
1624 const struct dm_info *dinfo;
1625 const char *name;
1626 const char *uuid;
1627
690a5da2 1628 /* Suspend nodes at this level of the tree */
b4f1578f
AK
1629 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1630 if (!(dinfo = dm_tree_node_get_info(child))) {
db208f51
AK
1631 stack;
1632 continue;
1633 }
1634
b4f1578f 1635 if (!(name = dm_tree_node_get_name(child))) {
db208f51
AK
1636 stack;
1637 continue;
1638 }
1639
b4f1578f 1640 if (!(uuid = dm_tree_node_get_uuid(child))) {
db208f51
AK
1641 stack;
1642 continue;
1643 }
1644
1645 /* Ignore if it doesn't belong to this VG */
2b69db1f 1646 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
db208f51
AK
1647 continue;
1648
690a5da2
AK
1649 /* Ensure immediate parents are already suspended */
1650 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1651 continue;
1652
2e5ff5d1 1653 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info, NULL, NULL, NULL) ||
b700541f 1654 !info.exists || info.suspended)
db208f51
AK
1655 continue;
1656
c55b1410 1657 if (!_suspend_node(name, info.major, info.minor,
b9ffd32c
AK
1658 child->dtree->skip_lockfs,
1659 child->dtree->no_flush, &newinfo)) {
db208f51
AK
1660 log_error("Unable to suspend %s (%" PRIu32
1661 ":%" PRIu32 ")", name, info.major,
1662 info.minor);
68085c93 1663 r = 0;
db208f51
AK
1664 continue;
1665 }
1666
1667 /* Update cached info */
1668 child->info = newinfo;
690a5da2
AK
1669 }
1670
1671 /* Then suspend any child nodes */
1672 handle = NULL;
1673
b4f1578f
AK
1674 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1675 if (!(uuid = dm_tree_node_get_uuid(child))) {
690a5da2
AK
1676 stack;
1677 continue;
1678 }
1679
1680 /* Ignore if it doesn't belong to this VG */
87f98002 1681 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2 1682 continue;
db208f51 1683
b4f1578f 1684 if (dm_tree_node_num_children(child, 0))
68085c93
MS
1685 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1686 return_0;
db208f51
AK
1687 }
1688
68085c93 1689 return r;
db208f51
AK
1690}
1691
b4f1578f 1692int dm_tree_activate_children(struct dm_tree_node *dnode,
db208f51
AK
1693 const char *uuid_prefix,
1694 size_t uuid_prefix_len)
1695{
2ca6b865 1696 int r = 1;
db208f51 1697 void *handle = NULL;
b4f1578f 1698 struct dm_tree_node *child = dnode;
165e4a11
AK
1699 struct dm_info newinfo;
1700 const char *name;
db208f51 1701 const char *uuid;
56c28292 1702 int priority;
db208f51 1703
165e4a11 1704 /* Activate children first */
b4f1578f
AK
1705 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1706 if (!(uuid = dm_tree_node_get_uuid(child))) {
165e4a11
AK
1707 stack;
1708 continue;
db208f51
AK
1709 }
1710
908db078
AK
1711 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1712 continue;
db208f51 1713
b4f1578f 1714 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
1715 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1716 return_0;
56c28292 1717 }
165e4a11 1718
56c28292 1719 handle = NULL;
165e4a11 1720
aa6f4e51 1721 for (priority = 0; priority < 3; priority++) {
56c28292 1722 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
a5a31ce9
ZK
1723 if (priority != child->activation_priority)
1724 continue;
1725
56c28292
AK
1726 if (!(uuid = dm_tree_node_get_uuid(child))) {
1727 stack;
1728 continue;
165e4a11 1729 }
165e4a11 1730
56c28292
AK
1731 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1732 continue;
165e4a11 1733
56c28292
AK
1734 if (!(name = dm_tree_node_get_name(child))) {
1735 stack;
1736 continue;
1737 }
1738
1739 /* Rename? */
1740 if (child->props.new_name) {
bd90c6b2 1741 if (!_rename_node(name, child->props.new_name, child->info.major,
f16aea9e
PR
1742 child->info.minor, &child->dtree->cookie,
1743 child->udev_flags)) {
56c28292
AK
1744 log_error("Failed to rename %s (%" PRIu32
1745 ":%" PRIu32 ") to %s", name, child->info.major,
1746 child->info.minor, child->props.new_name);
1747 return 0;
1748 }
1749 child->name = child->props.new_name;
1750 child->props.new_name = NULL;
1751 }
1752
1753 if (!child->info.inactive_table && !child->info.suspended)
1754 continue;
1755
bafa2f39 1756 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 1757 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09 1758 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
56c28292 1759 log_error("Unable to resume %s (%" PRIu32
bafa2f39 1760 ":%" PRIu32 ")", child->name, child->info.major,
56c28292 1761 child->info.minor);
2ca6b865 1762 r = 0;
56c28292
AK
1763 continue;
1764 }
1765
1766 /* Update cached info */
1767 child->info = newinfo;
1768 }
db208f51
AK
1769 }
1770
165e4a11
AK
1771 handle = NULL;
1772
2ca6b865 1773 return r;
165e4a11
AK
1774}
1775
b4f1578f 1776static int _create_node(struct dm_tree_node *dnode)
165e4a11
AK
1777{
1778 int r = 0;
1779 struct dm_task *dmt;
1780
1781 log_verbose("Creating %s", dnode->name);
1782
1783 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1784 log_error("Create dm_task creation failed for %s", dnode->name);
1785 return 0;
1786 }
1787
1788 if (!dm_task_set_name(dmt, dnode->name)) {
1789 log_error("Failed to set device name for %s", dnode->name);
1790 goto out;
1791 }
1792
1793 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1794 log_error("Failed to set uuid for %s", dnode->name);
1795 goto out;
1796 }
1797
1798 if (dnode->props.major &&
1799 (!dm_task_set_major(dmt, dnode->props.major) ||
1800 !dm_task_set_minor(dmt, dnode->props.minor))) {
1801 log_error("Failed to set device number for %s creation.", dnode->name);
1802 goto out;
1803 }
1804
1805 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1806 log_error("Failed to set read only flag for %s", dnode->name);
1807 goto out;
1808 }
1809
1810 if (!dm_task_no_open_count(dmt))
1811 log_error("Failed to disable open_count");
1812
1813 if ((r = dm_task_run(dmt)))
1814 r = dm_task_get_info(dmt, &dnode->info);
1815
1816out:
1817 dm_task_destroy(dmt);
1818
1819 return r;
1820}
1821
1822
b4f1578f 1823static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1824{
1825 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1826 log_error("Failed to format %s device number for %s as dm "
1827 "target (%u,%u)",
1828 node->name, node->uuid, node->info.major, node->info.minor);
1829 return 0;
165e4a11
AK
1830 }
1831
1832 return 1;
1833}
1834
ffa9b6a5
ZK
/* Simplify string emitting code: append formatted text at offset p within
 * the enclosing function's params[paramsize] buffer and advance p.  On
 * formatting failure/overflow it logs the stack and makes the enclosing
 * function return -1, so it may only be used where that is valid. */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
ffa9b6a5 1845
3c74075f
JEB
1846/*
1847 * _emit_areas_line
1848 *
1849 * Returns: 1 on success, 0 on failure
1850 */
08f1ddea 1851static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
4dcaa230
AK
1852 struct load_segment *seg, char *params,
1853 size_t paramsize, int *pos)
165e4a11
AK
1854{
1855 struct seg_area *area;
7d7d93ac 1856 char devbuf[DM_FORMAT_DEV_BUFSIZE];
609faae9 1857 unsigned first_time = 1;
db3c1ac1 1858 const char *logtype, *synctype;
b262f3e1 1859 unsigned log_parm_count;
165e4a11 1860
2c44337b 1861 dm_list_iterate_items(area, &seg->areas) {
b262f3e1
ZK
1862 switch (seg->type) {
1863 case SEG_REPLICATOR_DEV:
6d04311e
JEB
1864 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1865 return_0;
1866
b262f3e1
ZK
1867 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1868 if (first_time)
1869 EMIT_PARAMS(*pos, " nolog 0");
1870 else {
1871 /* Remote devices */
1872 log_parm_count = (area->flags &
1873 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1874
1875 if (!area->slog) {
1876 devbuf[0] = 0; /* Only core log parameters */
1877 logtype = "core";
1878 } else {
1879 devbuf[0] = ' '; /* Extra space before device name */
1880 if (!_build_dev_string(devbuf + 1,
1881 sizeof(devbuf) - 1,
1882 area->slog))
1883 return_0;
1884 logtype = "disk";
1885 log_parm_count++; /* Extra sync log device name parameter */
1886 }
1887
1888 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1889 log_parm_count, devbuf, area->region_size);
1890
db3c1ac1
AK
1891 synctype = (area->flags & DM_NOSYNC) ?
1892 " nosync" : (area->flags & DM_FORCESYNC) ?
1893 " sync" : NULL;
b262f3e1 1894
db3c1ac1
AK
1895 if (synctype)
1896 EMIT_PARAMS(*pos, "%s", synctype);
b262f3e1
ZK
1897 }
1898 break;
cac52ca4
JEB
1899 case SEG_RAID1:
1900 case SEG_RAID4:
1901 case SEG_RAID5_LA:
1902 case SEG_RAID5_RA:
1903 case SEG_RAID5_LS:
1904 case SEG_RAID5_RS:
1905 case SEG_RAID6_ZR:
1906 case SEG_RAID6_NR:
1907 case SEG_RAID6_NC:
6d04311e
JEB
1908 if (!area->dev_node) {
1909 EMIT_PARAMS(*pos, " -");
1910 break;
1911 }
1912 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1913 return_0;
1914
cac52ca4
JEB
1915 EMIT_PARAMS(*pos, " %s", devbuf);
1916 break;
b262f3e1 1917 default:
6d04311e
JEB
1918 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1919 return_0;
1920
b262f3e1
ZK
1921 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1922 devbuf, area->offset);
1923 }
609faae9
AK
1924
1925 first_time = 0;
165e4a11
AK
1926 }
1927
1928 return 1;
1929}
1930
b262f3e1
ZK
/*
 * Emit the replicator target parameter line: the replicator log device
 * and settings, followed by one "blockdev" entry per remote site with
 * its sync mode and optional fall-behind constraint.
 * Returns 1 on success, 0 on failure.
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* Size of the replicator log comes from its own (single) segment */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 base parameters, plus 2 more if any fall-behind constraint is set */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		/* At most one fall-behind constraint is emitted, in this precedence */
		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1966
3c74075f 1967/*
3c74075f
JEB
1968 * Returns: 1 on success, 0 on failure
1969 */
beecb1e1
ZK
1970static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
1971 char *params, size_t paramsize)
165e4a11 1972{
8f26e18c
JEB
1973 int block_on_error = 0;
1974 int handle_errors = 0;
1975 int dm_log_userspace = 0;
1976 struct utsname uts;
dbcb64b8 1977 unsigned log_parm_count;
b39fdcf4 1978 int pos = 0, parts;
7d7d93ac 1979 char logbuf[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 1980 const char *logtype;
b39fdcf4 1981 unsigned kmaj = 0, kmin = 0, krel = 0;
165e4a11 1982
b39fdcf4
MB
1983 if (uname(&uts) == -1) {
1984 log_error("Cannot read kernel release version.");
1985 return 0;
1986 }
1987
1988 /* Kernels with a major number of 2 always had 3 parts. */
1989 parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
1990 if (parts < 1 || (kmaj < 3 && parts < 3)) {
1991 log_error("Wrong kernel release version %s.", uts.release);
30a65310
ZK
1992 return 0;
1993 }
67b25ed4 1994
8f26e18c
JEB
1995 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1996 /*
1997 * Originally, block_on_error was an argument to the log
1998 * portion of the mirror CTR table. It was renamed to
1999 * "handle_errors" and now resides in the 'features'
2000 * section of the mirror CTR table (i.e. at the end).
2001 *
2002 * We can identify whether to use "block_on_error" or
2003 * "handle_errors" by the dm-mirror module's version
2004 * number (>= 1.12) or by the kernel version (>= 2.6.22).
2005 */
ba61f848 2006 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
8f26e18c
JEB
2007 handle_errors = 1;
2008 else
2009 block_on_error = 1;
2010 }
2011
2012 if (seg->clustered) {
2013 /* Cluster mirrors require a UUID */
2014 if (!seg->uuid)
2015 return_0;
2016
2017 /*
2018 * Cluster mirrors used to have their own log
2019 * types. Now they are accessed through the
2020 * userspace log type.
2021 *
2022 * The dm-log-userspace module was added to the
2023 * 2.6.31 kernel.
2024 */
ba61f848 2025 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
8f26e18c
JEB
2026 dm_log_userspace = 1;
2027 }
2028
2029 /* Region size */
2030 log_parm_count = 1;
2031
2032 /* [no]sync, block_on_error etc. */
2033 log_parm_count += hweight32(seg->flags);
311d6d81 2034
8f26e18c
JEB
2035 /* "handle_errors" is a feature arg now */
2036 if (handle_errors)
2037 log_parm_count--;
2038
2039 /* DM_CORELOG does not count in the param list */
2040 if (seg->flags & DM_CORELOG)
2041 log_parm_count--;
2042
2043 if (seg->clustered) {
2044 log_parm_count++; /* For UUID */
2045
2046 if (!dm_log_userspace)
ffa9b6a5 2047 EMIT_PARAMS(pos, "clustered-");
49b95a5e
JEB
2048 else
2049 /* For clustered-* type field inserted later */
2050 log_parm_count++;
8f26e18c 2051 }
dbcb64b8 2052
8f26e18c
JEB
2053 if (!seg->log)
2054 logtype = "core";
2055 else {
2056 logtype = "disk";
2057 log_parm_count++;
2058 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
2059 return_0;
2060 }
dbcb64b8 2061
8f26e18c
JEB
2062 if (dm_log_userspace)
2063 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
2064 log_parm_count, seg->uuid, logtype);
2065 else
ffa9b6a5 2066 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
dbcb64b8 2067
8f26e18c
JEB
2068 if (seg->log)
2069 EMIT_PARAMS(pos, " %s", logbuf);
2070
2071 EMIT_PARAMS(pos, " %u", seg->region_size);
dbcb64b8 2072
8f26e18c
JEB
2073 if (seg->clustered && !dm_log_userspace)
2074 EMIT_PARAMS(pos, " %s", seg->uuid);
67b25ed4 2075
8f26e18c
JEB
2076 if ((seg->flags & DM_NOSYNC))
2077 EMIT_PARAMS(pos, " nosync");
2078 else if ((seg->flags & DM_FORCESYNC))
2079 EMIT_PARAMS(pos, " sync");
dbcb64b8 2080
8f26e18c
JEB
2081 if (block_on_error)
2082 EMIT_PARAMS(pos, " block_on_error");
2083
2084 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
2085
5f3325fc 2086 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
3c74075f 2087 return_0;
dbcb64b8 2088
8f26e18c
JEB
2089 if (handle_errors)
2090 EMIT_PARAMS(pos, " 1 handle_errors");
ffa9b6a5 2091
3c74075f 2092 return 1;
8f26e18c
JEB
2093}
2094
cac52ca4
JEB
2095static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
2096 uint32_t minor, struct load_segment *seg,
2097 uint64_t *seg_start, char *params,
2098 size_t paramsize)
2099{
ad2432dc 2100 uint32_t i;
cac52ca4
JEB
2101 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
2102 int pos = 0;
2103
2104 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
2105 param_count++;
2106
2107 if (seg->region_size)
2108 param_count += 2;
2109
ad2432dc
MB
2110 /* rebuilds is 64-bit */
2111 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
2112 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 2113
cac52ca4
JEB
2114 if ((seg->type == SEG_RAID1) && seg->stripe_size)
2115 log_error("WARNING: Ignoring RAID1 stripe size");
2116
2117 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
2118 param_count, seg->stripe_size);
2119
2120 if (seg->flags & DM_NOSYNC)
2121 EMIT_PARAMS(pos, " nosync");
2122 else if (seg->flags & DM_FORCESYNC)
2123 EMIT_PARAMS(pos, " sync");
2124
2125 if (seg->region_size)
2126 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2127
f439e65b
JEB
2128 for (i = 0; i < (seg->area_count / 2); i++)
2129 if (seg->rebuilds & (1 << i))
2130 EMIT_PARAMS(pos, " rebuild %u", i);
2131
cac52ca4
JEB
2132 /* Print number of metadata/data device pairs */
2133 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2134
2135 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2136 return_0;
2137
2138 return 1;
2139}
2140
8f26e18c
JEB
/*
 * Emit one table line's parameter string for 'seg' and add the target
 * "<*seg_start> <seg->size> <target-name> <params>" to the dm_task 'dmt'.
 *
 * On success advances *seg_start by seg->size and returns 1; returns 0 (or
 * a negative value propagated from the _emit_*_line helpers) on failure.
 * 'major'/'minor' are used only for diagnostics here.
 */
static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
			      uint32_t minor, struct load_segment *seg,
			      uint64_t *seg_start, char *params,
			      size_t paramsize)
{
	int pos = 0;
	int r;
	int target_type_is_raid = 0;
	char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
	char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];

	/* First switch: emit the type-specific leading parameters. */
	switch(seg->type) {
	case SEG_ERROR:
	case SEG_ZERO:
	case SEG_LINEAR:
		break;
	case SEG_MIRRORED:
		/* Mirrors are pretty complicated - now in separate function */
		r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
		if (!r)
			return_0;
		break;
	case SEG_REPLICATOR:
		if ((r = _replicator_emit_segment_line(seg, params, paramsize,
						       &pos)) <= 0) {
			stack;
			return r;
		}
		break;
	case SEG_REPLICATOR_DEV:
		/* "<replicator dev> <device index>" */
		if (!seg->replicator || !_build_dev_string(originbuf,
							   sizeof(originbuf),
							   seg->replicator))
			return_0;

		EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
		break;
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_MERGE:
		/* "<origin> <cow> P|N <chunk size>" */
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
			return_0;
		EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
			    seg->persistent ? 'P' : 'N', seg->chunk_size);
		break;
	case SEG_SNAPSHOT_ORIGIN:
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		EMIT_PARAMS(pos, "%s", originbuf);
		break;
	case SEG_STRIPED:
		EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
		break;
	case SEG_CRYPT:
		/* "<cipher>[-<chainmode>][-<iv>] <key> <iv offset> ..." */
		EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
			    seg->chainmode ? "-" : "", seg->chainmode ?: "",
			    seg->iv ? "-" : "", seg->iv ?: "", seg->key,
			    seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
			    seg->iv_offset : *seg_start);
		break;
	case SEG_RAID1:
	case SEG_RAID4:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
		/* All raid variants use the single kernel target "raid". */
		target_type_is_raid = 1;
		r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
					    params, paramsize);
		if (!r)
			return_0;

		break;
	case SEG_THIN_POOL:
		/* "<metadata dev> <data dev> <block size> <low water> <features>" */
		if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
			return_0;
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
			    seg->data_block_size, seg->low_water_mark,
			    seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
		break;
	case SEG_THIN:
		EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
		break;
	}

	/* Second switch: targets that take trailing device areas. */
	switch(seg->type) {
	case SEG_ERROR:
	case SEG_REPLICATOR:
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_ORIGIN:
	case SEG_SNAPSHOT_MERGE:
	case SEG_ZERO:
	case SEG_THIN_POOL:
	case SEG_THIN:
		break;
	case SEG_CRYPT:
	case SEG_LINEAR:
	case SEG_REPLICATOR_DEV:
	case SEG_STRIPED:
		if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
			stack;
			return r;
		}
		if (!params[0]) {
			log_error("No parameters supplied for %s target "
				  "%u:%u.", dm_segtypes[seg->type].target,
				  major, minor);
			return 0;
		}
		break;
	}

	log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
		  " %" PRIu64 " %s %s", major, minor,
		  *seg_start, seg->size, target_type_is_raid ? "raid" :
		  dm_segtypes[seg->type].target, params);

	if (!dm_task_add_target(dmt, *seg_start, seg->size,
				target_type_is_raid ? "raid" :
				dm_segtypes[seg->type].target, params))
		return_0;

	/* Next segment of this device starts where this one ends. */
	*seg_start += seg->size;

	return 1;
}
2275
ffa9b6a5
ZK
2276#undef EMIT_PARAMS
2277
4b2cae46
AK
2278static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2279 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2280{
2281 char *params;
2282 size_t paramsize = 4096;
2283 int ret;
2284
2285 do {
2286 if (!(params = dm_malloc(paramsize))) {
2287 log_error("Insufficient space for target parameters.");
2288 return 0;
2289 }
2290
12ea7cb1 2291 params[0] = '\0';
4b2cae46
AK
2292 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2293 params, paramsize);
165e4a11
AK
2294 dm_free(params);
2295
2296 if (!ret)
2297 stack;
2298
2299 if (ret >= 0)
2300 return ret;
2301
2302 log_debug("Insufficient space in params[%" PRIsize_t
2303 "] for target parameters.", paramsize);
2304
2305 paramsize *= 2;
2306 } while (paramsize < MAX_TARGET_PARAMSIZE);
2307
2308 log_error("Target parameter size too big. Aborting.");
2309 return 0;
2310}
2311
b4f1578f 2312static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2313{
2314 int r = 0;
2315 struct dm_task *dmt;
2316 struct load_segment *seg;
df390f17 2317 uint64_t seg_start = 0, existing_table_size;
165e4a11 2318
4b2cae46
AK
2319 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2320 dnode->info.major, dnode->info.minor);
165e4a11
AK
2321
2322 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2323 log_error("Reload dm_task creation failed for %s", dnode->name);
2324 return 0;
2325 }
2326
2327 if (!dm_task_set_major(dmt, dnode->info.major) ||
2328 !dm_task_set_minor(dmt, dnode->info.minor)) {
2329 log_error("Failed to set device number for %s reload.", dnode->name);
2330 goto out;
2331 }
2332
2333 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2334 log_error("Failed to set read only flag for %s", dnode->name);
2335 goto out;
2336 }
2337
2338 if (!dm_task_no_open_count(dmt))
2339 log_error("Failed to disable open_count");
2340
2c44337b 2341 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2342 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2343 seg, &seg_start))
b4f1578f 2344 goto_out;
165e4a11 2345
ec289b64
AK
2346 if (!dm_task_suppress_identical_reload(dmt))
2347 log_error("Failed to suppress reload of identical tables.");
2348
2349 if ((r = dm_task_run(dmt))) {
165e4a11 2350 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2351 if (r && !dnode->info.inactive_table)
2352 log_verbose("Suppressed %s identical table reload.",
2353 dnode->name);
bb875bb9 2354
df390f17 2355 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2356 if ((dnode->props.size_changed =
df390f17 2357 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2358 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2359 PRIu64 " for %s", existing_table_size,
bb875bb9 2360 seg_start, dnode->name);
df390f17
AK
2361 /*
2362 * Kernel usually skips size validation on zero-length devices
2363 * now so no need to preload them.
2364 */
2365 /* FIXME In which kernel version did this begin? */
2366 if (!existing_table_size && dnode->props.delay_resume_if_new)
2367 dnode->props.size_changed = 0;
2368 }
ec289b64 2369 }
165e4a11
AK
2370
2371 dnode->props.segment_count = 0;
2372
2373out:
2374 dm_task_destroy(dmt);
2375
2376 return r;
165e4a11
AK
2377}
2378
b4f1578f 2379int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2380 const char *uuid_prefix,
2381 size_t uuid_prefix_len)
165e4a11 2382{
2ca6b865 2383 int r = 1;
165e4a11 2384 void *handle = NULL;
b4f1578f 2385 struct dm_tree_node *child;
165e4a11 2386 struct dm_info newinfo;
566515c0 2387 int update_devs_flag = 0;
165e4a11
AK
2388
2389 /* Preload children first */
b4f1578f 2390 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2391 /* Skip existing non-device-mapper devices */
2392 if (!child->info.exists && child->info.major)
2393 continue;
2394
2395 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2396 if (child->info.exists &&
2397 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2398 continue;
2399
b4f1578f 2400 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2401 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2402 return_0;
165e4a11 2403
165e4a11 2404 /* FIXME Cope if name exists with no uuid? */
3d6782b3
ZK
2405 if (!child->info.exists && !_create_node(child))
2406 return_0;
165e4a11 2407
3d6782b3
ZK
2408 if (!child->info.inactive_table &&
2409 child->props.segment_count &&
2410 !_load_node(child))
2411 return_0;
165e4a11 2412
eb91c4ee
MB
2413 /* Propagate device size change change */
2414 if (child->props.size_changed)
2415 dnode->props.size_changed = 1;
2416
bb875bb9 2417 /* Resume device immediately if it has parents and its size changed */
3776c494 2418 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2419 continue;
2420
7707ea90
AK
2421 if (!child->info.inactive_table && !child->info.suspended)
2422 continue;
2423
fc795d87 2424 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2425 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2426 &newinfo, &child->dtree->cookie, child->udev_flags,
2427 child->info.suspended)) {
165e4a11 2428 log_error("Unable to resume %s (%" PRIu32
fc795d87 2429 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2430 child->info.minor);
2ca6b865 2431 r = 0;
165e4a11
AK
2432 continue;
2433 }
2434
2435 /* Update cached info */
2436 child->info = newinfo;
bbcd37e4
ZK
2437 if (child->props.send_messages &&
2438 !(r = _node_send_messages(child, uuid_prefix, uuid_prefix_len))) {
2439 stack;
2440 continue;
2441 }
566515c0
PR
2442 /*
2443 * Prepare for immediate synchronization with udev and flush all stacked
2444 * dev node operations if requested by immediate_dev_node property. But
2445 * finish processing current level in the tree first.
2446 */
2447 if (child->props.immediate_dev_node)
2448 update_devs_flag = 1;
165e4a11
AK
2449 }
2450
bbcd37e4
ZK
2451 if (r && dnode->props.send_messages &&
2452 !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
2453 stack;
165e4a11 2454
566515c0
PR
2455 if (update_devs_flag) {
2456 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2457 stack;
2458 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2459 }
2460
11f64f0a 2461 if (r && !_node_send_messages(dnode, uuid_prefix, uuid_prefix_len)) {
25e6ab87
ZK
2462 stack;
2463 if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
2464 log_error("Failed to deactivate %s", dnode->name);
2465 r = 0;
2466 }
2467
2ca6b865 2468 return r;
165e4a11
AK
2469}
2470
165e4a11
AK
2471/*
2472 * Returns 1 if unsure.
2473 */
b4f1578f 2474int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2475 const char *uuid_prefix,
2476 size_t uuid_prefix_len)
2477{
2478 void *handle = NULL;
b4f1578f 2479 struct dm_tree_node *child = dnode;
165e4a11
AK
2480 const char *uuid;
2481
b4f1578f
AK
2482 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2483 if (!(uuid = dm_tree_node_get_uuid(child))) {
2484 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2485 return 1;
2486 }
2487
87f98002 2488 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2489 return 1;
2490
b4f1578f
AK
2491 if (dm_tree_node_num_children(child, 0))
2492 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2493 }
2494
2495 return 0;
2496}
2497
2498/*
2499 * Target functions
2500 */
b4f1578f 2501static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2502{
2503 struct load_segment *seg;
2504
b4f1578f
AK
2505 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2506 log_error("dtree node segment allocation failed");
165e4a11
AK
2507 return NULL;
2508 }
2509
2510 seg->type = type;
2511 seg->size = size;
2512 seg->area_count = 0;
2c44337b 2513 dm_list_init(&seg->areas);
165e4a11
AK
2514 seg->stripe_size = 0;
2515 seg->persistent = 0;
2516 seg->chunk_size = 0;
2517 seg->cow = NULL;
2518 seg->origin = NULL;
aa6f4e51 2519 seg->merge = NULL;
165e4a11 2520
2c44337b 2521 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2522 dnode->props.segment_count++;
2523
2524 return seg;
2525}
2526
b4f1578f 2527int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2528 uint64_t size,
2529 const char *origin_uuid)
165e4a11
AK
2530{
2531 struct load_segment *seg;
b4f1578f 2532 struct dm_tree_node *origin_node;
165e4a11 2533
b4f1578f
AK
2534 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2535 return_0;
165e4a11 2536
b4f1578f 2537 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2538 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2539 return 0;
2540 }
2541
2542 seg->origin = origin_node;
b4f1578f
AK
2543 if (!_link_tree_nodes(dnode, origin_node))
2544 return_0;
165e4a11 2545
56c28292
AK
2546 /* Resume snapshot origins after new snapshots */
2547 dnode->activation_priority = 1;
2548
165e4a11
AK
2549 return 1;
2550}
2551
aa6f4e51
MS
/*
 * Common helper for snapshot and snapshot-merge targets: append the
 * segment, link origin and COW devices, and (for merge) arrange
 * activation ordering so snapshot-merge resumes before the merging
 * snapshot.  Returns 1 on success, 0 on failure.
 */
static int _add_snapshot_target(struct dm_tree_node *node,
				uint64_t size,
				const char *origin_uuid,
				const char *cow_uuid,
				const char *merge_uuid,
				int persistent,
				uint32_t chunk_size)
{
	struct load_segment *seg;
	struct dm_tree_node *origin_node, *cow_node, *merge_node;
	unsigned seg_type;

	/* A merge uuid selects the snapshot-merge target variant. */
	seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;

	if (!(seg = _add_segment(node, seg_type, size)))
		return_0;

	if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
		log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
		return 0;
	}

	seg->origin = origin_node;
	if (!_link_tree_nodes(node, origin_node))
		return_0;

	if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
		log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
		return 0;
	}

	seg->cow = cow_node;
	if (!_link_tree_nodes(node, cow_node))
		return_0;

	seg->persistent = persistent ? 1 : 0;
	seg->chunk_size = chunk_size;

	if (merge_uuid) {
		if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
			/* not a pure error, merging snapshot may have been deactivated */
			log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
		} else {
			seg->merge = merge_node;
			/* must not link merging snapshot, would undermine activation_priority below */
		}

		/* Resume snapshot-merge (acting origin) after other snapshots */
		node->activation_priority = 1;
		if (seg->merge) {
			/* Resume merging snapshot after snapshot-merge */
			seg->merge->activation_priority = 2;
		}
	}

	return 1;
}
2609
aa6f4e51
MS
2610
/*
 * Public wrapper: add a plain (non-merging) snapshot target.
 * Passes NULL merge uuid so _add_snapshot_target() picks SEG_SNAPSHOT.
 */
int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
				     uint64_t size,
				     const char *origin_uuid,
				     const char *cow_uuid,
				     int persistent,
				     uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    NULL, persistent, chunk_size);
}
2621
/*
 * Public wrapper: add a snapshot-merge target.  Merge snapshots are
 * always persistent, hence the hard-coded 1.
 */
int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *origin_uuid,
					   const char *cow_uuid,
					   const char *merge_uuid,
					   uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    merge_uuid, 1, chunk_size);
}
2632
b4f1578f 2633int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2634 uint64_t size)
165e4a11 2635{
b4f1578f
AK
2636 if (!_add_segment(node, SEG_ERROR, size))
2637 return_0;
165e4a11
AK
2638
2639 return 1;
2640}
2641
b4f1578f 2642int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2643 uint64_t size)
165e4a11 2644{
b4f1578f
AK
2645 if (!_add_segment(node, SEG_ZERO, size))
2646 return_0;
165e4a11
AK
2647
2648 return 1;
2649}
2650
b4f1578f 2651int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2652 uint64_t size)
165e4a11 2653{
b4f1578f
AK
2654 if (!_add_segment(node, SEG_LINEAR, size))
2655 return_0;
165e4a11
AK
2656
2657 return 1;
2658}
2659
b4f1578f 2660int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2661 uint64_t size,
2662 uint32_t stripe_size)
165e4a11
AK
2663{
2664 struct load_segment *seg;
2665
b4f1578f
AK
2666 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2667 return_0;
165e4a11
AK
2668
2669 seg->stripe_size = stripe_size;
2670
2671 return 1;
2672}
2673
12ca060e
MB
2674int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2675 uint64_t size,
2676 const char *cipher,
2677 const char *chainmode,
2678 const char *iv,
2679 uint64_t iv_offset,
2680 const char *key)
2681{
2682 struct load_segment *seg;
2683
2684 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2685 return_0;
2686
2687 seg->cipher = cipher;
2688 seg->chainmode = chainmode;
2689 seg->iv = iv;
2690 seg->iv_offset = iv_offset;
2691 seg->key = key;
2692
2693 return 1;
2694}
2695
b4f1578f 2696int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2697 uint32_t region_size,
08e64ce5 2698 unsigned clustered,
165e4a11 2699 const char *log_uuid,
ce7ed2c0
AK
2700 unsigned area_count,
2701 uint32_t flags)
165e4a11 2702{
908db078 2703 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2704 struct load_segment *seg;
2705
2706 if (!node->props.segment_count) {
b8175c33 2707 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2708 return 0;
2709 }
2710
2c44337b 2711 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2712
24b026e3 2713 if (log_uuid) {
67b25ed4
AK
2714 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2715 log_error("log uuid pool_strdup failed");
2716 return 0;
2717 }
df390f17
AK
2718 if ((flags & DM_CORELOG))
2719 /* For pvmove: immediate resume (for size validation) isn't needed. */
2720 node->props.delay_resume_if_new = 1;
2721 else {
9723090c
AK
2722 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2723 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2724 return 0;
2725 }
2726
566515c0
PR
2727 if (clustered)
2728 log_node->props.immediate_dev_node = 1;
2729
0a99713e
AK
2730 /* The kernel validates the size of disk logs. */
2731 /* FIXME Propagate to any devices below */
2732 log_node->props.delay_resume_if_new = 0;
2733
9723090c
AK
2734 if (!_link_tree_nodes(node, log_node))
2735 return_0;
2736 }
165e4a11
AK
2737 }
2738
2739 seg->log = log_node;
165e4a11
AK
2740 seg->region_size = region_size;
2741 seg->clustered = clustered;
2742 seg->mirror_area_count = area_count;
dbcb64b8 2743 seg->flags = flags;
165e4a11
AK
2744
2745 return 1;
2746}
2747
b4f1578f 2748int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2749 uint64_t size)
165e4a11 2750{
cbecd3cd 2751 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2752 return_0;
165e4a11
AK
2753
2754 return 1;
2755}
2756
cac52ca4
JEB
2757int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2758 uint64_t size,
2759 const char *raid_type,
2760 uint32_t region_size,
2761 uint32_t stripe_size,
f439e65b 2762 uint64_t rebuilds,
cac52ca4
JEB
2763 uint64_t reserved2)
2764{
2765 int i;
2766 struct load_segment *seg = NULL;
2767
2768 for (i = 0; dm_segtypes[i].target && !seg; i++)
2769 if (!strcmp(raid_type, dm_segtypes[i].target))
2770 if (!(seg = _add_segment(node,
2771 dm_segtypes[i].type, size)))
2772 return_0;
2773
b2fa9b43
JEB
2774 if (!seg)
2775 return_0;
2776
cac52ca4
JEB
2777 seg->region_size = region_size;
2778 seg->stripe_size = stripe_size;
2779 seg->area_count = 0;
f439e65b 2780 seg->rebuilds = rebuilds;
cac52ca4
JEB
2781
2782 return 1;
2783}
2784
b262f3e1
ZK
/*
 * Add a replicator target.  For the local site (rsite_index 0) this
 * creates the replicator segment and links the replicator log device;
 * for every call (including site 0) it then appends a replication-site
 * record to the segment.  Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
				       uint64_t size,
				       const char *rlog_uuid,
				       const char *rlog_type,
				       unsigned rsite_index,
				       dm_replicator_mode_t mode,
				       uint32_t async_timeout,
				       uint64_t fall_behind_data,
				       uint32_t fall_behind_ios)
{
	struct load_segment *rseg;
	struct replicator_site *rsite;

	/* Local site0 - adds replicator segment and links rlog device */
	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		if (node->props.segment_count) {
			log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
			return 0;
		}

		if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
			return_0;

		if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
			log_error("Missing replicator log uuid %s.", rlog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, rseg->log))
			return_0;

		/* Only the ringbuffer log type is supported. */
		if (strcmp(rlog_type, "ringbuffer") != 0) {
			log_error("Unsupported replicator log type %s.", rlog_type);
			return 0;
		}

		if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
			return_0;

		dm_list_init(&rseg->rsites);
		rseg->rdevice_count = 0;
		node->activation_priority = 1;
	}

	/* Add site to segment */
	/* Async-only parameters are invalid in synchronous mode. */
	if (mode == DM_REPLICATOR_SYNC
	    && (async_timeout || fall_behind_ios || fall_behind_data)) {
		log_error("Async parameters passed for synchronnous replicator.");
		return 0;
	}

	/* Site 0 must have been set up first, leaving exactly one segment. */
	if (node->props.segment_count != 1) {
		log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
		return 0;
	}

	rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
	if (rseg->type != SEG_REPLICATOR) {
		log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
			  dm_segtypes[rseg->type].target);
		return 0;
	}

	if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
		log_error("Failed to allocate remote site segment.");
		return 0;
	}

	dm_list_add(&rseg->rsites, &rsite->list);
	rseg->rsite_count++;

	rsite->mode = mode;
	rsite->async_timeout = async_timeout;
	rsite->fall_behind_data = fall_behind_data;
	rsite->fall_behind_ios = fall_behind_ios;
	rsite->rsite_index = rsite_index;

	return 1;
}
2864
/* Appends device node to Replicator */
/*
 * For the local site (rsite_index 0) this creates a replicator-dev
 * segment on 'node' referencing the replicator device; for remote sites
 * it reuses node's existing replicator-dev segment.  In both cases it
 * then adds the replicated device as a target area, optionally with a
 * sync-log device.  Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *replicator_uuid,
					   uint64_t rdevice_index,
					   const char *rdev_uuid,
					   unsigned rsite_index,
					   const char *slog_uuid,
					   uint32_t slog_flags,
					   uint32_t slog_region_size)
{
	struct seg_area *area;
	struct load_segment *rseg;
	struct load_segment *rep_seg;

	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		/* Site index for local target */
		if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
			return_0;

		if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
			log_error("Missing replicator uuid %s.", replicator_uuid);
			return 0;
		}

		/* Local slink0 for replicator must be always initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
			return 0;
		}

		rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
		if (rep_seg->type != SEG_REPLICATOR) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
				  dm_segtypes[rep_seg->type].target);
			return 0;
		}
		rep_seg->rdevice_count++;

		if (!_link_tree_nodes(node, rseg->replicator))
			return_0;

		rseg->rdevice_index = rdevice_index;
	} else {
		/* Local slink0 for replicator must be always initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
			return 0;
		}

		rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
		if (rseg->type != SEG_REPLICATOR_DEV) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
				  dm_segtypes[rseg->type].target);
			return 0;
		}
	}

	/* A disk sync log requires a uuid to locate it. */
	if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
		log_error("Unspecified sync log uuid.");
		return 0;
	}

	if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
		return_0;

	/* The area just added is the last one on the segment. */
	area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);

	if (!(slog_flags & DM_CORELOG)) {
		if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
			log_error("Couldn't find sync log uuid %s.", slog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, area->slog))
			return_0;
	}

	area->flags = slog_flags;
	area->region_size = slog_region_size;
	area->rsite_index = rsite_index;

	return 1;
}
2949
5668fe04
ZK
2950static int _thin_validate_device_id(uint32_t device_id)
2951{
2952 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2953 log_error("Device id %u is higher then %u.",
2954 device_id, DM_THIN_MAX_DEVICE_ID);
2955 return 0;
2956 }
2957
2958 return 1;
2959}
2960
4251236e
ZK
2961int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2962 uint64_t size,
e0ea24be 2963 uint64_t transaction_id,
4251236e 2964 const char *metadata_uuid,
5668fd6a 2965 const char *pool_uuid,
4251236e 2966 uint32_t data_block_size,
e9156c2b 2967 uint64_t low_water_mark,
460c5991 2968 unsigned skip_block_zeroing)
4251236e
ZK
2969{
2970 struct load_segment *seg;
2971
3f53c059 2972 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2973 log_error("Data block size %u is lower then %u sectors.",
3f53c059 2974 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2975 return 0;
2976 }
2977
3f53c059 2978 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2979 log_error("Data block size %u is higher then %u sectors.",
3f53c059 2980 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2981 return 0;
2982 }
2983
2984 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2985 return_0;
2986
2987 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2988 log_error("Missing metadata uuid %s.", metadata_uuid);
2989 return 0;
2990 }
2991
2992 if (!_link_tree_nodes(node, seg->metadata))
2993 return_0;
2994
2995 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2996 log_error("Missing pool uuid %s.", pool_uuid);
2997 return 0;
2998 }
2999
3000 if (!_link_tree_nodes(node, seg->pool))
3001 return_0;
3002
bbcd37e4
ZK
3003 node->props.send_messages = 1;
3004 seg->transaction_id = transaction_id;
e9156c2b 3005 seg->low_water_mark = low_water_mark;
e0ea24be 3006 seg->data_block_size = data_block_size;
460c5991 3007 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
3008 dm_list_init(&seg->thin_messages);
3009
3010 return 1;
3011}
3012
/*
 * Queue a thin-pool kernel message on 'node' (which must carry exactly
 * one SEG_THIN_POOL segment) for later delivery.  The meaning of
 * id1/id2 depends on 'type':
 *   CREATE_SNAP:        id1 = new device id, id2 = origin device id
 *   CREATE_THIN:        id1 = new device id
 *   DELETE:             id1 = device id
 *   TRIM:               id1 = device id, id2 = new size
 *   SET_TRANSACTION_ID: id1 = current id, id2 = new id (must be id1+1)
 * Returns 1 on success, 0 on validation or allocation failure.
 */
int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
				       dm_thin_message_t type,
				       uint64_t id1, uint64_t id2)
{
	struct load_segment *seg;
	struct thin_message *tm;

	if (node->props.segment_count != 1) {
		log_error("Thin pool node must have only one segment.");
		return 0;
	}

	seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL) {
		log_error("Thin pool node has segment type %s.",
			  dm_segtypes[seg->type].target);
		return 0;
	}

	if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
		log_error("Failed to allocate thin message.");
		return 0;
	}

	switch (type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		/* If the thin origin is active, it must be suspend first! */
		if (id1 == id2) {
			log_error("Cannot use same device id for origin and its snapshot.");
			return 0;
		}
		if (!_thin_validate_device_id(id1) ||
		    !_thin_validate_device_id(id2))
			return_0;
		tm->message.u.m_create_snap.device_id = id1;
		tm->message.u.m_create_snap.origin_id = id2;
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_create_thin.device_id = id1;
		/* EEXIST from the kernel is tolerated: device already created. */
		tm->expected_errno = EEXIST;
		break;
	case DM_THIN_MESSAGE_DELETE:
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_delete.device_id = id1;
		/* ENODATA tolerated: device already deleted. */
		tm->expected_errno = ENODATA;
		break;
	case DM_THIN_MESSAGE_TRIM:
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_trim.device_id = id1;
		tm->message.u.m_trim.new_size = id2;
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		if ((id1 + 1) != id2) {
			log_error("New transaction id must be sequential.");
			return 0; /* FIXME: Maybe too strict here? */
		}
		/* seg->transaction_id holds the target (new) transaction id. */
		if (id2 != seg->transaction_id) {
			log_error("Current transaction id is different from thin pool.");
			return 0; /* FIXME: Maybe too strict here? */
		}
		tm->message.u.m_set_transaction_id.current_id = id1;
		tm->message.u.m_set_transaction_id.new_id = id2;
		break;
	default:
		log_error("Unsupported message type %d.", (int) type);
		return 0;
	}

	tm->message.type = type;
	dm_list_add(&seg->thin_messages, &tm->list);

	return 1;
}
3090
3091int dm_tree_node_add_thin_target(struct dm_tree_node *node,
3092 uint64_t size,
4d25c81b 3093 const char *pool_uuid,
4251236e
ZK
3094 uint32_t device_id)
3095{
4d25c81b 3096 struct dm_tree_node *pool;
4251236e
ZK
3097 struct load_segment *seg;
3098
4d25c81b
ZK
3099 if (!(pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
3100 log_error("Missing thin pool uuid %s.", pool_uuid);
4251236e
ZK
3101 return 0;
3102 }
3103
4d25c81b 3104 if (!_link_tree_nodes(node, pool))
4251236e
ZK
3105 return_0;
3106
6744c143
ZK
3107 if (!_thin_validate_device_id(device_id))
3108 return_0;
4d25c81b 3109
6744c143
ZK
3110 if (!(seg = _add_segment(node, SEG_THIN, size)))
3111 return_0;
4d25c81b 3112
6744c143
ZK
3113 seg->pool = pool;
3114 seg->device_id = device_id;
1419bf1c 3115
4251236e
ZK
3116 return 1;
3117}
3118
077c4d1a
ZK
3119
3120int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
3121 struct dm_status_thin_pool **status)
3122{
3123 struct dm_status_thin_pool *s;
3124
3125 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin_pool)))) {
3126 log_error("Failed to allocate thin_pool status structure.");
3127 return 0;
3128 }
3129
5fd459f0 3130 /* FIXME: add support for held metadata root */
077c4d1a
ZK
3131 if (sscanf(params, "%" PRIu64 " %" PRIu64 "/%" PRIu64 " %" PRIu64 "/%" PRIu64,
3132 &s->transaction_id,
5fd459f0
ZK
3133 &s->used_metadata_blocks,
3134 &s->total_metadata_blocks,
077c4d1a
ZK
3135 &s->used_data_blocks,
3136 &s->total_data_blocks) != 5) {
3137 log_error("Failed to parse thin pool params: %s.", params);
3138 return 0;
3139 }
3140
3141 *status = s;
3142
3143 return 1;
3144}
3145
3146int dm_get_status_thin(struct dm_pool *mem, const char *params,
3147 struct dm_status_thin **status)
3148{
3149 struct dm_status_thin *s;
3150
3151 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin)))) {
3152 log_error("Failed to allocate thin status structure.");
3153 return 0;
3154 }
3155
9568f1b5
ZK
3156 if (strchr(params, '-')) {
3157 s->mapped_sectors = 0;
3158 s->highest_mapped_sector = 0;
3159 } else if (sscanf(params, "%" PRIu64 " %" PRIu64,
077c4d1a
ZK
3160 &s->mapped_sectors,
3161 &s->highest_mapped_sector) != 2) {
3162 log_error("Failed to parse thin params: %s.", params);
3163 return 0;
3164 }
3165
3166 *status = s;
3167
3168 return 1;
3169}
3170
b4f1578f 3171static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
3172{
3173 struct seg_area *area;
3174
b4f1578f 3175 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
3176 log_error("Failed to allocate target segment area.");
3177 return 0;
3178 }
3179
3180 area->dev_node = dev_node;
3181 area->offset = offset;
3182
2c44337b 3183 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
3184 seg->area_count++;
3185
3186 return 1;
3187}
3188
b4f1578f 3189int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
3190 const char *dev_name,
3191 const char *uuid,
3192 uint64_t offset)
165e4a11
AK
3193{
3194 struct load_segment *seg;
3195 struct stat info;
b4f1578f 3196 struct dm_tree_node *dev_node;
165e4a11
AK
3197
3198 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3199 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3200 return 0;
3201 }
3202
3203 if (uuid) {
b4f1578f 3204 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3205 log_error("Couldn't find area uuid %s.", uuid);
3206 return 0;
3207 }
b4f1578f
AK
3208 if (!_link_tree_nodes(node, dev_node))
3209 return_0;
165e4a11 3210 } else {
6d04311e 3211 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3212 log_error("Device %s not found.", dev_name);
3213 return 0;
3214 }
3215
40e5fd8b 3216 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3217 log_error("Device %s is not a block device.", dev_name);
3218 return 0;
3219 }
3220
3221 /* FIXME Check correct macro use */
cda69e17
PR
3222 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3223 MINOR(info.st_rdev), 0)))
b4f1578f 3224 return_0;
165e4a11
AK
3225 }
3226
3227 if (!node->props.segment_count) {
b8175c33 3228 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3229 return 0;
3230 }
3231
2c44337b 3232 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3233
b4f1578f
AK
3234 if (!_add_area(node, seg, dev_node, offset))
3235 return_0;
165e4a11
AK
3236
3237 return 1;
db208f51 3238}
bd90c6b2 3239
6d04311e
JEB
3240int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3241{
3242 struct load_segment *seg;
3243
3244 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3245
415c0690
AK
3246 switch (seg->type) {
3247 case SEG_RAID1:
3248 case SEG_RAID4:
3249 case SEG_RAID5_LA:
3250 case SEG_RAID5_RA:
3251 case SEG_RAID5_LS:
3252 case SEG_RAID5_RS:
3253 case SEG_RAID6_ZR:
3254 case SEG_RAID6_NR:
3255 case SEG_RAID6_NC:
3256 break;
3257 default:
3258 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3259 return 0;
3260 }
3261
6d04311e
JEB
3262 if (!_add_area(node, seg, NULL, offset))
3263 return_0;
3264
3265 return 1;
3266}
This page took 0.686057 seconds and 5 git commands to generate.