]> sourceware.org Git - lvm2.git/blame - libdm/libdm-deptree.c
Fix resource leak of file handle
[lvm2.git] / libdm / libdm-deptree.c
CommitLineData
3d0480ed 1/*
4251236e 2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3d0480ed
AK
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
3e5b6ed2 15#include "dmlib.h"
3d0480ed
AK
16#include "libdm-targets.h"
17#include "libdm-common.h"
3d0480ed 18#include "kdev_t.h"
0782ad50 19#include "dm-ioctl.h"
3d0480ed
AK
20
21#include <stdarg.h>
22#include <sys/param.h>
8f26e18c 23#include <sys/utsname.h>
3d0480ed 24
165e4a11
AK
25#define MAX_TARGET_PARAMSIZE 500000
26
b262f3e1
ZK
27#define REPLICATOR_LOCAL_SITE 0
28
165e4a11
AK
/* Supported segment types */
/*
 * NOTE: dm_segtypes[] below maps these values 1:1 onto target-name
 * strings and its lookup relies on this ordering, so (per the warning
 * in that table) do not add new enum elements past the RAID entries.
 */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,	/* sentinel: terminates dm_segtypes[] */
};
b4f1578f 55
165e4a11
AK
/* FIXME Add crypt and multipath support */

/* Maps each segment-type enum value to its device-mapper target name. */
struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 *WARNING: Since 'raid' target overloads this 1:1 mapping table
	 * for search do not add new enum elements past them!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};
93
/* Some segment types have a list of areas of other devices attached */
/* One mapped area of a segment: a reference to another tree node plus
 * the sector offset into it.  Replicator fields are only meaningful
 * for SEG_REPLICATOR_DEV areas. */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;	/* underlying device for this area */

	uint64_t offset;		/* start sector within dev_node */

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};
107
/* A single thin-pool target message; 'type' selects which union member
 * is valid.  See the kernel thin-provisioning target message interface. */
struct dm_thin_message {
	dm_thin_message_t type;
	union {
		struct {
			uint32_t device_id;
			uint32_t origin_id;
		} m_create_snap;
		struct {
			uint32_t device_id;
		} m_create_thin;
		struct {
			uint32_t device_id;
		} m_delete;
		struct {
			uint64_t current_id;
			uint64_t new_id;
		} m_set_transaction_id;
		struct {
			uint32_t device_id;
			uint64_t new_size;
		} m_trim;
	} u;
};
131
/* Queued thin-pool message held on load_segment.thin_messages until
 * the pool node is resumed/preloaded. */
struct thin_message {
	struct dm_list list;
	struct dm_thin_message message;
	int expected_errno;	/* errno the message send is allowed to return */
};
137
/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;		/* site ordinal within the replicator */
	dm_replicator_mode_t mode;	/* sync/async replication mode */
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};
149
/* Per-segment properties */
/* One table line to be loaded for a node.  Only the fields relevant to
 * 'type' are used; the trailing comment on each field names the segment
 * type(s) that consume it. */
struct load_segment {
	struct dm_list list;

	unsigned type;			/* SEG_* enum value */

	uint64_t size;			/* segment length in sectors */

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	struct dm_list thin_messages;	/* Thin_pool */
	uint64_t transaction_id;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	uint32_t data_block_size;	/* Thin_pool */
	unsigned skip_block_zeroing;	/* Thin_pool */
	uint32_t device_id;		/* Thin */

};
201
/* Per-device properties */
/* Creation/reload parameters accumulated for a node before its table
 * is loaded. */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	unsigned segment_count;		/* number of entries on 'segs' */
	unsigned size_changed;		/* set when a reload changes device size */
	struct dm_list segs;		/* list of struct load_segment */

	const char *new_name;		/* non-NULL requests a rename */

	/* If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;

	/* Send messages for this node in preload */
	unsigned send_messages;
};
236
/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;		/* entry on uses or used_by */
	struct dm_tree_node *node;	/* the node at the other end */
};
242
/* A device (or the synthetic root) in the dependency tree.  name/uuid
 * point into dtree->mem. */
struct dm_tree_node {
	struct dm_tree *dtree;		/* owning tree */

	const char *name;
	const char *uuid;
	struct dm_info info;		/* last known kernel state */

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};
267
/* Whole-tree state: pool-backed storage plus two lookup hashes keyed
 * by (major,minor) and by uuid.  'root' is a synthetic node anchoring
 * devices with no parent/child. */
struct dm_tree {
	struct dm_pool *mem;		/* backs all nodes, names, links */
	struct dm_hash_table *devs;	/* (major,minor) -> node */
	struct dm_hash_table *uuids;	/* uuid -> node */
	struct dm_tree_node root;
	int skip_lockfs;	/* 1 skips lockfs (for non-snapshots) */
	int no_flush;		/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;	/* 1 retries remove if not successful */
	uint32_t cookie;	/* udev synchronization cookie */
};
278
5c9eae96
AK
279/*
280 * Tree functions.
281 */
b4f1578f 282struct dm_tree *dm_tree_create(void)
3d0480ed 283{
0395dd22 284 struct dm_pool *dmem;
b4f1578f 285 struct dm_tree *dtree;
3d0480ed 286
0395dd22
ZK
287 if (!(dmem = dm_pool_create("dtree", 1024)) ||
288 !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
289 log_error("Failed to allocate dtree.");
290 if (dmem)
291 dm_pool_destroy(dmem);
3d0480ed
AK
292 return NULL;
293 }
294
b4f1578f 295 dtree->root.dtree = dtree;
2c44337b
AK
296 dm_list_init(&dtree->root.uses);
297 dm_list_init(&dtree->root.used_by);
c55b1410 298 dtree->skip_lockfs = 0;
b9ffd32c 299 dtree->no_flush = 0;
0395dd22 300 dtree->mem = dmem;
3d0480ed 301
b4f1578f
AK
302 if (!(dtree->devs = dm_hash_create(8))) {
303 log_error("dtree hash creation failed");
304 dm_pool_destroy(dtree->mem);
3d0480ed
AK
305 return NULL;
306 }
307
b4f1578f
AK
308 if (!(dtree->uuids = dm_hash_create(32))) {
309 log_error("dtree uuid hash creation failed");
310 dm_hash_destroy(dtree->devs);
311 dm_pool_destroy(dtree->mem);
165e4a11
AK
312 return NULL;
313 }
314
b4f1578f 315 return dtree;
3d0480ed
AK
316}
317
b4f1578f 318void dm_tree_free(struct dm_tree *dtree)
3d0480ed 319{
b4f1578f 320 if (!dtree)
3d0480ed
AK
321 return;
322
b4f1578f
AK
323 dm_hash_destroy(dtree->uuids);
324 dm_hash_destroy(dtree->devs);
325 dm_pool_destroy(dtree->mem);
3d0480ed
AK
326}
327
/* Store the udev synchronization cookie on the node's owning tree. */
void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)

{
	node->dtree->cookie = cookie;
}
332
/* Return the udev synchronization cookie stored on the owning tree. */
uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
	return node->dtree->cookie;
}
337
/* Request that suspends in this tree skip the filesystem lock/sync step. */
void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}
342
/* Request noflush suspends for this tree (mirrors/multipath). */
void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}
347
/* Request that device removal be retried if the first attempt fails. */
void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
	dnode->dtree->retry_remove = 1;
}
352
353/*
354 * Node functions.
355 */
04bde319
ZK
356static int _nodes_are_linked(const struct dm_tree_node *parent,
357 const struct dm_tree_node *child)
3d0480ed 358{
b4f1578f 359 struct dm_tree_link *dlink;
3d0480ed 360
2c44337b 361 dm_list_iterate_items(dlink, &parent->uses)
3d0480ed
AK
362 if (dlink->node == child)
363 return 1;
3d0480ed
AK
364
365 return 0;
366}
367
2c44337b 368static int _link(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 369{
b4f1578f 370 struct dm_tree_link *dlink;
3d0480ed 371
b4f1578f
AK
372 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
373 log_error("dtree link allocation failed");
3d0480ed
AK
374 return 0;
375 }
376
377 dlink->node = node;
2c44337b 378 dm_list_add(list, &dlink->list);
3d0480ed
AK
379
380 return 1;
381}
382
b4f1578f
AK
383static int _link_nodes(struct dm_tree_node *parent,
384 struct dm_tree_node *child)
3d0480ed
AK
385{
386 if (_nodes_are_linked(parent, child))
387 return 1;
388
389 if (!_link(&parent->uses, child))
390 return 0;
391
392 if (!_link(&child->used_by, parent))
393 return 0;
394
395 return 1;
396}
397
2c44337b 398static void _unlink(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 399{
b4f1578f 400 struct dm_tree_link *dlink;
3d0480ed 401
2c44337b 402 dm_list_iterate_items(dlink, list)
3d0480ed 403 if (dlink->node == node) {
2c44337b 404 dm_list_del(&dlink->list);
3d0480ed
AK
405 break;
406 }
3d0480ed
AK
407}
408
b4f1578f
AK
409static void _unlink_nodes(struct dm_tree_node *parent,
410 struct dm_tree_node *child)
3d0480ed
AK
411{
412 if (!_nodes_are_linked(parent, child))
413 return;
414
415 _unlink(&parent->uses, child);
416 _unlink(&child->used_by, parent);
417}
418
/* Hang 'node' off the tree root as a top-level (parentless) node. */
static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}
423
/* Detach 'node' from the root's uses list (it gained a real parent). */
static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}
428
/* Hang the root off 'node' so it counts as a bottom-level (childless) node. */
static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}
433
/* Detach the root from 'node' (it gained a real child). */
static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}
438
/*
 * Link parent and child while maintaining the invariant that a node is
 * attached to the synthetic root only while it lacks a real parent
 * (top level) or child (bottom level): a request to link the root is
 * skipped when the other node is already connected, and any stale root
 * attachment on the affected side is removed before the real link.
 */
static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}
456
/*
 * Allocate a node from the tree's pool and register it in both lookup
 * hashes: always by (major,minor), and by uuid when one is supplied.
 * 'name' and 'uuid' are stored by reference — the caller must pass
 * strings that live at least as long as the tree (callers strdup them
 * into dtree->mem).  On any failure the partially-registered state is
 * rolled back and NULL is returned.
 */
static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	/* Pack major:minor into a single 64-bit hash key. */
	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		/* Roll back the devs-hash entry added above. */
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}
505
/* Look up a node by device number; NULL if not present in the tree. */
static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
					       uint32_t major, uint32_t minor)
{
	uint64_t dev = MKDEV(major, minor);

	return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
				     sizeof(dev));
}
514
b4f1578f 515static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
516 const char *uuid)
517{
87f98002 518 struct dm_tree_node *node;
2e5ff5d1
AK
519 const char *default_uuid_prefix;
520 size_t default_uuid_prefix_len;
87f98002
AK
521
522 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
523 return node;
524
2e5ff5d1
AK
525 default_uuid_prefix = dm_uuid_prefix();
526 default_uuid_prefix_len = strlen(default_uuid_prefix);
527
528 if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
87f98002
AK
529 return NULL;
530
2e5ff5d1 531 return dm_hash_lookup(dtree->uuids, uuid + default_uuid_prefix_len);
165e4a11
AK
532}
533
/* Replace the node's udev control flags, logging any change. */
void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)

{
	struct dm_info *dinfo = &dnode->info;

	if (udev_flags != dnode->udev_flags)
		log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
			  ") udev_flags from 0x%x to 0x%x",
			  dnode->name, dinfo->major, dinfo->minor,
			  dnode->udev_flags, udev_flags);
	dnode->udev_flags = udev_flags;
}
546
/* Record the read-ahead value and flags to apply when the node is loaded. */
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}
554
/* Record a direct child that must be presuspended before this node. */
void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}
560
/* Node's device name, or "" when the device does not exist in the kernel. */
const char *dm_tree_node_get_name(const struct dm_tree_node *node)
{
	return node->info.exists ? node->name : "";
}
565
/* Node's uuid, or "" when the device does not exist in the kernel. */
const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
{
	return node->info.exists ? node->uuid : "";
}
570
/* Cached kernel dm_info for the node (valid for the node's lifetime). */
const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}
575
/* Caller-supplied opaque context attached when the node was added. */
void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}
580
/* Non-zero when a table (re)load changed the device's size. */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}
585
586int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
587{
588 if (inverted) {
589 if (_nodes_are_linked(&node->dtree->root, node))
590 return 0;
591 return dm_list_size(&node->used_by);
592 }
593
594 if (_nodes_are_linked(node, &node->dtree->root))
595 return 0;
596
597 return dm_list_size(&node->uses);
598}
599
/*
 * Returns 1 if no prefix supplied
 */
/*
 * Match 'uuid' against 'uuid_prefix'.  Beyond the plain prefix
 * comparison there is transition handling: an active device's uuid may
 * lack the default prefix (e.g. "LVM-") that the supplied prefix
 * carries, so when the prefix is long enough (> 4, i.e. not a bare
 * subsystem tag) and the uuid itself is unprefixed, the comparison is
 * retried with the default prefix stripped from 'uuid_prefix'.
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	const char *default_uuid_prefix = dm_uuid_prefix();
	size_t default_uuid_prefix_len = strlen(default_uuid_prefix);

	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)
		return 0;

	/* uuid already carries the default prefix: no stripped retry. */
	if (!strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
		return 0;

	/* Only retry when the supplied prefix starts with the default one. */
	if (strncmp(uuid_prefix, default_uuid_prefix, default_uuid_prefix_len))
		return 0;

	if (!strncmp(uuid, uuid_prefix + default_uuid_prefix_len, uuid_prefix_len - default_uuid_prefix_len))
		return 1;

	return 0;
}
629
/*
 * Returns 1 if no children.
 */
/*
 * Check that every child ('inverted' => parent) of 'node' belonging to
 * the given uuid prefix is suspended.  Nodes the parent intends to
 * presuspend itself are skipped.  A node attached straight to the
 * synthetic root has no real relatives, so 1 is returned immediately.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}
678
/*
 * Set major and minor to zero for root of tree.
 */
struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
				       uint32_t major,
				       uint32_t minor)
{
	/* 0:0 addresses the synthetic root node. */
	if (!major && !minor)
		return &dtree->root;

	return _find_dm_tree_node(dtree, major, minor);
}
691
/*
 * Set uuid to NULL for root of tree.
 */
struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
					       const char *uuid)
{
	/* NULL or empty uuid addresses the synthetic root node. */
	if (!uuid || !*uuid)
		return &dtree->root;

	return _find_dm_tree_node_by_uuid(dtree, uuid);
}
703
704/*
705 * First time set *handle to NULL.
706 * Set inverted to invert the tree.
707 */
708struct dm_tree_node *dm_tree_next_child(void **handle,
709 const struct dm_tree_node *parent,
710 uint32_t inverted)
711{
712 struct dm_list **dlink = (struct dm_list **) handle;
713 const struct dm_list *use_list;
714
715 if (inverted)
716 use_list = &parent->used_by;
717 else
718 use_list = &parent->uses;
719
720 if (!*dlink)
721 *dlink = dm_list_first(use_list);
722 else
723 *dlink = dm_list_next(use_list, *dlink);
724
725 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
726}
727
/*
 * Fetch info and dependencies for device major:minor via a
 * DM_DEVICE_DEPS task.  For non-dm majors (and for devices that do not
 * exist) it fills zeroed info, empty name/uuid and NULL deps, and
 * returns success without creating a task.  Otherwise, on success the
 * caller owns '*dmt' and must dm_task_destroy() it when finished with
 * '*deps' (which points into the task); name/uuid are strdup'd into
 * 'mem'.  On failure the task is destroyed here and 0 is returned.
 */
static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
		 const char **name, const char **uuid, unsigned inactive_table,
		 struct dm_info *info, struct dm_deps **deps)
{
	memset(info, 0, sizeof(*info));

	if (!dm_is_dm_major(major)) {
		/* Not a dm device: report non-existence, no task created. */
		if (name)
			*name = "";
		if (uuid)
			*uuid = "";
		*deps = NULL;
		info->major = major;
		info->minor = minor;
		info->exists = 0;
		info->live_table = 0;
		info->inactive_table = 0;
		info->read_only = 0;
		return 1;
	}

	if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
		log_error("deps dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(*dmt, major)) {
		log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_set_minor(*dmt, minor)) {
		log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (inactive_table && !dm_task_query_inactive_table(*dmt)) {
		log_error("_deps: failed to set inactive table for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_run(*dmt)) {
		log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_get_info(*dmt, info)) {
		log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!info->exists) {
		if (name)
			*name = "";
		if (uuid)
			*uuid = "";
		*deps = NULL;
	} else {
		/* Sanity-check the kernel answered for the device we asked about. */
		if (info->major != major) {
			log_error("Inconsistent dtree major number: %u != %u",
				  major, info->major);
			goto failed;
		}
		if (info->minor != minor) {
			log_error("Inconsistent dtree minor number: %u != %u",
				  minor, info->minor);
			goto failed;
		}
		if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
			log_error("name pool_strdup failed");
			goto failed;
		}
		if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
			log_error("uuid pool_strdup failed");
			goto failed;
		}
		*deps = dm_task_get_deps(*dmt);
	}

	return 1;

failed:
	dm_task_destroy(*dmt);
	return 0;
}
818
5c9eae96
AK
/*
 * Query the kernel for the dm_info of device major:minor via a
 * DM_DEVICE_INFO task; optionally also return the device's name and
 * uuid, strdup'd into 'mem'.  Open counts are only collected when
 * 'with_open_count' is set.  (The previous comment here, "Deactivate a
 * device with its dependencies...", described a different function and
 * did not match this code.)
 */
static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
			struct dm_info *info, struct dm_pool *mem,
			const char **name, const char **uuid)
{
	struct dm_task *dmt;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
		log_error("_info_by_dev: dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("_info_by_dev: Failed to set device number");
		dm_task_destroy(dmt);
		return 0;
	}

	/* Best-effort: failure to disable open_count is logged, not fatal. */
	if (!with_open_count && !dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!(r = dm_task_run(dmt)))
		goto_out;

	if (!(r = dm_task_get_info(dmt, info)))
		goto_out;

	if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(dmt)))) {
		log_error("name pool_strdup failed");
		r = 0;
		goto_out;
	}

	if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(dmt)))) {
		log_error("uuid pool_strdup failed");
		r = 0;
		goto_out;
	}

out:
	dm_task_destroy(dmt);

	return r;
}
866
867static int _check_device_not_in_use(const char *name, struct dm_info *info)
868{
869 if (!info->exists)
870 return 1;
871
872 /* If sysfs is not used, use open_count information only. */
873 if (!*dm_sysfs_dir()) {
874 if (info->open_count) {
875 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
876 name, info->major, info->minor);
877 return 0;
878 }
879
880 return 1;
881 }
882
883 if (dm_device_has_holders(info->major, info->minor)) {
884 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
885 "by another device.", name, info->major, info->minor);
886 return 0;
887 }
888
889 if (dm_device_has_mounted_fs(info->major, info->minor)) {
890 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
891 "a filesystem in use.", name, info->major, info->minor);
892 return 0;
893 }
894
895 return 1;
896}
897
/* Check if all parent nodes of given node have open_count == 0 */
/*
 * Parents outside the uuid prefix are ignored; the open count is
 * re-queried from the kernel rather than trusting cached info.
 * Returns 0 as soon as one matching parent is found open.
 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}
938
/*
 * Remove device major:minor from the kernel via DM_DEVICE_REMOVE.
 * When 'cookie' is supplied, the removal is synchronized with udev;
 * 'retry' enables libdm's remove retry logic.  The dev node is removed
 * here as a fallback regardless of task outcome (see FIXME below).
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	/* Best-effort: failure to disable open_count is logged, not fatal. */
	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (cookie)
		if (!dm_task_set_cookie(dmt, cookie, udev_flags))
			goto out;

	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
980
/*
 * Clear the node's inactive table (DM_DEVICE_CLEAR) and then remove
 * any devices that table referenced which are now orphans: not in the
 * tree, without a live table, and unopened.  The dependency list is
 * captured first, since clearing the table discards it.  Returns 1 on
 * success (including "nothing to do"), 0 on error.
 */
static int _node_clear_table(struct dm_tree_node *dnode, uint16_t udev_flags)
{
	struct dm_task *dmt = NULL, *deps_dmt = NULL;
	struct dm_info *info, deps_info;
	struct dm_deps *deps = NULL;
	const char *name, *uuid;
	const char *default_uuid_prefix;
	size_t default_uuid_prefix_len;
	uint32_t i;
	int r = 0;

	if (!(info = &dnode->info)) {
		log_error("_node_clear_table failed: missing info");
		return 0;
	}

	if (!(name = dm_tree_node_get_name(dnode))) {
		log_error("_node_clear_table failed: missing name");
		return 0;
	}

	/* Is there a table? */
	if (!info->exists || !info->inactive_table)
		return 1;

	/* Get devices used by inactive table that's about to be deleted. */
	if (!_deps(&deps_dmt, dnode->dtree->mem, info->major, info->minor, NULL, NULL, 1, info, &deps)) {
		log_error("Failed to obtain dependencies for %s before clearing table.", name);
		return 0;
	}

	log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
		    name, info->major, info->minor);

	if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
		log_error("Table clear dm_task creation failed for %s", name);
		goto_out;
	}

	if (!dm_task_set_major(dmt, info->major) ||
	    !dm_task_set_minor(dmt, info->minor)) {
		log_error("Failed to set device number for %s table clear", name);
		goto_out;
	}

	r = dm_task_run(dmt);

	/* Refresh cached info now that the inactive table is gone. */
	if (!dm_task_get_info(dmt, info)) {
		log_error("_node_clear_table failed: info missing after running task for %s", name);
		r = 0;
	}

	if (!r || !deps)
		goto_out;

	/*
	 * Remove (incomplete) devices that the inactive table referred to but
	 * which are not in the tree, no longer referenced and don't have a live
	 * table.
	 */
	default_uuid_prefix = dm_uuid_prefix();
	default_uuid_prefix_len = strlen(default_uuid_prefix);

	for (i = 0; i < deps->count; i++) {
		/* If already in tree, assume it's under control */
		if (_find_dm_tree_node(dnode->dtree, MAJOR(deps->device[i]), MINOR(deps->device[i])))
			continue;

		/* NB: reuses 'name'/'uuid' for the dependency from here on. */
		if (!_info_by_dev(MAJOR(deps->device[i]), MINOR(deps->device[i]), 1,
				  &deps_info, dnode->dtree->mem, &name, &uuid))
			continue;

		/* Proceed if device is an 'orphan' - unreferenced and without a live table. */
		if (!deps_info.exists || deps_info.live_table || deps_info.open_count)
			continue;

		/* Only touch devices carrying the default uuid prefix. */
		if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
			continue;

		/* Remove device. */
		if (!_deactivate_node(name, deps_info.major, deps_info.minor, &dnode->dtree->cookie, udev_flags, 0)) {
			log_error("Failed to deactivate no-longer-used device %s (%"
				  PRIu32 ":%" PRIu32 ")", name, deps_info.major, deps_info.minor);
		} else if (deps_info.suspended)
			dec_suspended();
	}

out:
	if (dmt)
		dm_task_destroy(dmt);

	if (deps_dmt)
		dm_task_destroy(deps_dmt);

	return r;
}
1077
/*
 * Add (or refresh) a device node in the tree, looked up by uuid.
 * A new node copies name/uuid into the tree pool and is attached to
 * the root at both levels until a table links it properly; an existing
 * node with a differing name gets a rename queued via props.new_name.
 * When 'clear_inactive' is set, any inactive table is cleared (and
 * orphaned dependencies removed) before returning.  Returns the node,
 * or NULL on failure.
 */
struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
							 const char *name,
							 const char *uuid,
							 uint32_t major,
							 uint32_t minor,
							 int read_only,
							 int clear_inactive,
							 void *context,
							 uint16_t udev_flags)
{
	struct dm_tree_node *dnode;
	struct dm_info info;
	const char *name2;
	const char *uuid2;

	/* Do we need to add node to tree? */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
		if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
		if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
			log_error("uuid pool_strdup failed");
			return NULL;
		}

		/* Node represents a device to be created: no kernel state yet. */
		info.major = 0;
		info.minor = 0;
		info.exists = 0;
		info.live_table = 0;
		info.inactive_table = 0;
		info.read_only = 0;

		if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
						   context, 0)))
			return_NULL;

		/* Attach to root node until a table is supplied */
		if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
			return_NULL;

		dnode->props.major = major;
		dnode->props.minor = minor;
		dnode->props.new_name = NULL;
		dnode->props.size_changed = 0;
	} else if (strcmp(name, dnode->name)) {
		/* Do we need to rename node? */
		if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
	}

	dnode->props.read_only = read_only ? 1 : 0;
	dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
	dnode->props.read_ahead_flags = 0;

	if (clear_inactive && !_node_clear_table(dnode, udev_flags))
		return_NULL;

	dnode->context = context;
	dnode->udev_flags = udev_flags;

	return dnode;
}
f3ef15ef 1143
5c9eae96
AK
/*
 * Compatibility entry point: identical to
 * dm_tree_add_new_dev_with_udev_flags() with no udev flags set.
 */
struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree, const char *name,
					 const char *uuid, uint32_t major, uint32_t minor,
					 int read_only, int clear_inactive, void *context)
{
	return dm_tree_add_new_dev_with_udev_flags(dtree, name, uuid,
						   major, minor,
						   read_only, clear_inactive,
						   context, 0);
}
1151
5c9eae96
AK
/*
 * Add the device major:minor (and, recursively, every device its live
 * table depends on) to the tree beneath 'parent'.
 *
 * Returns the (possibly pre-existing) node, or NULL on error.
 */
static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;	/* Set when the node was created by this call */

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		/* Query the kernel for the device's info and dependencies. */
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, 0, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	/* NULL signals failure to the caller; dmt is still cleaned up below. */
	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}
db208f51 1209
5c9eae96
AK
1210int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
1211{
1212 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
1213}
db208f51 1214
5c9eae96
AK
1215int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
1216 uint32_t minor, uint16_t udev_flags)
1217{
1218 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
db208f51
AK
1219}
1220
bd90c6b2 1221static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
f16aea9e 1222 uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
165e4a11
AK
1223{
1224 struct dm_task *dmt;
1225 int r = 0;
1226
1227 log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);
1228
1229 if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
1230 log_error("Rename dm_task creation failed for %s", old_name);
1231 return 0;
1232 }
1233
1234 if (!dm_task_set_name(dmt, old_name)) {
1235 log_error("Failed to set name for %s rename.", old_name);
1236 goto out;
1237 }
1238
b4f1578f 1239 if (!dm_task_set_newname(dmt, new_name))
40e5fd8b 1240 goto_out;
165e4a11
AK
1241
1242 if (!dm_task_no_open_count(dmt))
1243 log_error("Failed to disable open_count");
1244
f16aea9e 1245 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
bd90c6b2
AK
1246 goto out;
1247
165e4a11
AK
1248 r = dm_task_run(dmt);
1249
1250out:
1251 dm_task_destroy(dmt);
1252
1253 return r;
1254}
1255
165e4a11
AK
1256/* FIXME Merge with _suspend_node? */
1257static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1258 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1259 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1260 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1261{
1262 struct dm_task *dmt;
bd90c6b2 1263 int r = 0;
165e4a11
AK
1264
1265 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1266
1267 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1268 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1269 return 0;
1270 }
1271
0b7d16bc
AK
1272 /* FIXME Kernel should fill in name on return instead */
1273 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1274 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1275 goto out;
0b7d16bc
AK
1276 }
1277
165e4a11
AK
1278 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1279 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1280 goto out;
165e4a11
AK
1281 }
1282
1283 if (!dm_task_no_open_count(dmt))
1284 log_error("Failed to disable open_count");
1285
52b84409
AK
1286 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1287 log_error("Failed to set read ahead");
1288
f16aea9e 1289 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1290 goto_out;
bd90c6b2 1291
9a8f192a
ZK
1292 if (!(r = dm_task_run(dmt)))
1293 goto_out;
1294
1295 if (already_suspended)
1296 dec_suspended();
1297
1298 if (!(r = dm_task_get_info(dmt, newinfo)))
1299 stack;
165e4a11 1300
bd90c6b2 1301out:
165e4a11
AK
1302 dm_task_destroy(dmt);
1303
1304 return r;
1305}
1306
db208f51 1307static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
b9ffd32c 1308 int skip_lockfs, int no_flush, struct dm_info *newinfo)
db208f51
AK
1309{
1310 struct dm_task *dmt;
1311 int r;
1312
b9ffd32c
AK
1313 log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
1314 name, major, minor,
1315 skip_lockfs ? "" : " with filesystem sync",
6e1898a5 1316 no_flush ? "" : " with device flush");
db208f51
AK
1317
1318 if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
1319 log_error("Suspend dm_task creation failed for %s", name);
1320 return 0;
1321 }
1322
1323 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1324 log_error("Failed to set device number for %s suspension.", name);
1325 dm_task_destroy(dmt);
1326 return 0;
1327 }
1328
1329 if (!dm_task_no_open_count(dmt))
1330 log_error("Failed to disable open_count");
1331
c55b1410
AK
1332 if (skip_lockfs && !dm_task_skip_lockfs(dmt))
1333 log_error("Failed to set skip_lockfs flag.");
1334
b9ffd32c
AK
1335 if (no_flush && !dm_task_no_flush(dmt))
1336 log_error("Failed to set no_flush flag.");
1337
1840aa09
AK
1338 if ((r = dm_task_run(dmt))) {
1339 inc_suspended();
db208f51 1340 r = dm_task_get_info(dmt, newinfo);
1840aa09 1341 }
db208f51 1342
3e8c6b73
AK
1343 dm_task_destroy(dmt);
1344
1345 return r;
1346}
1347
25e6ab87 1348static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
e0ea24be
ZK
1349{
1350 struct dm_task *dmt;
1351 int r = 0;
1352 uint64_t start, length;
1353 char *type = NULL;
1354 char *params = NULL;
e0ea24be 1355
25e6ab87
ZK
1356 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1357 return_0;
e0ea24be 1358
25e6ab87
ZK
1359 if (!dm_task_set_major(dmt, dnode->info.major) ||
1360 !dm_task_set_minor(dmt, dnode->info.minor)) {
1361 log_error("Failed to set major minor.");
1362 goto out;
e0ea24be
ZK
1363 }
1364
25e6ab87
ZK
1365 if (!dm_task_run(dmt))
1366 goto_out;
1367
1368 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1369
1370 if (type && (strcmp(type, "thin-pool") != 0)) {
c590a9cd 1371 log_error("Expected thin-pool target for %d:%d and got %s.",
25e6ab87 1372 dnode->info.major, dnode->info.minor, type);
e0ea24be
ZK
1373 goto out;
1374 }
1375
25e6ab87 1376 if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
c590a9cd 1377 log_error("Failed to parse transaction_id from %s.", params);
e0ea24be
ZK
1378 goto out;
1379 }
1380
25e6ab87 1381 log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);
e0ea24be 1382
25e6ab87
ZK
1383 r = 1;
1384out:
1385 dm_task_destroy(dmt);
e0ea24be 1386
25e6ab87
ZK
1387 return r;
1388}
e0ea24be 1389
25e6ab87
ZK
/*
 * Format one queued thin-pool message (create_thin/create_snap/delete/
 * trim/set_transaction_id) and deliver it to the pool device via
 * DM_DEVICE_TARGET_MSG.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];	/* Large enough for the longest message text below */
	int r;

	/* Render the message into buf; r < 0 on overflow or unknown type. */
	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	default:
		r = -1;
	}

	if (r < 0) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
1457
11f64f0a
ZK
/*
 * Send any queued thin-pool messages to dnode if it is a single-segment
 * thin-pool whose uuid matches the given prefix.
 *
 * The pool's live transaction_id is compared with the expected one:
 *  - equal                      -> already in sync, nothing to send;
 *  - exactly one behind         -> send the queued messages;
 *  - anything else              -> error, and the node is deactivated.
 *
 * Returns 1 on success or when there is nothing to do, 0 on failure.
 */
static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	/* Only existing single-segment nodes are candidates. */
	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		goto_bad;

	if (trans_id == seg->transaction_id)
		return 1; /* In sync - skip messages */

	/* Messages can only advance the id by one; anything else is fatal. */
	if (trans_id != (seg->transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, seg->transaction_id - 1);
		goto bad; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			goto_bad;

	return 1;
bad:
	/* Try to deactivate */
	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
		log_error("Failed to deactivate %s", dnode->name);

	return 0;
}
1506
18e0f934
AK
1507/*
1508 * FIXME Don't attempt to deactivate known internal dependencies.
1509 */
1510static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
1511 const char *uuid_prefix,
1512 size_t uuid_prefix_len,
1513 unsigned level)
3e8c6b73 1514{
b7eb2ad0 1515 int r = 1;
3e8c6b73 1516 void *handle = NULL;
b4f1578f 1517 struct dm_tree_node *child = dnode;
3e8c6b73
AK
1518 struct dm_info info;
1519 const struct dm_info *dinfo;
1520 const char *name;
1521 const char *uuid;
1522
b4f1578f
AK
1523 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1524 if (!(dinfo = dm_tree_node_get_info(child))) {
3e8c6b73
AK
1525 stack;
1526 continue;
1527 }
1528
b4f1578f 1529 if (!(name = dm_tree_node_get_name(child))) {
3e8c6b73
AK
1530 stack;
1531 continue;
1532 }
1533
b4f1578f 1534 if (!(uuid = dm_tree_node_get_uuid(child))) {
3e8c6b73
AK
1535 stack;
1536 continue;
1537 }
1538
1539 /* Ignore if it doesn't belong to this VG */
2b69db1f 1540 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
3e8c6b73 1541 continue;
3e8c6b73
AK
1542
1543 /* Refresh open_count */
2e5ff5d1 1544 if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
f55021f4 1545 !info.exists)
3e8c6b73
AK
1546 continue;
1547
4ce43894
ZK
1548 if (info.open_count) {
1549 /* Skip internal non-toplevel opened nodes */
1550 if (level)
1551 continue;
1552
1553 /* When retry is not allowed, error */
1554 if (!child->dtree->retry_remove) {
1555 log_error("Unable to deactivate open %s (%" PRIu32
1556 ":%" PRIu32 ")", name, info.major, info.minor);
1557 r = 0;
1558 continue;
1559 }
1560
1561 /* Check toplevel node for holders/mounted fs */
1562 if (!_check_device_not_in_use(name, &info)) {
1563 stack;
1564 r = 0;
1565 continue;
1566 }
1567 /* Go on with retry */
1568 }
125712be 1569
f3ef15ef 1570 /* Also checking open_count in parent nodes of presuspend_node */
125712be 1571 if ((child->presuspend_node &&
f3ef15ef
ZK
1572 !_node_has_closed_parents(child->presuspend_node,
1573 uuid_prefix, uuid_prefix_len))) {
18e0f934
AK
1574 /* Only report error from (likely non-internal) dependency at top level */
1575 if (!level) {
1576 log_error("Unable to deactivate open %s (%" PRIu32
1577 ":%" PRIu32 ")", name, info.major,
1578 info.minor);
1579 r = 0;
1580 }
f55021f4
AK
1581 continue;
1582 }
1583
76d1aec8
ZK
1584 /* Suspend child node first if requested */
1585 if (child->presuspend_node &&
1586 !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1587 continue;
1588
f16aea9e 1589 if (!_deactivate_node(name, info.major, info.minor,
787200ef 1590 &child->dtree->cookie, child->udev_flags,
4ce43894 1591 (level == 0) ? child->dtree->retry_remove : 0)) {
3e8c6b73
AK
1592 log_error("Unable to deactivate %s (%" PRIu32
1593 ":%" PRIu32 ")", name, info.major,
1594 info.minor);
b7eb2ad0 1595 r = 0;
3e8c6b73 1596 continue;
f4249251
AK
1597 } else if (info.suspended)
1598 dec_suspended();
3e8c6b73 1599
18e0f934
AK
1600 if (dm_tree_node_num_children(child, 0)) {
1601 if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
b7eb2ad0 1602 return_0;
18e0f934 1603 }
3e8c6b73
AK
1604 }
1605
b7eb2ad0 1606 return r;
3e8c6b73 1607}
db208f51 1608
18e0f934
AK
/* Public entry point: start the recursive deactivation walk at level 0. */
int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	const unsigned toplevel = 0;

	return _dm_tree_deactivate_children(dnode, uuid_prefix,
					    uuid_prefix_len, toplevel);
}
1615
b4f1578f 1616int dm_tree_suspend_children(struct dm_tree_node *dnode,
08e64ce5
ZK
1617 const char *uuid_prefix,
1618 size_t uuid_prefix_len)
db208f51 1619{
68085c93 1620 int r = 1;
db208f51 1621 void *handle = NULL;
b4f1578f 1622 struct dm_tree_node *child = dnode;
db208f51
AK
1623 struct dm_info info, newinfo;
1624 const struct dm_info *dinfo;
1625 const char *name;
1626 const char *uuid;
1627
690a5da2 1628 /* Suspend nodes at this level of the tree */
b4f1578f
AK
1629 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1630 if (!(dinfo = dm_tree_node_get_info(child))) {
db208f51
AK
1631 stack;
1632 continue;
1633 }
1634
b4f1578f 1635 if (!(name = dm_tree_node_get_name(child))) {
db208f51
AK
1636 stack;
1637 continue;
1638 }
1639
b4f1578f 1640 if (!(uuid = dm_tree_node_get_uuid(child))) {
db208f51
AK
1641 stack;
1642 continue;
1643 }
1644
1645 /* Ignore if it doesn't belong to this VG */
2b69db1f 1646 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
db208f51
AK
1647 continue;
1648
690a5da2
AK
1649 /* Ensure immediate parents are already suspended */
1650 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1651 continue;
1652
2e5ff5d1 1653 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info, NULL, NULL, NULL) ||
b700541f 1654 !info.exists || info.suspended)
db208f51
AK
1655 continue;
1656
c55b1410 1657 if (!_suspend_node(name, info.major, info.minor,
b9ffd32c
AK
1658 child->dtree->skip_lockfs,
1659 child->dtree->no_flush, &newinfo)) {
db208f51
AK
1660 log_error("Unable to suspend %s (%" PRIu32
1661 ":%" PRIu32 ")", name, info.major,
1662 info.minor);
68085c93 1663 r = 0;
db208f51
AK
1664 continue;
1665 }
1666
1667 /* Update cached info */
1668 child->info = newinfo;
690a5da2
AK
1669 }
1670
1671 /* Then suspend any child nodes */
1672 handle = NULL;
1673
b4f1578f
AK
1674 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1675 if (!(uuid = dm_tree_node_get_uuid(child))) {
690a5da2
AK
1676 stack;
1677 continue;
1678 }
1679
1680 /* Ignore if it doesn't belong to this VG */
87f98002 1681 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2 1682 continue;
db208f51 1683
b4f1578f 1684 if (dm_tree_node_num_children(child, 0))
68085c93
MS
1685 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1686 return_0;
db208f51
AK
1687 }
1688
68085c93 1689 return r;
db208f51
AK
1690}
1691
b4f1578f 1692int dm_tree_activate_children(struct dm_tree_node *dnode,
db208f51
AK
1693 const char *uuid_prefix,
1694 size_t uuid_prefix_len)
1695{
2ca6b865 1696 int r = 1;
db208f51 1697 void *handle = NULL;
b4f1578f 1698 struct dm_tree_node *child = dnode;
165e4a11
AK
1699 struct dm_info newinfo;
1700 const char *name;
db208f51 1701 const char *uuid;
56c28292 1702 int priority;
db208f51 1703
165e4a11 1704 /* Activate children first */
b4f1578f
AK
1705 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1706 if (!(uuid = dm_tree_node_get_uuid(child))) {
165e4a11
AK
1707 stack;
1708 continue;
db208f51
AK
1709 }
1710
908db078
AK
1711 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1712 continue;
db208f51 1713
b4f1578f 1714 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
1715 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1716 return_0;
56c28292 1717 }
165e4a11 1718
56c28292 1719 handle = NULL;
165e4a11 1720
aa6f4e51 1721 for (priority = 0; priority < 3; priority++) {
56c28292 1722 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
a5a31ce9
ZK
1723 if (priority != child->activation_priority)
1724 continue;
1725
56c28292
AK
1726 if (!(uuid = dm_tree_node_get_uuid(child))) {
1727 stack;
1728 continue;
165e4a11 1729 }
165e4a11 1730
56c28292
AK
1731 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1732 continue;
165e4a11 1733
56c28292
AK
1734 if (!(name = dm_tree_node_get_name(child))) {
1735 stack;
1736 continue;
1737 }
1738
1739 /* Rename? */
1740 if (child->props.new_name) {
bd90c6b2 1741 if (!_rename_node(name, child->props.new_name, child->info.major,
f16aea9e
PR
1742 child->info.minor, &child->dtree->cookie,
1743 child->udev_flags)) {
56c28292
AK
1744 log_error("Failed to rename %s (%" PRIu32
1745 ":%" PRIu32 ") to %s", name, child->info.major,
1746 child->info.minor, child->props.new_name);
1747 return 0;
1748 }
1749 child->name = child->props.new_name;
1750 child->props.new_name = NULL;
1751 }
1752
1753 if (!child->info.inactive_table && !child->info.suspended)
1754 continue;
1755
bafa2f39 1756 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 1757 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09 1758 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
56c28292 1759 log_error("Unable to resume %s (%" PRIu32
bafa2f39 1760 ":%" PRIu32 ")", child->name, child->info.major,
56c28292 1761 child->info.minor);
2ca6b865 1762 r = 0;
56c28292
AK
1763 continue;
1764 }
1765
1766 /* Update cached info */
1767 child->info = newinfo;
1768 }
db208f51
AK
1769 }
1770
4173a228
ZK
1771 /*
1772 * FIXME: Implement delayed error reporting
1773 * activation should be stopped only in the case,
1774 * the submission of transation_id message fails,
1775 * resume should continue further, just whole command
1776 * has to report failure.
1777 */
1778 if (r && dnode->props.send_messages &&
1779 !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
1780 stack;
1781
165e4a11
AK
1782 handle = NULL;
1783
2ca6b865 1784 return r;
165e4a11
AK
1785}
1786
b4f1578f 1787static int _create_node(struct dm_tree_node *dnode)
165e4a11
AK
1788{
1789 int r = 0;
1790 struct dm_task *dmt;
1791
1792 log_verbose("Creating %s", dnode->name);
1793
1794 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1795 log_error("Create dm_task creation failed for %s", dnode->name);
1796 return 0;
1797 }
1798
1799 if (!dm_task_set_name(dmt, dnode->name)) {
1800 log_error("Failed to set device name for %s", dnode->name);
1801 goto out;
1802 }
1803
1804 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1805 log_error("Failed to set uuid for %s", dnode->name);
1806 goto out;
1807 }
1808
1809 if (dnode->props.major &&
1810 (!dm_task_set_major(dmt, dnode->props.major) ||
1811 !dm_task_set_minor(dmt, dnode->props.minor))) {
1812 log_error("Failed to set device number for %s creation.", dnode->name);
1813 goto out;
1814 }
1815
1816 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1817 log_error("Failed to set read only flag for %s", dnode->name);
1818 goto out;
1819 }
1820
1821 if (!dm_task_no_open_count(dmt))
1822 log_error("Failed to disable open_count");
1823
1824 if ((r = dm_task_run(dmt)))
1825 r = dm_task_get_info(dmt, &dnode->info);
1826
1827out:
1828 dm_task_destroy(dmt);
1829
1830 return r;
1831}
1832
1833
b4f1578f 1834static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1835{
1836 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1837 log_error("Failed to format %s device number for %s as dm "
1838 "target (%u,%u)",
1839 node->name, node->uuid, node->info.major, node->info.minor);
1840 return 0;
165e4a11
AK
1841 }
1842
1843 return 1;
1844}
1845
ffa9b6a5
ZK
/*
 * Simplify string emitting code: append a formatted string to 'params'
 * at offset p (advancing p), or return -1 from the enclosing function
 * when the buffer is full.  Relies on 'params'/'paramsize' being in
 * scope at the expansion site.
 */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
ffa9b6a5 1856
3c74075f
JEB
1857/*
1858 * _emit_areas_line
1859 *
1860 * Returns: 1 on success, 0 on failure
1861 */
08f1ddea 1862static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
4dcaa230
AK
1863 struct load_segment *seg, char *params,
1864 size_t paramsize, int *pos)
165e4a11
AK
1865{
1866 struct seg_area *area;
7d7d93ac 1867 char devbuf[DM_FORMAT_DEV_BUFSIZE];
609faae9 1868 unsigned first_time = 1;
db3c1ac1 1869 const char *logtype, *synctype;
b262f3e1 1870 unsigned log_parm_count;
165e4a11 1871
2c44337b 1872 dm_list_iterate_items(area, &seg->areas) {
b262f3e1
ZK
1873 switch (seg->type) {
1874 case SEG_REPLICATOR_DEV:
6d04311e
JEB
1875 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1876 return_0;
1877
b262f3e1
ZK
1878 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1879 if (first_time)
1880 EMIT_PARAMS(*pos, " nolog 0");
1881 else {
1882 /* Remote devices */
1883 log_parm_count = (area->flags &
1884 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1885
1886 if (!area->slog) {
1887 devbuf[0] = 0; /* Only core log parameters */
1888 logtype = "core";
1889 } else {
1890 devbuf[0] = ' '; /* Extra space before device name */
1891 if (!_build_dev_string(devbuf + 1,
1892 sizeof(devbuf) - 1,
1893 area->slog))
1894 return_0;
1895 logtype = "disk";
1896 log_parm_count++; /* Extra sync log device name parameter */
1897 }
1898
1899 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1900 log_parm_count, devbuf, area->region_size);
1901
db3c1ac1
AK
1902 synctype = (area->flags & DM_NOSYNC) ?
1903 " nosync" : (area->flags & DM_FORCESYNC) ?
1904 " sync" : NULL;
b262f3e1 1905
db3c1ac1
AK
1906 if (synctype)
1907 EMIT_PARAMS(*pos, "%s", synctype);
b262f3e1
ZK
1908 }
1909 break;
cac52ca4
JEB
1910 case SEG_RAID1:
1911 case SEG_RAID4:
1912 case SEG_RAID5_LA:
1913 case SEG_RAID5_RA:
1914 case SEG_RAID5_LS:
1915 case SEG_RAID5_RS:
1916 case SEG_RAID6_ZR:
1917 case SEG_RAID6_NR:
1918 case SEG_RAID6_NC:
6d04311e
JEB
1919 if (!area->dev_node) {
1920 EMIT_PARAMS(*pos, " -");
1921 break;
1922 }
1923 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1924 return_0;
1925
cac52ca4
JEB
1926 EMIT_PARAMS(*pos, " %s", devbuf);
1927 break;
b262f3e1 1928 default:
6d04311e
JEB
1929 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1930 return_0;
1931
b262f3e1
ZK
1932 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1933 devbuf, area->offset);
1934 }
609faae9
AK
1935
1936 first_time = 0;
165e4a11
AK
1937 }
1938
1939 return 1;
1940}
1941
b262f3e1
ZK
/*
 * Emit the target parameter line for a "replicator" control segment:
 * the replicator log (type, device, size) followed by one entry per
 * remote site with its mode and optional fall-behind/timeout setting.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* The replicator log size is taken from its own (last) segment. */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 base params, +2 when any fall-behind/timeout option is set. */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		/* The three options are mutually exclusive in this order. */
		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1977
3c74075f 1978/*
3c74075f
JEB
1979 * Returns: 1 on success, 0 on failure
1980 */
beecb1e1
ZK
/*
 * Emit the target parameter line for a "mirror" segment: log description
 * (core/disk, or userspace for cluster mirrors), region size, sync flags
 * and the mirror legs, plus the trailing feature args.
 *
 * The exact syntax depends on the running kernel version, which is why
 * uname() is consulted here.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table. It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types. Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	/* Old-style cluster mirrors carry the UUID after the region size. */
	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
2105
cac52ca4
JEB
2106static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
2107 uint32_t minor, struct load_segment *seg,
2108 uint64_t *seg_start, char *params,
2109 size_t paramsize)
2110{
ad2432dc 2111 uint32_t i;
cac52ca4
JEB
2112 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
2113 int pos = 0;
2114
2115 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
2116 param_count++;
2117
2118 if (seg->region_size)
2119 param_count += 2;
2120
ad2432dc
MB
2121 /* rebuilds is 64-bit */
2122 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
2123 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 2124
cac52ca4
JEB
2125 if ((seg->type == SEG_RAID1) && seg->stripe_size)
2126 log_error("WARNING: Ignoring RAID1 stripe size");
2127
2128 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
2129 param_count, seg->stripe_size);
2130
2131 if (seg->flags & DM_NOSYNC)
2132 EMIT_PARAMS(pos, " nosync");
2133 else if (seg->flags & DM_FORCESYNC)
2134 EMIT_PARAMS(pos, " sync");
2135
2136 if (seg->region_size)
2137 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2138
f439e65b
JEB
2139 for (i = 0; i < (seg->area_count / 2); i++)
2140 if (seg->rebuilds & (1 << i))
2141 EMIT_PARAMS(pos, " rebuild %u", i);
2142
cac52ca4
JEB
2143 /* Print number of metadata/data device pairs */
2144 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2145
2146 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2147 return_0;
2148
2149 return 1;
2150}
2151
8f26e18c
JEB
/*
 * Format the table line for one load_segment of device major:minor into
 * params[] and register it as a target on the dm_task.
 *
 * The first switch emits the segment-type-specific prefix; the second
 * switch appends the target area list for the types that carry one.
 * Mirror and RAID segments delegate to their own emit functions.
 * On success *seg_start is advanced by the segment size.
 *
 * Returns 1 on success, 0 on failure (EMIT_PARAMS may also return 0 on
 * buffer overflow, which makes the caller retry with a bigger buffer).
 */
static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
			      uint32_t minor, struct load_segment *seg,
			      uint64_t *seg_start, char *params,
			      size_t paramsize)
{
	int pos = 0;		/* write offset into params[] */
	int r;
	int target_type_is_raid = 0;	/* all SEG_RAID* map to kernel target "raid" */
	char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
	char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];

	switch(seg->type) {
	case SEG_ERROR:
	case SEG_ZERO:
	case SEG_LINEAR:
		/* No type-specific prefix; areas (if any) emitted below. */
		break;
	case SEG_MIRRORED:
		/* Mirrors are pretty complicated - now in separate function */
		r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
		if (!r)
			return_0;
		break;
	case SEG_REPLICATOR:
		if ((r = _replicator_emit_segment_line(seg, params, paramsize,
						       &pos)) <= 0) {
			stack;
			return r;
		}
		break;
	case SEG_REPLICATOR_DEV:
		/* "<replicator dev> <device index>" */
		if (!seg->replicator || !_build_dev_string(originbuf,
							   sizeof(originbuf),
							   seg->replicator))
			return_0;

		EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
		break;
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_MERGE:
		/* "<origin> <cow> P|N <chunk_size>" (P = persistent) */
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
			return_0;
		EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
			    seg->persistent ? 'P' : 'N', seg->chunk_size);
		break;
	case SEG_SNAPSHOT_ORIGIN:
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		EMIT_PARAMS(pos, "%s", originbuf);
		break;
	case SEG_STRIPED:
		/* "<#stripes> <stripe_size> " then areas below */
		EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
		break;
	case SEG_CRYPT:
		/*
		 * "<cipher>[-<chainmode>][-<iv>] <key> <iv_offset> " —
		 * falls back to the segment start when no explicit
		 * iv_offset was given.
		 */
		EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
			    seg->chainmode ? "-" : "", seg->chainmode ?: "",
			    seg->iv ? "-" : "", seg->iv ?: "", seg->key,
			    seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
			    seg->iv_offset : *seg_start);
		break;
	case SEG_RAID1:
	case SEG_RAID4:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
		target_type_is_raid = 1;
		r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
					    params, paramsize);
		if (!r)
			return_0;

		break;
	case SEG_THIN_POOL:
		/* "<metadata dev> <data dev> <block size> <low water mark> <features>" */
		if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
			return_0;
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
			    seg->data_block_size, seg->low_water_mark,
			    seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
		break;
	case SEG_THIN:
		/* "<pool dev> <device id>" */
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
		break;
	}

	/* Second pass: append target areas for the types that require them. */
	switch(seg->type) {
	case SEG_ERROR:
	case SEG_REPLICATOR:
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_ORIGIN:
	case SEG_SNAPSHOT_MERGE:
	case SEG_ZERO:
	case SEG_THIN_POOL:
	case SEG_THIN:
		break;
	case SEG_CRYPT:
	case SEG_LINEAR:
	case SEG_REPLICATOR_DEV:
	case SEG_STRIPED:
		if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
			stack;
			return r;
		}
		if (!params[0]) {
			log_error("No parameters supplied for %s target "
				  "%u:%u.", dm_segtypes[seg->type].target,
				  major, minor);
			return 0;
		}
		break;
	}

	log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
		  " %" PRIu64 " %s %s", major, minor,
		  *seg_start, seg->size, target_type_is_raid ? "raid" :
		  dm_segtypes[seg->type].target, params);

	if (!dm_task_add_target(dmt, *seg_start, seg->size,
				target_type_is_raid ? "raid" :
				dm_segtypes[seg->type].target, params))
		return_0;

	*seg_start += seg->size;

	return 1;
}
2286
ffa9b6a5
ZK
2287#undef EMIT_PARAMS
2288
4b2cae46
AK
2289static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2290 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2291{
2292 char *params;
2293 size_t paramsize = 4096;
2294 int ret;
2295
2296 do {
2297 if (!(params = dm_malloc(paramsize))) {
2298 log_error("Insufficient space for target parameters.");
2299 return 0;
2300 }
2301
12ea7cb1 2302 params[0] = '\0';
4b2cae46
AK
2303 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2304 params, paramsize);
165e4a11
AK
2305 dm_free(params);
2306
2307 if (!ret)
2308 stack;
2309
2310 if (ret >= 0)
2311 return ret;
2312
2313 log_debug("Insufficient space in params[%" PRIsize_t
2314 "] for target parameters.", paramsize);
2315
2316 paramsize *= 2;
2317 } while (paramsize < MAX_TARGET_PARAMSIZE);
2318
2319 log_error("Target parameter size too big. Aborting.");
2320 return 0;
2321}
2322
b4f1578f 2323static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2324{
2325 int r = 0;
2326 struct dm_task *dmt;
2327 struct load_segment *seg;
df390f17 2328 uint64_t seg_start = 0, existing_table_size;
165e4a11 2329
4b2cae46
AK
2330 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2331 dnode->info.major, dnode->info.minor);
165e4a11
AK
2332
2333 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2334 log_error("Reload dm_task creation failed for %s", dnode->name);
2335 return 0;
2336 }
2337
2338 if (!dm_task_set_major(dmt, dnode->info.major) ||
2339 !dm_task_set_minor(dmt, dnode->info.minor)) {
2340 log_error("Failed to set device number for %s reload.", dnode->name);
2341 goto out;
2342 }
2343
2344 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2345 log_error("Failed to set read only flag for %s", dnode->name);
2346 goto out;
2347 }
2348
2349 if (!dm_task_no_open_count(dmt))
2350 log_error("Failed to disable open_count");
2351
2c44337b 2352 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2353 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2354 seg, &seg_start))
b4f1578f 2355 goto_out;
165e4a11 2356
ec289b64
AK
2357 if (!dm_task_suppress_identical_reload(dmt))
2358 log_error("Failed to suppress reload of identical tables.");
2359
2360 if ((r = dm_task_run(dmt))) {
165e4a11 2361 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2362 if (r && !dnode->info.inactive_table)
2363 log_verbose("Suppressed %s identical table reload.",
2364 dnode->name);
bb875bb9 2365
df390f17 2366 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2367 if ((dnode->props.size_changed =
df390f17 2368 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2369 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2370 PRIu64 " for %s", existing_table_size,
bb875bb9 2371 seg_start, dnode->name);
df390f17
AK
2372 /*
2373 * Kernel usually skips size validation on zero-length devices
2374 * now so no need to preload them.
2375 */
2376 /* FIXME In which kernel version did this begin? */
2377 if (!existing_table_size && dnode->props.delay_resume_if_new)
2378 dnode->props.size_changed = 0;
2379 }
ec289b64 2380 }
165e4a11
AK
2381
2382 dnode->props.segment_count = 0;
2383
2384out:
2385 dm_task_destroy(dmt);
2386
2387 return r;
165e4a11
AK
2388}
2389
b4f1578f 2390int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2391 const char *uuid_prefix,
2392 size_t uuid_prefix_len)
165e4a11 2393{
2ca6b865 2394 int r = 1;
165e4a11 2395 void *handle = NULL;
b4f1578f 2396 struct dm_tree_node *child;
165e4a11 2397 struct dm_info newinfo;
566515c0 2398 int update_devs_flag = 0;
165e4a11
AK
2399
2400 /* Preload children first */
b4f1578f 2401 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2402 /* Skip existing non-device-mapper devices */
2403 if (!child->info.exists && child->info.major)
2404 continue;
2405
2406 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2407 if (child->info.exists &&
2408 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2409 continue;
2410
b4f1578f 2411 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2412 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2413 return_0;
165e4a11 2414
165e4a11 2415 /* FIXME Cope if name exists with no uuid? */
3d6782b3
ZK
2416 if (!child->info.exists && !_create_node(child))
2417 return_0;
165e4a11 2418
3d6782b3
ZK
2419 if (!child->info.inactive_table &&
2420 child->props.segment_count &&
2421 !_load_node(child))
2422 return_0;
165e4a11 2423
eb91c4ee
MB
2424 /* Propagate device size change change */
2425 if (child->props.size_changed)
2426 dnode->props.size_changed = 1;
2427
bb875bb9 2428 /* Resume device immediately if it has parents and its size changed */
3776c494 2429 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2430 continue;
2431
7707ea90
AK
2432 if (!child->info.inactive_table && !child->info.suspended)
2433 continue;
2434
fc795d87 2435 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2436 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2437 &newinfo, &child->dtree->cookie, child->udev_flags,
2438 child->info.suspended)) {
165e4a11 2439 log_error("Unable to resume %s (%" PRIu32
fc795d87 2440 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2441 child->info.minor);
2ca6b865 2442 r = 0;
165e4a11
AK
2443 continue;
2444 }
2445
2446 /* Update cached info */
2447 child->info = newinfo;
566515c0
PR
2448 /*
2449 * Prepare for immediate synchronization with udev and flush all stacked
2450 * dev node operations if requested by immediate_dev_node property. But
2451 * finish processing current level in the tree first.
2452 */
2453 if (child->props.immediate_dev_node)
2454 update_devs_flag = 1;
165e4a11
AK
2455 }
2456
566515c0
PR
2457 if (update_devs_flag) {
2458 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2459 stack;
2460 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2461 }
2462
2ca6b865 2463 return r;
165e4a11
AK
2464}
2465
165e4a11
AK
2466/*
2467 * Returns 1 if unsure.
2468 */
b4f1578f 2469int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2470 const char *uuid_prefix,
2471 size_t uuid_prefix_len)
2472{
2473 void *handle = NULL;
b4f1578f 2474 struct dm_tree_node *child = dnode;
165e4a11
AK
2475 const char *uuid;
2476
b4f1578f
AK
2477 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2478 if (!(uuid = dm_tree_node_get_uuid(child))) {
2479 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2480 return 1;
2481 }
2482
87f98002 2483 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2484 return 1;
2485
b4f1578f
AK
2486 if (dm_tree_node_num_children(child, 0))
2487 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2488 }
2489
2490 return 0;
2491}
2492
2493/*
2494 * Target functions
2495 */
b4f1578f 2496static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2497{
2498 struct load_segment *seg;
2499
b4f1578f
AK
2500 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2501 log_error("dtree node segment allocation failed");
165e4a11
AK
2502 return NULL;
2503 }
2504
2505 seg->type = type;
2506 seg->size = size;
2507 seg->area_count = 0;
2c44337b 2508 dm_list_init(&seg->areas);
165e4a11
AK
2509 seg->stripe_size = 0;
2510 seg->persistent = 0;
2511 seg->chunk_size = 0;
2512 seg->cow = NULL;
2513 seg->origin = NULL;
aa6f4e51 2514 seg->merge = NULL;
165e4a11 2515
2c44337b 2516 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2517 dnode->props.segment_count++;
2518
2519 return seg;
2520}
2521
b4f1578f 2522int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2523 uint64_t size,
2524 const char *origin_uuid)
165e4a11
AK
2525{
2526 struct load_segment *seg;
b4f1578f 2527 struct dm_tree_node *origin_node;
165e4a11 2528
b4f1578f
AK
2529 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2530 return_0;
165e4a11 2531
b4f1578f 2532 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2533 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2534 return 0;
2535 }
2536
2537 seg->origin = origin_node;
b4f1578f
AK
2538 if (!_link_tree_nodes(dnode, origin_node))
2539 return_0;
165e4a11 2540
56c28292
AK
2541 /* Resume snapshot origins after new snapshots */
2542 dnode->activation_priority = 1;
2543
165e4a11
AK
2544 return 1;
2545}
2546
aa6f4e51
MS
/*
 * Shared implementation for plain and merging snapshot targets.
 * Links the node to its origin and COW devices; when merge_uuid is set
 * a SEG_SNAPSHOT_MERGE segment is built instead and activation
 * priorities are arranged so snapshot-merge resumes before the merging
 * snapshot itself.
 *
 * Returns 1 on success, 0 on error.
 */
static int _add_snapshot_target(struct dm_tree_node *node,
				uint64_t size,
				const char *origin_uuid,
				const char *cow_uuid,
				const char *merge_uuid,
				int persistent,
				uint32_t chunk_size)
{
	struct load_segment *seg;
	struct dm_tree_node *origin_node, *cow_node, *merge_node;
	unsigned seg_type;

	/* merge_uuid selects the snapshot-merge variant of the target. */
	seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;

	if (!(seg = _add_segment(node, seg_type, size)))
		return_0;

	if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
		log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
		return 0;
	}

	seg->origin = origin_node;
	if (!_link_tree_nodes(node, origin_node))
		return_0;

	if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
		log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
		return 0;
	}

	seg->cow = cow_node;
	if (!_link_tree_nodes(node, cow_node))
		return_0;

	seg->persistent = persistent ? 1 : 0;
	seg->chunk_size = chunk_size;

	if (merge_uuid) {
		if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
			/* not a pure error, merging snapshot may have been deactivated */
			log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
		} else {
			seg->merge = merge_node;
			/* must not link merging snapshot, would undermine activation_priority below */
		}

		/* Resume snapshot-merge (acting origin) after other snapshots */
		node->activation_priority = 1;
		if (seg->merge) {
			/* Resume merging snapshot after snapshot-merge */
			seg->merge->activation_priority = 2;
		}
	}

	return 1;
}
2604
aa6f4e51
MS
2605
/* Add a plain (non-merging) snapshot target: the merge-less variant of
 * _add_snapshot_target(). Returns 1 on success, 0 on error. */
int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
				     uint64_t size,
				     const char *origin_uuid,
				     const char *cow_uuid,
				     int persistent,
				     uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    NULL, persistent, chunk_size);
}
2616
/* Add a merging snapshot target; merging snapshots are always
 * persistent (1). Returns 1 on success, 0 on error. */
int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *origin_uuid,
					   const char *cow_uuid,
					   const char *merge_uuid,
					   uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    merge_uuid, 1, chunk_size);
}
2627
b4f1578f 2628int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2629 uint64_t size)
165e4a11 2630{
b4f1578f
AK
2631 if (!_add_segment(node, SEG_ERROR, size))
2632 return_0;
165e4a11
AK
2633
2634 return 1;
2635}
2636
b4f1578f 2637int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2638 uint64_t size)
165e4a11 2639{
b4f1578f
AK
2640 if (!_add_segment(node, SEG_ZERO, size))
2641 return_0;
165e4a11
AK
2642
2643 return 1;
2644}
2645
b4f1578f 2646int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2647 uint64_t size)
165e4a11 2648{
b4f1578f
AK
2649 if (!_add_segment(node, SEG_LINEAR, size))
2650 return_0;
165e4a11
AK
2651
2652 return 1;
2653}
2654
b4f1578f 2655int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2656 uint64_t size,
2657 uint32_t stripe_size)
165e4a11
AK
2658{
2659 struct load_segment *seg;
2660
b4f1578f
AK
2661 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2662 return_0;
165e4a11
AK
2663
2664 seg->stripe_size = stripe_size;
2665
2666 return 1;
2667}
2668
12ca060e
MB
2669int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2670 uint64_t size,
2671 const char *cipher,
2672 const char *chainmode,
2673 const char *iv,
2674 uint64_t iv_offset,
2675 const char *key)
2676{
2677 struct load_segment *seg;
2678
2679 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2680 return_0;
2681
2682 seg->cipher = cipher;
2683 seg->chainmode = chainmode;
2684 seg->iv = iv;
2685 seg->iv_offset = iv_offset;
2686 seg->key = key;
2687
2688 return 1;
2689}
2690
b4f1578f 2691int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2692 uint32_t region_size,
08e64ce5 2693 unsigned clustered,
165e4a11 2694 const char *log_uuid,
ce7ed2c0
AK
2695 unsigned area_count,
2696 uint32_t flags)
165e4a11 2697{
908db078 2698 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2699 struct load_segment *seg;
2700
2701 if (!node->props.segment_count) {
b8175c33 2702 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2703 return 0;
2704 }
2705
2c44337b 2706 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2707
24b026e3 2708 if (log_uuid) {
67b25ed4
AK
2709 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2710 log_error("log uuid pool_strdup failed");
2711 return 0;
2712 }
df390f17
AK
2713 if ((flags & DM_CORELOG))
2714 /* For pvmove: immediate resume (for size validation) isn't needed. */
2715 node->props.delay_resume_if_new = 1;
2716 else {
9723090c
AK
2717 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2718 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2719 return 0;
2720 }
2721
566515c0
PR
2722 if (clustered)
2723 log_node->props.immediate_dev_node = 1;
2724
0a99713e
AK
2725 /* The kernel validates the size of disk logs. */
2726 /* FIXME Propagate to any devices below */
2727 log_node->props.delay_resume_if_new = 0;
2728
9723090c
AK
2729 if (!_link_tree_nodes(node, log_node))
2730 return_0;
2731 }
165e4a11
AK
2732 }
2733
2734 seg->log = log_node;
165e4a11
AK
2735 seg->region_size = region_size;
2736 seg->clustered = clustered;
2737 seg->mirror_area_count = area_count;
dbcb64b8 2738 seg->flags = flags;
165e4a11
AK
2739
2740 return 1;
2741}
2742
b4f1578f 2743int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2744 uint64_t size)
165e4a11 2745{
cbecd3cd 2746 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2747 return_0;
165e4a11
AK
2748
2749 return 1;
2750}
2751
cac52ca4
JEB
2752int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2753 uint64_t size,
2754 const char *raid_type,
2755 uint32_t region_size,
2756 uint32_t stripe_size,
f439e65b 2757 uint64_t rebuilds,
cac52ca4
JEB
2758 uint64_t reserved2)
2759{
2760 int i;
2761 struct load_segment *seg = NULL;
2762
2763 for (i = 0; dm_segtypes[i].target && !seg; i++)
2764 if (!strcmp(raid_type, dm_segtypes[i].target))
2765 if (!(seg = _add_segment(node,
2766 dm_segtypes[i].type, size)))
2767 return_0;
2768
b2fa9b43
JEB
2769 if (!seg)
2770 return_0;
2771
cac52ca4
JEB
2772 seg->region_size = region_size;
2773 seg->stripe_size = stripe_size;
2774 seg->area_count = 0;
f439e65b 2775 seg->rebuilds = rebuilds;
cac52ca4
JEB
2776
2777 return 1;
2778}
2779
b262f3e1
ZK
2780int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2781 uint64_t size,
2782 const char *rlog_uuid,
2783 const char *rlog_type,
2784 unsigned rsite_index,
2785 dm_replicator_mode_t mode,
2786 uint32_t async_timeout,
2787 uint64_t fall_behind_data,
2788 uint32_t fall_behind_ios)
2789{
2790 struct load_segment *rseg;
2791 struct replicator_site *rsite;
2792
2793 /* Local site0 - adds replicator segment and links rlog device */
2794 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2795 if (node->props.segment_count) {
2796 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2797 return 0;
2798 }
2799
2800 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2801 return_0;
2802
2803 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2804 log_error("Missing replicator log uuid %s.", rlog_uuid);
2805 return 0;
2806 }
2807
2808 if (!_link_tree_nodes(node, rseg->log))
2809 return_0;
2810
2811 if (strcmp(rlog_type, "ringbuffer") != 0) {
2812 log_error("Unsupported replicator log type %s.", rlog_type);
2813 return 0;
2814 }
2815
2816 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2817 return_0;
2818
2819 dm_list_init(&rseg->rsites);
2820 rseg->rdevice_count = 0;
2821 node->activation_priority = 1;
2822 }
2823
2824 /* Add site to segment */
2825 if (mode == DM_REPLICATOR_SYNC
2826 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2827 log_error("Async parameters passed for synchronnous replicator.");
2828 return 0;
2829 }
2830
2831 if (node->props.segment_count != 1) {
2832 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2833 return 0;
2834 }
2835
2836 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2837 if (rseg->type != SEG_REPLICATOR) {
2838 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2839 dm_segtypes[rseg->type].target);
2840 return 0;
2841 }
2842
2843 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2844 log_error("Failed to allocate remote site segment.");
2845 return 0;
2846 }
2847
2848 dm_list_add(&rseg->rsites, &rsite->list);
2849 rseg->rsite_count++;
2850
2851 rsite->mode = mode;
2852 rsite->async_timeout = async_timeout;
2853 rsite->fall_behind_data = fall_behind_data;
2854 rsite->fall_behind_ios = fall_behind_ios;
2855 rsite->rsite_index = rsite_index;
2856
2857 return 1;
2858}
2859
/*
 * Appends device node to Replicator.
 *
 * For the local site a SEG_REPLICATOR_DEV segment is created and tied
 * to the replicator control device (replicator_uuid); for remote sites
 * the existing replicator-dev segment is reused.  In both cases a
 * target area for rdev_uuid is added, carrying the per-site sync log
 * (disk log via slog_uuid, or core log with DM_CORELOG in slog_flags).
 *
 * Returns 1 on success, 0 on error.
 */
int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *replicator_uuid,
					   uint64_t rdevice_index,
					   const char *rdev_uuid,
					   unsigned rsite_index,
					   const char *slog_uuid,
					   uint32_t slog_flags,
					   uint32_t slog_region_size)
{
	struct seg_area *area;
	struct load_segment *rseg;
	struct load_segment *rep_seg;

	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		/* Site index for local target */
		if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
			return_0;

		if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
			log_error("Missing replicator uuid %s.", replicator_uuid);
			return 0;
		}

		/* Local slink0 for replicator must be always initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
			return 0;
		}

		rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
		if (rep_seg->type != SEG_REPLICATOR) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
				  dm_segtypes[rep_seg->type].target);
			return 0;
		}
		rep_seg->rdevice_count++;

		if (!_link_tree_nodes(node, rseg->replicator))
			return_0;

		rseg->rdevice_index = rdevice_index;
	} else {
		/* Local slink0 for replicator must be always initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
			return 0;
		}

		rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
		if (rseg->type != SEG_REPLICATOR_DEV) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
				  dm_segtypes[rseg->type].target);
			return 0;
		}
	}

	/* A disk sync log requires a uuid to find it by. */
	if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
		log_error("Unspecified sync log uuid.");
		return 0;
	}

	if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
		return_0;

	/* Set the sync-log properties on the area just added. */
	area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);

	if (!(slog_flags & DM_CORELOG)) {
		if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
			log_error("Couldn't find sync log uuid %s.", slog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, area->slog))
			return_0;
	}

	area->flags = slog_flags;
	area->region_size = slog_region_size;
	area->rsite_index = rsite_index;

	return 1;
}
2944
5668fe04
ZK
2945static int _thin_validate_device_id(uint32_t device_id)
2946{
2947 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2948 log_error("Device id %u is higher then %u.",
2949 device_id, DM_THIN_MAX_DEVICE_ID);
2950 return 0;
2951 }
2952
2953 return 1;
2954}
2955
4251236e
ZK
2956int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2957 uint64_t size,
e0ea24be 2958 uint64_t transaction_id,
4251236e 2959 const char *metadata_uuid,
5668fd6a 2960 const char *pool_uuid,
4251236e 2961 uint32_t data_block_size,
e9156c2b 2962 uint64_t low_water_mark,
460c5991 2963 unsigned skip_block_zeroing)
4251236e
ZK
2964{
2965 struct load_segment *seg;
2966
3f53c059 2967 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2968 log_error("Data block size %u is lower then %u sectors.",
3f53c059 2969 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2970 return 0;
2971 }
2972
3f53c059 2973 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2974 log_error("Data block size %u is higher then %u sectors.",
3f53c059 2975 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2976 return 0;
2977 }
2978
2979 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2980 return_0;
2981
2982 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2983 log_error("Missing metadata uuid %s.", metadata_uuid);
2984 return 0;
2985 }
2986
2987 if (!_link_tree_nodes(node, seg->metadata))
2988 return_0;
2989
2990 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2991 log_error("Missing pool uuid %s.", pool_uuid);
2992 return 0;
2993 }
2994
2995 if (!_link_tree_nodes(node, seg->pool))
2996 return_0;
2997
bbcd37e4
ZK
2998 node->props.send_messages = 1;
2999 seg->transaction_id = transaction_id;
e9156c2b 3000 seg->low_water_mark = low_water_mark;
e0ea24be 3001 seg->data_block_size = data_block_size;
460c5991 3002 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
3003 dm_list_init(&seg->thin_messages);
3004
3005 return 1;
3006}
3007
/*
 * Queue a dm-thin pool message on node's (single) thin-pool segment.
 * The meaning of id1/id2 depends on the message type:
 *   CREATE_SNAP:        id1 = new device id, id2 = origin device id
 *   CREATE_THIN:        id1 = new device id
 *   DELETE:             id1 = device id
 *   TRIM:               id1 = device id,     id2 = new size
 *   SET_TRANSACTION_ID: id1 = current id,    id2 = new id (must be id1+1
 *                       and match the segment's transaction_id)
 *
 * Returns 1 on success, 0 on error.
 */
int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
				       dm_thin_message_t type,
				       uint64_t id1, uint64_t id2)
{
	struct load_segment *seg;
	struct thin_message *tm;

	if (node->props.segment_count != 1) {
		log_error("Thin pool node must have only one segment.");
		return 0;
	}

	seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL) {
		log_error("Thin pool node has segment type %s.",
			  dm_segtypes[seg->type].target);
		return 0;
	}

	if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
		log_error("Failed to allocate thin message.");
		return 0;
	}

	switch (type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		/* If the thin origin is active, it must be suspend first! */
		if (id1 == id2) {
			log_error("Cannot use same device id for origin and its snapshot.");
			return 0;
		}
		if (!_thin_validate_device_id(id1) ||
		    !_thin_validate_device_id(id2))
			return_0;
		tm->message.u.m_create_snap.device_id = id1;
		tm->message.u.m_create_snap.origin_id = id2;
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_create_thin.device_id = id1;
		/* Device may already exist; EEXIST from the kernel is tolerated. */
		tm->expected_errno = EEXIST;
		break;
	case DM_THIN_MESSAGE_DELETE:
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_delete.device_id = id1;
		/* Device may already be gone; ENODATA is tolerated. */
		tm->expected_errno = ENODATA;
		break;
	case DM_THIN_MESSAGE_TRIM:
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_trim.device_id = id1;
		tm->message.u.m_trim.new_size = id2;
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		if ((id1 + 1) != id2) {
			log_error("New transaction id must be sequential.");
			return 0; /* FIXME: Maybe too strict here? */
		}
		if (id2 != seg->transaction_id) {
			log_error("Current transaction id is different from thin pool.");
			return 0; /* FIXME: Maybe too strict here? */
		}
		tm->message.u.m_set_transaction_id.current_id = id1;
		tm->message.u.m_set_transaction_id.new_id = id2;
		break;
	default:
		log_error("Unsupported message type %d.", (int) type);
		return 0;
	}

	tm->message.type = type;
	dm_list_add(&seg->thin_messages, &tm->list);

	return 1;
}
3085
3086int dm_tree_node_add_thin_target(struct dm_tree_node *node,
3087 uint64_t size,
4d25c81b 3088 const char *pool_uuid,
4251236e
ZK
3089 uint32_t device_id)
3090{
4d25c81b 3091 struct dm_tree_node *pool;
4251236e
ZK
3092 struct load_segment *seg;
3093
4d25c81b
ZK
3094 if (!(pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
3095 log_error("Missing thin pool uuid %s.", pool_uuid);
4251236e
ZK
3096 return 0;
3097 }
3098
4d25c81b 3099 if (!_link_tree_nodes(node, pool))
4251236e
ZK
3100 return_0;
3101
6744c143
ZK
3102 if (!_thin_validate_device_id(device_id))
3103 return_0;
4d25c81b 3104
6744c143
ZK
3105 if (!(seg = _add_segment(node, SEG_THIN, size)))
3106 return_0;
4d25c81b 3107
6744c143
ZK
3108 seg->pool = pool;
3109 seg->device_id = device_id;
1419bf1c 3110
4251236e
ZK
3111 return 1;
3112}
3113
077c4d1a
ZK
3114
3115int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
3116 struct dm_status_thin_pool **status)
3117{
3118 struct dm_status_thin_pool *s;
3119
3120 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin_pool)))) {
3121 log_error("Failed to allocate thin_pool status structure.");
3122 return 0;
3123 }
3124
5fd459f0 3125 /* FIXME: add support for held metadata root */
077c4d1a
ZK
3126 if (sscanf(params, "%" PRIu64 " %" PRIu64 "/%" PRIu64 " %" PRIu64 "/%" PRIu64,
3127 &s->transaction_id,
5fd459f0
ZK
3128 &s->used_metadata_blocks,
3129 &s->total_metadata_blocks,
077c4d1a
ZK
3130 &s->used_data_blocks,
3131 &s->total_data_blocks) != 5) {
3132 log_error("Failed to parse thin pool params: %s.", params);
3133 return 0;
3134 }
3135
3136 *status = s;
3137
3138 return 1;
3139}
3140
3141int dm_get_status_thin(struct dm_pool *mem, const char *params,
3142 struct dm_status_thin **status)
3143{
3144 struct dm_status_thin *s;
3145
3146 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin)))) {
3147 log_error("Failed to allocate thin status structure.");
3148 return 0;
3149 }
3150
9568f1b5
ZK
3151 if (strchr(params, '-')) {
3152 s->mapped_sectors = 0;
3153 s->highest_mapped_sector = 0;
3154 } else if (sscanf(params, "%" PRIu64 " %" PRIu64,
077c4d1a
ZK
3155 &s->mapped_sectors,
3156 &s->highest_mapped_sector) != 2) {
3157 log_error("Failed to parse thin params: %s.", params);
3158 return 0;
3159 }
3160
3161 *status = s;
3162
3163 return 1;
3164}
3165
b4f1578f 3166static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
3167{
3168 struct seg_area *area;
3169
b4f1578f 3170 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
3171 log_error("Failed to allocate target segment area.");
3172 return 0;
3173 }
3174
3175 area->dev_node = dev_node;
3176 area->offset = offset;
3177
2c44337b 3178 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
3179 seg->area_count++;
3180
3181 return 1;
3182}
3183
b4f1578f 3184int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
3185 const char *dev_name,
3186 const char *uuid,
3187 uint64_t offset)
165e4a11
AK
3188{
3189 struct load_segment *seg;
3190 struct stat info;
b4f1578f 3191 struct dm_tree_node *dev_node;
165e4a11
AK
3192
3193 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3194 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3195 return 0;
3196 }
3197
3198 if (uuid) {
b4f1578f 3199 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3200 log_error("Couldn't find area uuid %s.", uuid);
3201 return 0;
3202 }
b4f1578f
AK
3203 if (!_link_tree_nodes(node, dev_node))
3204 return_0;
165e4a11 3205 } else {
6d04311e 3206 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3207 log_error("Device %s not found.", dev_name);
3208 return 0;
3209 }
3210
40e5fd8b 3211 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3212 log_error("Device %s is not a block device.", dev_name);
3213 return 0;
3214 }
3215
3216 /* FIXME Check correct macro use */
cda69e17
PR
3217 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3218 MINOR(info.st_rdev), 0)))
b4f1578f 3219 return_0;
165e4a11
AK
3220 }
3221
3222 if (!node->props.segment_count) {
b8175c33 3223 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3224 return 0;
3225 }
3226
2c44337b 3227 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3228
b4f1578f
AK
3229 if (!_add_area(node, seg, dev_node, offset))
3230 return_0;
165e4a11
AK
3231
3232 return 1;
db208f51 3233}
bd90c6b2 3234
6d04311e
JEB
3235int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3236{
3237 struct load_segment *seg;
3238
3239 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3240
415c0690
AK
3241 switch (seg->type) {
3242 case SEG_RAID1:
3243 case SEG_RAID4:
3244 case SEG_RAID5_LA:
3245 case SEG_RAID5_RA:
3246 case SEG_RAID5_LS:
3247 case SEG_RAID5_RS:
3248 case SEG_RAID6_ZR:
3249 case SEG_RAID6_NR:
3250 case SEG_RAID6_NC:
3251 break;
3252 default:
3253 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3254 return 0;
3255 }
3256
6d04311e
JEB
3257 if (!_add_area(node, seg, NULL, offset))
3258 return_0;
3259
3260 return 1;
3261}
This page took 0.552142 seconds and 5 git commands to generate.