]> sourceware.org Git - lvm2.git/blame - libdm/libdm-deptree.c
Do not crash for NULL sort_key
[lvm2.git] / libdm / libdm-deptree.c
CommitLineData
3d0480ed 1/*
4251236e 2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3d0480ed
AK
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
3e5b6ed2 15#include "dmlib.h"
3d0480ed
AK
16#include "libdm-targets.h"
17#include "libdm-common.h"
3d0480ed 18#include "kdev_t.h"
0782ad50 19#include "dm-ioctl.h"
3d0480ed
AK
20
21#include <stdarg.h>
22#include <sys/param.h>
8f26e18c 23#include <sys/utsname.h>
3d0480ed 24
165e4a11
AK
25#define MAX_TARGET_PARAMSIZE 500000
26
b262f3e1
ZK
27#define REPLICATOR_LOCAL_SITE 0
28
165e4a11
AK
/* Supported segment types */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,
};

/* FIXME Add crypt and multipath support */

/* Maps each segment-type enum value to its device-mapper target name. */
struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 *WARNING: Since 'raid' target overloads this 1:1 mapping table
	 * for search do not add new enum elements past them!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};
93
/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;	/* Node supplying this area */

	uint64_t offset;		/* Start offset within dev_node */

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};

/* A single thin-pool message with its per-type payload. */
struct dm_thin_message {
	dm_thin_message_t type;
	union {
		struct {
			uint32_t device_id;
			uint32_t origin_id;
		} m_create_snap;
		struct {
			uint32_t device_id;
		} m_create_thin;
		struct {
			uint32_t device_id;
		} m_delete;
		struct {
			uint64_t current_id;
			uint64_t new_id;
		} m_set_transaction_id;
		struct {
			uint32_t device_id;
			uint64_t new_size;
		} m_trim;
	} u;
};

/* List entry queueing one thin-pool message for later delivery. */
struct thin_message {
	struct dm_list list;
	struct dm_thin_message message;
	int expected_errno;	/* errno the caller expects this message to produce */
};

/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;
	dm_replicator_mode_t mode;
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};
149
/* Per-segment properties */
struct load_segment {
	struct dm_list list;

	unsigned type;			/* One of the SEG_* enum values */

	uint64_t size;			/* Segment length in sectors */

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	struct dm_list thin_messages;	/* Thin_pool */
	uint64_t transaction_id;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	uint32_t data_block_size;	/* Thin_pool */
	unsigned skip_block_zeroing;	/* Thin_pool */
	uint32_t device_id;		/* Thin */

};

/* Per-device properties */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	unsigned segment_count;
	unsigned size_changed;		/* Set when a table (re)load changed the size */
	struct dm_list segs;		/* List of struct load_segment */

	const char *new_name;		/* Non-NULL requests a rename */

	/* If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;

	/* Send messages for this node in preload */
	unsigned send_messages;
};

/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;
	struct dm_tree_node *node;
};
242
b4f1578f
AK
/* One device (node) in the dependency tree. */
struct dm_tree_node {
	struct dm_tree *dtree;		/* Owning tree */

	const char *name;
	const char *uuid;
	struct dm_info info;		/* Cached status from the last DM_DEVICE_* query */

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};

/* Whole dependency tree: memory pool, lookup hashes and the root node. */
struct dm_tree {
	struct dm_pool *mem;
	struct dm_hash_table *devs;	/* Lookup by packed major:minor */
	struct dm_hash_table *uuids;	/* Lookup by uuid */
	struct dm_tree_node root;	/* Anchor for top- and bottom-level nodes */
	int skip_lockfs;	/* 1 skips lockfs (for non-snapshots) */
	int no_flush;		/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;	/* 1 retries remove if not successful */
	uint32_t cookie;	/* Udev cookie shared by operations on this tree */
};
278
5c9eae96
AK
279/*
280 * Tree functions.
281 */
/*
 * Tree functions.
 */
/*
 * Allocate a new, empty dependency tree.
 * The tree owns a memory pool and two hash tables (by devno and by uuid);
 * the embedded root node anchors top- and bottom-level device lists.
 * Returns NULL on allocation failure (all partial allocations released).
 */
struct dm_tree *dm_tree_create(void)
{
	struct dm_pool *dmem;
	struct dm_tree *dtree;

	if (!(dmem = dm_pool_create("dtree", 1024)) ||
	    !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
		log_error("Failed to allocate dtree.");
		if (dmem)
			dm_pool_destroy(dmem);
		return NULL;
	}

	/* The root node belongs to the tree itself. */
	dtree->root.dtree = dtree;
	dm_list_init(&dtree->root.uses);
	dm_list_init(&dtree->root.used_by);
	dtree->skip_lockfs = 0;
	dtree->no_flush = 0;
	dtree->mem = dmem;

	if (!(dtree->devs = dm_hash_create(8))) {
		log_error("dtree hash creation failed");
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	if (!(dtree->uuids = dm_hash_create(32))) {
		log_error("dtree uuid hash creation failed");
		dm_hash_destroy(dtree->devs);
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	return dtree;
}
317
/*
 * Release a tree created by dm_tree_create().
 * Destroying the pool frees every node; NULL is accepted as a no-op.
 */
void dm_tree_free(struct dm_tree *dtree)
{
	if (!dtree)
		return;

	dm_hash_destroy(dtree->uuids);
	dm_hash_destroy(dtree->devs);
	dm_pool_destroy(dtree->mem);
}
327
5c9eae96
AK
/* Store the udev cookie used for operations on this node's tree. */
void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
{
	node->dtree->cookie = cookie;
}

/* Return the udev cookie of this node's tree. */
uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
{
	return node->dtree->cookie;
}

/* Skip lockfs on suspend for the whole tree (for non-snapshots). */
void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}

/* Use noflush suspend for the whole tree (mirrors/multipath). */
void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}

/* Retry device removal for the whole tree if not successful at first. */
void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
	dnode->dtree->retry_remove = 1;
}
352
353/*
354 * Node functions.
355 */
04bde319
ZK
/*
 * Node functions.
 */
/* Return 1 if child appears on parent's 'uses' list, else 0. */
static int _nodes_are_linked(const struct dm_tree_node *parent,
			     const struct dm_tree_node *child)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, &parent->uses)
		if (dlink->node == child)
			return 1;

	return 0;
}
367
/*
 * Append a link entry referencing node to the given list.
 * The entry is pool-allocated and freed with the tree; returns 0 on OOM.
 */
static int _link(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
		log_error("dtree link allocation failed");
		return 0;
	}

	dlink->node = node;
	dm_list_add(list, &dlink->list);

	return 1;
}
382
b4f1578f
AK
/*
 * Create the bidirectional parent<->child association (idempotent).
 * NOTE(review): if the second _link() fails, the first link is left in
 * place, so on allocation failure the lists may end up one-sided.
 */
static int _link_nodes(struct dm_tree_node *parent,
		       struct dm_tree_node *child)
{
	if (_nodes_are_linked(parent, child))
		return 1;

	if (!_link(&parent->uses, child))
		return 0;

	if (!_link(&child->used_by, parent))
		return 0;

	return 1;
}
397
/*
 * Remove the first link entry referencing node from the given list.
 * Silently does nothing if node is not on the list.
 */
static void _unlink(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, list)
		if (dlink->node == node) {
			dm_list_del(&dlink->list);
			break;
		}
}
408
b4f1578f
AK
409static void _unlink_nodes(struct dm_tree_node *parent,
410 struct dm_tree_node *child)
3d0480ed
AK
411{
412 if (!_nodes_are_linked(parent, child))
413 return;
414
415 _unlink(&parent->uses, child);
416 _unlink(&child->used_by, parent);
417}
418
/* Link node under the root: node has no parent yet (top level). */
static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}

/* Remove node from the root's child list. */
static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}

/* Link the root under node: node has no children yet (bottom level). */
static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}

/* Remove the root from node's child list. */
static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}
438
/*
 * Link parent and child, maintaining the top/bottom-level invariants:
 * a node with a real parent must not stay on the root's top-level list,
 * and a node with real children must not keep the root as a child.
 */
static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}
456
/*
 * Allocate a node from the tree's pool and register it in the devno hash
 * (and in the uuid hash when a non-empty uuid is supplied).
 * name/uuid/context are stored by reference — caller keeps them valid.
 * Returns NULL on allocation or hash-insertion failure (node freed).
 */
static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	/* Pack major:minor into a single hash key. */
	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		/* Undo the devno insertion so no dangling entry remains. */
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}
505
/* Look up a node by device number; NULL if not present in the tree. */
static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
					       uint32_t major, uint32_t minor)
{
	uint64_t dev = MKDEV(major, minor);

	return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
				     sizeof(dev));
}
514
/*
 * Look up a node by uuid.  If the exact uuid is not found and it starts
 * with the default uuid prefix, retry with the prefix stripped (handles
 * uuids registered without the prefix).  Returns NULL when not found.
 */
static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
						       const char *uuid)
{
	struct dm_tree_node *node;
	const char *default_uuid_prefix;
	size_t default_uuid_prefix_len;

	if ((node = dm_hash_lookup(dtree->uuids, uuid)))
		return node;

	default_uuid_prefix = dm_uuid_prefix();
	default_uuid_prefix_len = strlen(default_uuid_prefix);

	if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
		return NULL;

	return dm_hash_lookup(dtree->uuids, uuid + default_uuid_prefix_len);
}
533
5c9eae96
AK
/* Replace the node's udev control flags, logging any change. */
void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)

{
	struct dm_info *dinfo = &dnode->info;

	if (udev_flags != dnode->udev_flags)
		log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
			  ") udev_flags from 0x%x to 0x%x",
			  dnode->name, dinfo->major, dinfo->minor,
			  dnode->udev_flags, udev_flags);
	dnode->udev_flags = udev_flags;
}

/* Record read-ahead settings to be applied when the node is loaded. */
void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}

/* Mark a (direct-child) node that must be suspended before this node. */
void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}

/* Return the device name, or "" if the device does not exist. */
const char *dm_tree_node_get_name(const struct dm_tree_node *node)
{
	return node->info.exists ? node->name : "";
}

/* Return the device uuid, or "" if the device does not exist. */
const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
{
	return node->info.exists ? node->uuid : "";
}

/* Return the cached dm_info of this node. */
const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}

/* Return the caller-supplied context pointer. */
void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}

/* Return nonzero if a table load changed the device's size. */
int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}

/*
 * Count children (inverted = 0) or parents (inverted = 1) of a node.
 * A link to the tree root is not a real relation, so 0 is returned then.
 */
int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
{
	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 0;
		return dm_list_size(&node->used_by);
	}

	if (_nodes_are_linked(node, &node->dtree->root))
		return 0;

	return dm_list_size(&node->uses);
}
599
/*
 * Returns 1 if no prefix supplied
 * Otherwise returns 1 when uuid starts with uuid_prefix, also accepting
 * the case where only one of the two carries the default uuid prefix
 * (transition handling for active devices registered without it).
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	const char *default_uuid_prefix = dm_uuid_prefix();
	size_t default_uuid_prefix_len = strlen(default_uuid_prefix);

	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)
		return 0;

	if (!strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
		return 0;

	if (strncmp(uuid_prefix, default_uuid_prefix, default_uuid_prefix_len))
		return 0;

	/* Compare against the prefix with the default part stripped. */
	if (!strncmp(uuid, uuid_prefix + default_uuid_prefix_len, uuid_prefix_len - default_uuid_prefix_len))
		return 1;

	return 0;
}
629
/*
 * Returns 1 if no children.
 * Otherwise returns 1 only when every child (or parent, if inverted)
 * matching uuid_prefix is suspended; presuspend targets are skipped.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}
678
679/*
680 * Set major and minor to zero for root of tree.
681 */
682struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
683 uint32_t major,
684 uint32_t minor)
685{
686 if (!major && !minor)
687 return &dtree->root;
688
689 return _find_dm_tree_node(dtree, major, minor);
690}
691
692/*
693 * Set uuid to NULL for root of tree.
694 */
695struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
696 const char *uuid)
697{
698 if (!uuid || !*uuid)
699 return &dtree->root;
700
701 return _find_dm_tree_node_by_uuid(dtree, uuid);
702}
703
/*
 * First time set *handle to NULL.
 * Set inverted to invert the tree.
 * Iterates over parent's children (or parents when inverted); *handle
 * stores the current list position between calls.  Returns NULL at end.
 */
struct dm_tree_node *dm_tree_next_child(void **handle,
					const struct dm_tree_node *parent,
					uint32_t inverted)
{
	struct dm_list **dlink = (struct dm_list **) handle;
	const struct dm_list *use_list;

	if (inverted)
		use_list = &parent->used_by;
	else
		use_list = &parent->uses;

	if (!*dlink)
		*dlink = dm_list_first(use_list);
	else
		*dlink = dm_list_next(use_list, *dlink);

	return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
}
727
/*
 * Run DM_DEVICE_DEPS for major:minor, returning info, deps, and (when the
 * caller passes non-NULL pointers) pool-copied name/uuid.
 * Non-dm devices succeed with *deps = NULL and only major/minor filled in.
 * On success with a dm device, *dmt stays allocated — caller destroys it;
 * on failure *dmt is destroyed here.  Returns 1 on success, 0 on failure.
 */
static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
		 const char **name, const char **uuid, unsigned inactive_table,
		 struct dm_info *info, struct dm_deps **deps)
{
	memset(info, 0, sizeof(*info));

	if (!dm_is_dm_major(major)) {
		if (name)
			*name = "";
		if (uuid)
			*uuid = "";
		*deps = NULL;
		info->major = major;
		info->minor = minor;
		return 1;
	}

	if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
		log_error("deps dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(*dmt, major)) {
		log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_set_minor(*dmt, minor)) {
		log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (inactive_table && !dm_task_query_inactive_table(*dmt)) {
		log_error("_deps: failed to set inactive table for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_run(*dmt)) {
		log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_get_info(*dmt, info)) {
		log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!info->exists) {
		if (name)
			*name = "";
		if (uuid)
			*uuid = "";
		*deps = NULL;
	} else {
		/* Sanity-check the kernel echoed back the same device. */
		if (info->major != major) {
			log_error("Inconsistent dtree major number: %u != %u",
				  major, info->major);
			goto failed;
		}
		if (info->minor != minor) {
			log_error("Inconsistent dtree minor number: %u != %u",
				  minor, info->minor);
			goto failed;
		}
		if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
			log_error("name pool_strdup failed");
			goto failed;
		}
		if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
			log_error("uuid pool_strdup failed");
			goto failed;
		}
		*deps = dm_task_get_deps(*dmt);
	}

	return 1;

failed:
	dm_task_destroy(*dmt);
	return 0;
}
814
5c9eae96
AK
/*
 * Query DM_DEVICE_INFO for major:minor into *info; optionally copy the
 * device's name/uuid into mem.  with_open_count = 0 disables open_count
 * collection.  Returns 1 on success, 0 on failure.
 * (The previous comment here described deactivation — it belonged to a
 * different function.)
 */
static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
			struct dm_info *info, struct dm_pool *mem,
			const char **name, const char **uuid)
{
	struct dm_task *dmt;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
		log_error("_info_by_dev: dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("_info_by_dev: Failed to set device number");
		dm_task_destroy(dmt);
		return 0;
	}

	if (!with_open_count && !dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!(r = dm_task_run(dmt)))
		goto_out;

	if (!(r = dm_task_get_info(dmt, info)))
		goto_out;

	if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(dmt)))) {
		log_error("name pool_strdup failed");
		r = 0;
		goto_out;
	}

	if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(dmt)))) {
		log_error("uuid pool_strdup failed");
		r = 0;
		goto_out;
	}

out:
	dm_task_destroy(dmt);

	return r;
}
862
/*
 * Return 1 if the device appears unused, 0 (with an error logged) if it
 * is held open, has holders in sysfs, or carries a mounted filesystem.
 * Without sysfs only the open_count is consulted.
 */
static int _check_device_not_in_use(const char *name, struct dm_info *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
				  name, info->major, info->minor);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
			  "by another device.", name, info->major, info->minor);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
			  "a filesystem in use.", name, info->major, info->minor);
		return 0;
	}

	return 1;
}
893
/* Check if all parent nodes of given node have open_count == 0 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}
934
/*
 * Remove (deactivate) a single device via DM_DEVICE_REMOVE.
 * A udev cookie is attached when supplied; retry requests kernel-side
 * remove retries.  The dev node is removed manually afterwards.
 * Returns the dm_task_run() result (1 success, 0 failure).
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (cookie)
		if (!dm_task_set_cookie(dmt, cookie, udev_flags))
			goto out;

	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
976
/*
 * Clear a node's inactive table (DM_DEVICE_CLEAR) and deactivate any
 * devices that only the cleared table referenced: devices not in the
 * tree, with no live table, not open, and carrying the default uuid
 * prefix.  Returns 1 on success, 0 on failure.
 */
static int _node_clear_table(struct dm_tree_node *dnode, uint16_t udev_flags)
{
	struct dm_task *dmt = NULL, *deps_dmt = NULL;
	struct dm_info *info, deps_info;
	struct dm_deps *deps = NULL;
	const char *name, *uuid;
	const char *default_uuid_prefix;
	size_t default_uuid_prefix_len;
	uint32_t i;
	int r = 0;

	if (!(info = &dnode->info)) {
		log_error("_node_clear_table failed: missing info");
		return 0;
	}

	if (!(name = dm_tree_node_get_name(dnode))) {
		log_error("_node_clear_table failed: missing name");
		return 0;
	}

	/* Is there a table? */
	if (!info->exists || !info->inactive_table)
		return 1;

	/* Get devices used by inactive table that's about to be deleted. */
	if (!_deps(&deps_dmt, dnode->dtree->mem, info->major, info->minor, NULL, NULL, 1, info, &deps)) {
		log_error("Failed to obtain dependencies for %s before clearing table.", name);
		return 0;
	}

	log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
		    name, info->major, info->minor);

	if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
		log_error("Table clear dm_task creation failed for %s", name);
		goto_out;
	}

	if (!dm_task_set_major(dmt, info->major) ||
	    !dm_task_set_minor(dmt, info->minor)) {
		log_error("Failed to set device number for %s table clear", name);
		goto_out;
	}

	r = dm_task_run(dmt);

	/* Refresh cached info after the clear. */
	if (!dm_task_get_info(dmt, info)) {
		log_error("_node_clear_table failed: info missing after running task for %s", name);
		r = 0;
	}

	if (!r || !deps)
		goto_out;

	/*
	 * Remove (incomplete) devices that the inactive table referred to but
	 * which are not in the tree, no longer referenced and don't have a live
	 * table.
	 */
	default_uuid_prefix = dm_uuid_prefix();
	default_uuid_prefix_len = strlen(default_uuid_prefix);

	for (i = 0; i < deps->count; i++) {
		/* If already in tree, assume it's under control */
		if (_find_dm_tree_node(dnode->dtree, MAJOR(deps->device[i]), MINOR(deps->device[i])))
			continue;

		if (!_info_by_dev(MAJOR(deps->device[i]), MINOR(deps->device[i]), 1,
				  &deps_info, dnode->dtree->mem, &name, &uuid))
			continue;

		/* Proceed if device is an 'orphan' - unreferenced and without a live table. */
		if (!deps_info.exists || deps_info.live_table || deps_info.open_count)
			continue;

		if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
			continue;

		/* Remove device. */
		if (!_deactivate_node(name, deps_info.major, deps_info.minor, &dnode->dtree->cookie, udev_flags, 0)) {
			log_error("Failed to deactivate no-longer-used device %s (%"
				  PRIu32 ":%" PRIu32 ")", name, deps_info.major, deps_info.minor);
		} else if (deps_info.suspended)
			dec_suspended();
	}

out:
	if (dmt)
		dm_task_destroy(dmt);

	if (deps_dmt)
		dm_task_destroy(deps_dmt);

	return r;
}
1073
5c9eae96
AK
/*
 * Add (or refresh) a device node in the tree, identified by uuid.
 * A new node is attached to both top and bottom level until a table is
 * supplied; an existing node with a different name gets a pending rename.
 * clear_inactive additionally clears any inactive table on the device.
 * Returns the node, or NULL on failure.
 */
struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
							 const char *name,
							 const char *uuid,
							 uint32_t major,
							 uint32_t minor,
							 int read_only,
							 int clear_inactive,
							 void *context,
							 uint16_t udev_flags)
{
	struct dm_tree_node *dnode;
	struct dm_info info;
	const char *name2;
	const char *uuid2;

	/* Do we need to add node to tree? */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
		/* Node stores name/uuid by reference, so pool-copy them. */
		if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
		if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
			log_error("uuid pool_strdup failed");
			return NULL;
		}

		memset(&info, 0, sizeof(info));

		if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
						   context, 0)))
			return_NULL;

		/* Attach to root node until a table is supplied */
		if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
			return_NULL;

		dnode->props.major = major;
		dnode->props.minor = minor;
		dnode->props.new_name = NULL;
		dnode->props.size_changed = 0;
	} else if (strcmp(name, dnode->name)) {
		/* Do we need to rename node? */
		if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
	}

	dnode->props.read_only = read_only ? 1 : 0;
	dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
	dnode->props.read_ahead_flags = 0;

	if (clear_inactive && !_node_clear_table(dnode, udev_flags))
		return_NULL;

	dnode->context = context;
	dnode->udev_flags = udev_flags;

	return dnode;
}
f3ef15ef 1134
5c9eae96
AK
1135struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree, const char *name,
1136 const char *uuid, uint32_t major, uint32_t minor,
1137 int read_only, int clear_inactive, void *context)
1138{
1139 return dm_tree_add_new_dev_with_udev_flags(dtree, name, uuid, major, minor,
1140 read_only, clear_inactive, context, 0);
f3ef15ef
ZK
1141}
1142
5c9eae96
AK
/*
 * Add the device (major:minor) and, recursively, all its dependencies to the
 * tree, linking it under 'parent'.
 *
 * If the node is already in the tree it is only (re)linked to parent - no
 * recursion.  A device that does not exist or has no dependencies is attached
 * to the bottom level of the tree.
 *
 * Returns the node, or NULL on failure.  Note the ioctl task 'dmt' owns the
 * memory that 'deps' points into, so it is only destroyed at the end.
 */
static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, 0, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;	/* Signal failure to caller */
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}
db208f51 1200
5c9eae96
AK
1201int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
1202{
1203 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
1204}
db208f51 1205
5c9eae96
AK
1206int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
1207 uint32_t minor, uint16_t udev_flags)
1208{
1209 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
db208f51
AK
1210}
1211
/*
 * Rename the device old_name (major:minor) to new_name via DM_DEVICE_RENAME.
 *
 * The udev cookie/flags are attached to the task so the rename is
 * synchronised with udev rule processing.  Returns 1 on success, 0 on error.
 */
static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	/* Open count is not needed for a rename - failure is non-fatal. */
	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}
1246
165e4a11
AK
1247/* FIXME Merge with _suspend_node? */
1248static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1249 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1250 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1251 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1252{
1253 struct dm_task *dmt;
bd90c6b2 1254 int r = 0;
165e4a11
AK
1255
1256 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1257
1258 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1259 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1260 return 0;
1261 }
1262
0b7d16bc
AK
1263 /* FIXME Kernel should fill in name on return instead */
1264 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1265 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1266 goto out;
0b7d16bc
AK
1267 }
1268
165e4a11
AK
1269 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1270 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1271 goto out;
165e4a11
AK
1272 }
1273
1274 if (!dm_task_no_open_count(dmt))
1275 log_error("Failed to disable open_count");
1276
52b84409
AK
1277 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1278 log_error("Failed to set read ahead");
1279
f16aea9e 1280 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1281 goto_out;
bd90c6b2 1282
9a8f192a
ZK
1283 if (!(r = dm_task_run(dmt)))
1284 goto_out;
1285
1286 if (already_suspended)
1287 dec_suspended();
1288
1289 if (!(r = dm_task_get_info(dmt, newinfo)))
1290 stack;
165e4a11 1291
bd90c6b2 1292out:
165e4a11
AK
1293 dm_task_destroy(dmt);
1294
1295 return r;
1296}
1297
/*
 * Suspend the device name (major:minor) via DM_DEVICE_SUSPEND.
 *
 * skip_lockfs suppresses the filesystem sync; no_flush suppresses the device
 * flush.  On success the global suspended counter is incremented and fresh
 * device info is returned in *newinfo.  Returns 1 on success, 0 on error.
 */
static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	/* The following task options are best-effort - failures only logged. */
	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt))) {
		inc_suspended();
		r = dm_task_get_info(dmt, newinfo);
	}

	dm_task_destroy(dmt);

	return r;
}
1338
25e6ab87 1339static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
e0ea24be
ZK
1340{
1341 struct dm_task *dmt;
1342 int r = 0;
1343 uint64_t start, length;
1344 char *type = NULL;
1345 char *params = NULL;
e0ea24be 1346
25e6ab87
ZK
1347 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1348 return_0;
e0ea24be 1349
25e6ab87
ZK
1350 if (!dm_task_set_major(dmt, dnode->info.major) ||
1351 !dm_task_set_minor(dmt, dnode->info.minor)) {
1352 log_error("Failed to set major minor.");
1353 goto out;
e0ea24be
ZK
1354 }
1355
25e6ab87
ZK
1356 if (!dm_task_run(dmt))
1357 goto_out;
1358
1359 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1360
1361 if (type && (strcmp(type, "thin-pool") != 0)) {
c590a9cd 1362 log_error("Expected thin-pool target for %d:%d and got %s.",
25e6ab87 1363 dnode->info.major, dnode->info.minor, type);
e0ea24be
ZK
1364 goto out;
1365 }
1366
25e6ab87 1367 if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
c590a9cd 1368 log_error("Failed to parse transaction_id from %s.", params);
e0ea24be
ZK
1369 goto out;
1370 }
1371
25e6ab87 1372 log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);
e0ea24be 1373
25e6ab87
ZK
1374 r = 1;
1375out:
1376 dm_task_destroy(dmt);
e0ea24be 1377
25e6ab87
ZK
1378 return r;
1379}
e0ea24be 1380
25e6ab87
ZK
/*
 * Format one queued thin-pool message and send it to dnode via
 * DM_DEVICE_TARGET_MSG.
 *
 * The message text follows the kernel thin-pool message syntax
 * (create_snap/create_thin/delete/trim/set_transaction_id).
 * Returns 1 on success, 0 on failure (including unknown message type).
 */
static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];
	int r;

	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	default:
		r = -1;	/* Unknown message type - reported below */
	}

	if (r < 0) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
1448
11f64f0a
ZK
/*
 * Send any queued thin-pool messages for dnode, provided its on-disk
 * transaction id is exactly one behind the expected one.
 *
 * Silently succeeds (returns 1) for nodes that are not single-segment
 * thin pools, that don't match uuid_prefix, or that are already in sync.
 * On any failure the node's children are deactivated as damage control
 * and 0 is returned.
 */
static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	/* Only a live, single-segment node can be a thin pool here. */
	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		goto_bad;

	if (trans_id == seg->transaction_id)
		return 1; /* In sync - skip messages */

	/* Messages may only be replayed from exactly one transaction back. */
	if (trans_id != (seg->transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, seg->transaction_id - 1);
		goto bad; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			goto_bad;

	return 1;
bad:
	/* Try to deactivate */
	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
		log_error("Failed to deactivate %s", dnode->name);

	return 0;
}
1497
18e0f934
AK
/*
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
/*
 * Recursively deactivate all children of dnode whose uuid matches
 * uuid_prefix.  'level' is the recursion depth (0 = top level); open or
 * presuspend-blocked devices only raise an error at level 0, since deeper
 * nodes are presumably internal dependencies that will close once their
 * holders go away.  Returns 1 if everything deactivated, 0 otherwise.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			/* Skip internal non-toplevel opened nodes */
			if (level)
				continue;

			/* When retry is not allowed, error */
			if (!child->dtree->retry_remove) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major, info.minor);
				r = 0;
				continue;
			}

			/* Check toplevel node for holders/mounted fs */
			if (!_check_device_not_in_use(name, &info)) {
				stack;
				r = 0;
				continue;
			}
			/* Go on with retry */
		}

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		/* Retry on remove is only allowed for toplevel nodes. */
		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      (level == 0) ? child->dtree->retry_remove : 0)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			dec_suspended();

		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}
db208f51 1599
18e0f934
AK
1600int dm_tree_deactivate_children(struct dm_tree_node *dnode,
1601 const char *uuid_prefix,
1602 size_t uuid_prefix_len)
1603{
1604 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1605}
1606
/*
 * Suspend all children of dnode whose uuid matches uuid_prefix.
 *
 * Two passes: first suspend the nodes at this level (only once their
 * immediate parents are already suspended), then recurse into child nodes.
 * Nodes that are missing or already suspended are skipped.
 * Returns 1 on success, 0 if any suspend failed.
 */
int dm_tree_suspend_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info, NULL, NULL, NULL) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}
1682
/*
 * Activate (resume) all children of dnode whose uuid matches uuid_prefix.
 *
 * Grandchildren are activated first (recursion), then this level is walked
 * once per activation priority (0..2) so lower-priority nodes resume first.
 * Any pending rename recorded in props.new_name is applied before resuming.
 * Finally, queued thin-pool messages are sent via _node_send_messages().
 * Returns 1 on success, 0 if any resume/rename/message step failed.
 */
int dm_tree_activate_children(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info newinfo;
	const char *name;
	const char *uuid;
	int priority;

	/* Activate children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	handle = NULL;

	for (priority = 0; priority < 3; priority++) {
		while ((child = dm_tree_next_child(&handle, dnode, 0))) {
			if (priority != child->activation_priority)
				continue;

			if (!(uuid = dm_tree_node_get_uuid(child))) {
				stack;
				continue;
			}

			if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
				continue;

			if (!(name = dm_tree_node_get_name(child))) {
				stack;
				continue;
			}

			/* Rename? */
			if (child->props.new_name) {
				if (!_rename_node(name, child->props.new_name, child->info.major,
						  child->info.minor, &child->dtree->cookie,
						  child->udev_flags)) {
					log_error("Failed to rename %s (%" PRIu32
						  ":%" PRIu32 ") to %s", name, child->info.major,
						  child->info.minor, child->props.new_name);
					return 0;
				}
				child->name = child->props.new_name;
				child->props.new_name = NULL;
			}

			/* Nothing to resume if there's no inactive table and not suspended. */
			if (!child->info.inactive_table && !child->info.suspended)
				continue;

			if (!_resume_node(child->name, child->info.major, child->info.minor,
					  child->props.read_ahead, child->props.read_ahead_flags,
					  &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
				log_error("Unable to resume %s (%" PRIu32
					  ":%" PRIu32 ")", child->name, child->info.major,
					  child->info.minor);
				r = 0;
				continue;
			}

			/* Update cached info */
			child->info = newinfo;
		}
	}

	/*
	 * FIXME: Implement delayed error reporting
	 * activation should be stopped only in the case,
	 * the submission of transation_id message fails,
	 * resume should continue further, just whole command
	 * has to report failure.
	 */
	if (r && dnode->props.send_messages &&
	    !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
		stack;

	handle = NULL;

	return r;
}
1777
/*
 * Create the device for dnode via DM_DEVICE_CREATE using its name and uuid.
 *
 * A fixed device number is requested only when props.major is non-zero;
 * the read-only flag is applied when set.  On success dnode->info is
 * refreshed from the kernel.  Returns 1 on success, 0 on error.
 */
static int _create_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;

	log_verbose("Creating %s", dnode->name);

	if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
		log_error("Create dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_name(dmt, dnode->name)) {
		log_error("Failed to set device name for %s", dnode->name);
		goto out;
	}

	if (!dm_task_set_uuid(dmt, dnode->uuid)) {
		log_error("Failed to set uuid for %s", dnode->name);
		goto out;
	}

	if (dnode->props.major &&
	    (!dm_task_set_major(dmt, dnode->props.major) ||
	     !dm_task_set_minor(dmt, dnode->props.minor))) {
		log_error("Failed to set device number for %s creation.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	/* Open count is not needed here - failure is non-fatal. */
	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if ((r = dm_task_run(dmt)))
		r = dm_task_get_info(dmt, &dnode->info);

out:
	dm_task_destroy(dmt);

	return r;
}
1823
1824
b4f1578f 1825static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1826{
1827 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1828 log_error("Failed to format %s device number for %s as dm "
1829 "target (%u,%u)",
1830 node->name, node->uuid, node->info.major, node->info.minor);
1831 return 0;
165e4a11
AK
1832 }
1833
1834 return 1;
1835}
1836
ffa9b6a5
ZK
/*
 * Simplify string emitting code: append a formatted chunk to 'params' at
 * offset p, advancing p.  Assumes 'params' and 'paramsize' are in scope in
 * the caller.  On overflow (dm_snprintf < 0) it makes the enclosing function
 * return -1, so it may only be used in functions returning int.
 */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
ffa9b6a5 1847
/*
 * _emit_areas_line
 *
 * Append the per-area portion of a target table line to 'params'.
 * The emitted format depends on seg->type: replicator-dev areas carry
 * site index plus remote-log parameters, raid areas emit "-" for a
 * missing device, and the default case emits "<dev> <offset>" pairs.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;
	const char *logtype, *synctype;
	unsigned log_parm_count;

	dm_list_iterate_items(area, &seg->areas) {
		switch (seg->type) {
		case SEG_REPLICATOR_DEV:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
			if (first_time)
				EMIT_PARAMS(*pos, " nolog 0");
			else {
				/* Remote devices */
				log_parm_count = (area->flags &
						  (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;

				if (!area->slog) {
					devbuf[0] = 0;	/* Only core log parameters */
					logtype = "core";
				} else {
					devbuf[0] = ' '; /* Extra space before device name */
					if (!_build_dev_string(devbuf + 1,
							       sizeof(devbuf) - 1,
							       area->slog))
						return_0;
					logtype = "disk";
					log_parm_count++; /* Extra sync log device name parameter */
				}

				EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
					    log_parm_count, devbuf, area->region_size);

				synctype = (area->flags & DM_NOSYNC) ?
					    " nosync" : (area->flags & DM_FORCESYNC) ?
					    " sync" : NULL;

				if (synctype)
					EMIT_PARAMS(*pos, "%s", synctype);
			}
			break;
		case SEG_RAID1:
		case SEG_RAID4:
		case SEG_RAID5_LA:
		case SEG_RAID5_RA:
		case SEG_RAID5_LS:
		case SEG_RAID5_RS:
		case SEG_RAID6_ZR:
		case SEG_RAID6_NR:
		case SEG_RAID6_NC:
			/* A missing raid slot is represented as "-" in the table. */
			if (!area->dev_node) {
				EMIT_PARAMS(*pos, " -");
				break;
			}
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %s", devbuf);
			break;
		default:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
				    devbuf, area->offset);
		}

		first_time = 0;
	}

	return 1;
}
1932
b262f3e1
ZK
/*
 * Emit the table line parameters for a "replicator" segment: the replicator
 * log (type, device, size) followed by one entry per remote site with
 * optional fall-behind data/ios/timeout constraints.
 * Returns 1 on success, 0 on failure.
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	/* The log device's size comes from its own (last) segment. */
	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* 2 base parms, plus 2 more when any fall-behind option is set. */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1968
/*
 * Emit the table line parameters for a "mirror" segment.
 *
 * Chooses between the legacy "block_on_error" log argument and the newer
 * "handle_errors" feature, and between native cluster log types and the
 * userspace log, based on the running kernel version.  log_parm_count is
 * carefully accounted to match the emitted log arguments.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table.  It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types.  Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
2096
cac52ca4
JEB
2097static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
2098 uint32_t minor, struct load_segment *seg,
2099 uint64_t *seg_start, char *params,
2100 size_t paramsize)
2101{
ad2432dc 2102 uint32_t i;
cac52ca4
JEB
2103 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
2104 int pos = 0;
2105
2106 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
2107 param_count++;
2108
2109 if (seg->region_size)
2110 param_count += 2;
2111
ad2432dc
MB
2112 /* rebuilds is 64-bit */
2113 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
2114 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 2115
cac52ca4
JEB
2116 if ((seg->type == SEG_RAID1) && seg->stripe_size)
2117 log_error("WARNING: Ignoring RAID1 stripe size");
2118
2119 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
2120 param_count, seg->stripe_size);
2121
2122 if (seg->flags & DM_NOSYNC)
2123 EMIT_PARAMS(pos, " nosync");
2124 else if (seg->flags & DM_FORCESYNC)
2125 EMIT_PARAMS(pos, " sync");
2126
2127 if (seg->region_size)
2128 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2129
f439e65b
JEB
2130 for (i = 0; i < (seg->area_count / 2); i++)
2131 if (seg->rebuilds & (1 << i))
2132 EMIT_PARAMS(pos, " rebuild %u", i);
2133
cac52ca4
JEB
2134 /* Print number of metadata/data device pairs */
2135 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2136
2137 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2138 return_0;
2139
2140 return 1;
2141}
2142
8f26e18c
JEB
/*
 * Format one segment's table line and add it to the reload task.
 *
 * First switch emits the target-specific leading parameters; second
 * switch appends the device/offset area list for the target types that
 * take one.  On success the target is appended to dmt and *seg_start is
 * advanced by the segment size.  Returns 1 on success, 0 on failure.
 */
static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
			      uint32_t minor, struct load_segment *seg,
			      uint64_t *seg_start, char *params,
			      size_t paramsize)
{
	int pos = 0;
	int r;
	int target_type_is_raid = 0;	/* raid targets share one kernel name */
	char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
	char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];

	switch(seg->type) {
	case SEG_ERROR:
	case SEG_ZERO:
	case SEG_LINEAR:
		/* No leading parameters; areas (if any) added below. */
		break;
	case SEG_MIRRORED:
		/* Mirrors are pretty complicated - now in separate function */
		r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
		if (!r)
			return_0;
		break;
	case SEG_REPLICATOR:
		if ((r = _replicator_emit_segment_line(seg, params, paramsize,
						       &pos)) <= 0) {
			stack;
			return r;
		}
		break;
	case SEG_REPLICATOR_DEV:
		/* "<replicator dev> <device index>" */
		if (!seg->replicator || !_build_dev_string(originbuf,
							   sizeof(originbuf),
							   seg->replicator))
			return_0;

		EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
		break;
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_MERGE:
		/* "<origin> <cow> P|N <chunk size>" */
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
			return_0;
		EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
			    seg->persistent ? 'P' : 'N', seg->chunk_size);
		break;
	case SEG_SNAPSHOT_ORIGIN:
		if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
			return_0;
		EMIT_PARAMS(pos, "%s", originbuf);
		break;
	case SEG_STRIPED:
		/* "<#stripes> <stripe size> " then areas appended below */
		EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
		break;
	case SEG_CRYPT:
		/* "cipher[-chainmode][-iv] key iv_offset " then area below */
		EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
			    seg->chainmode ? "-" : "", seg->chainmode ?: "",
			    seg->iv ? "-" : "", seg->iv ?: "", seg->key,
			    seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
			    seg->iv_offset : *seg_start);
		break;
	case SEG_RAID1:
	case SEG_RAID4:
	case SEG_RAID5_LA:
	case SEG_RAID5_RA:
	case SEG_RAID5_LS:
	case SEG_RAID5_RS:
	case SEG_RAID6_ZR:
	case SEG_RAID6_NR:
	case SEG_RAID6_NC:
		target_type_is_raid = 1;
		r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
					    params, paramsize);
		if (!r)
			return_0;

		break;
	case SEG_THIN_POOL:
		/* "<metadata dev> <data dev> <block size> <low water> <opts>" */
		if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
			return_0;
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
			    seg->data_block_size, seg->low_water_mark,
			    seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
		break;
	case SEG_THIN:
		/* "<pool dev> <device id>" */
		if (!_build_dev_string(pool, sizeof(pool), seg->pool))
			return_0;
		EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
		break;
	}

	/* Targets in the second group take a trailing list of areas. */
	switch(seg->type) {
	case SEG_ERROR:
	case SEG_REPLICATOR:
	case SEG_SNAPSHOT:
	case SEG_SNAPSHOT_ORIGIN:
	case SEG_SNAPSHOT_MERGE:
	case SEG_ZERO:
	case SEG_THIN_POOL:
	case SEG_THIN:
		break;
	case SEG_CRYPT:
	case SEG_LINEAR:
	case SEG_REPLICATOR_DEV:
	case SEG_STRIPED:
		if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
			stack;
			return r;
		}
		/* These target types must end up with at least one area. */
		if (!params[0]) {
			log_error("No parameters supplied for %s target "
				  "%u:%u.", dm_segtypes[seg->type].target,
				  major, minor);
			return 0;
		}
		break;
	}

	log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
		  " %" PRIu64 " %s %s", major, minor,
		  *seg_start, seg->size, target_type_is_raid ? "raid" :
		  dm_segtypes[seg->type].target, params);

	/* All raid variants load under the single kernel target name "raid". */
	if (!dm_task_add_target(dmt, *seg_start, seg->size,
				target_type_is_raid ? "raid" :
				dm_segtypes[seg->type].target, params))
		return_0;

	*seg_start += seg->size;

	return 1;
}
2277
ffa9b6a5
ZK
2278#undef EMIT_PARAMS
2279
4b2cae46
AK
2280static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2281 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2282{
2283 char *params;
2284 size_t paramsize = 4096;
2285 int ret;
2286
2287 do {
2288 if (!(params = dm_malloc(paramsize))) {
2289 log_error("Insufficient space for target parameters.");
2290 return 0;
2291 }
2292
12ea7cb1 2293 params[0] = '\0';
4b2cae46
AK
2294 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2295 params, paramsize);
165e4a11
AK
2296 dm_free(params);
2297
2298 if (!ret)
2299 stack;
2300
2301 if (ret >= 0)
2302 return ret;
2303
2304 log_debug("Insufficient space in params[%" PRIsize_t
2305 "] for target parameters.", paramsize);
2306
2307 paramsize *= 2;
2308 } while (paramsize < MAX_TARGET_PARAMSIZE);
2309
2310 log_error("Target parameter size too big. Aborting.");
2311 return 0;
2312}
2313
b4f1578f 2314static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2315{
2316 int r = 0;
2317 struct dm_task *dmt;
2318 struct load_segment *seg;
df390f17 2319 uint64_t seg_start = 0, existing_table_size;
165e4a11 2320
4b2cae46
AK
2321 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2322 dnode->info.major, dnode->info.minor);
165e4a11
AK
2323
2324 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2325 log_error("Reload dm_task creation failed for %s", dnode->name);
2326 return 0;
2327 }
2328
2329 if (!dm_task_set_major(dmt, dnode->info.major) ||
2330 !dm_task_set_minor(dmt, dnode->info.minor)) {
2331 log_error("Failed to set device number for %s reload.", dnode->name);
2332 goto out;
2333 }
2334
2335 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2336 log_error("Failed to set read only flag for %s", dnode->name);
2337 goto out;
2338 }
2339
2340 if (!dm_task_no_open_count(dmt))
2341 log_error("Failed to disable open_count");
2342
2c44337b 2343 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2344 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2345 seg, &seg_start))
b4f1578f 2346 goto_out;
165e4a11 2347
ec289b64
AK
2348 if (!dm_task_suppress_identical_reload(dmt))
2349 log_error("Failed to suppress reload of identical tables.");
2350
2351 if ((r = dm_task_run(dmt))) {
165e4a11 2352 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2353 if (r && !dnode->info.inactive_table)
2354 log_verbose("Suppressed %s identical table reload.",
2355 dnode->name);
bb875bb9 2356
df390f17 2357 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2358 if ((dnode->props.size_changed =
df390f17 2359 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2360 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2361 PRIu64 " for %s", existing_table_size,
bb875bb9 2362 seg_start, dnode->name);
df390f17
AK
2363 /*
2364 * Kernel usually skips size validation on zero-length devices
2365 * now so no need to preload them.
2366 */
2367 /* FIXME In which kernel version did this begin? */
2368 if (!existing_table_size && dnode->props.delay_resume_if_new)
2369 dnode->props.size_changed = 0;
2370 }
ec289b64 2371 }
165e4a11
AK
2372
2373 dnode->props.segment_count = 0;
2374
2375out:
2376 dm_task_destroy(dmt);
2377
2378 return r;
165e4a11
AK
2379}
2380
b4f1578f 2381int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2382 const char *uuid_prefix,
2383 size_t uuid_prefix_len)
165e4a11 2384{
2ca6b865 2385 int r = 1;
165e4a11 2386 void *handle = NULL;
b4f1578f 2387 struct dm_tree_node *child;
165e4a11 2388 struct dm_info newinfo;
566515c0 2389 int update_devs_flag = 0;
165e4a11
AK
2390
2391 /* Preload children first */
b4f1578f 2392 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2393 /* Skip existing non-device-mapper devices */
2394 if (!child->info.exists && child->info.major)
2395 continue;
2396
2397 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2398 if (child->info.exists &&
2399 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2400 continue;
2401
b4f1578f 2402 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2403 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2404 return_0;
165e4a11 2405
165e4a11 2406 /* FIXME Cope if name exists with no uuid? */
3d6782b3
ZK
2407 if (!child->info.exists && !_create_node(child))
2408 return_0;
165e4a11 2409
3d6782b3
ZK
2410 if (!child->info.inactive_table &&
2411 child->props.segment_count &&
2412 !_load_node(child))
2413 return_0;
165e4a11 2414
eb91c4ee
MB
2415 /* Propagate device size change change */
2416 if (child->props.size_changed)
2417 dnode->props.size_changed = 1;
2418
bb875bb9 2419 /* Resume device immediately if it has parents and its size changed */
3776c494 2420 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2421 continue;
2422
7707ea90
AK
2423 if (!child->info.inactive_table && !child->info.suspended)
2424 continue;
2425
fc795d87 2426 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2427 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2428 &newinfo, &child->dtree->cookie, child->udev_flags,
2429 child->info.suspended)) {
165e4a11 2430 log_error("Unable to resume %s (%" PRIu32
fc795d87 2431 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2432 child->info.minor);
2ca6b865 2433 r = 0;
165e4a11
AK
2434 continue;
2435 }
2436
2437 /* Update cached info */
2438 child->info = newinfo;
566515c0
PR
2439 /*
2440 * Prepare for immediate synchronization with udev and flush all stacked
2441 * dev node operations if requested by immediate_dev_node property. But
2442 * finish processing current level in the tree first.
2443 */
2444 if (child->props.immediate_dev_node)
2445 update_devs_flag = 1;
165e4a11
AK
2446 }
2447
566515c0
PR
2448 if (update_devs_flag) {
2449 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2450 stack;
2451 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2452 }
2453
2ca6b865 2454 return r;
165e4a11
AK
2455}
2456
165e4a11
AK
2457/*
2458 * Returns 1 if unsure.
2459 */
b4f1578f 2460int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2461 const char *uuid_prefix,
2462 size_t uuid_prefix_len)
2463{
2464 void *handle = NULL;
b4f1578f 2465 struct dm_tree_node *child = dnode;
165e4a11
AK
2466 const char *uuid;
2467
b4f1578f
AK
2468 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2469 if (!(uuid = dm_tree_node_get_uuid(child))) {
2470 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2471 return 1;
2472 }
2473
87f98002 2474 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2475 return 1;
2476
b4f1578f
AK
2477 if (dm_tree_node_num_children(child, 0))
2478 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2479 }
2480
2481 return 0;
2482}
2483
2484/*
2485 * Target functions
2486 */
b4f1578f 2487static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2488{
2489 struct load_segment *seg;
2490
b4f1578f
AK
2491 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2492 log_error("dtree node segment allocation failed");
165e4a11
AK
2493 return NULL;
2494 }
2495
2496 seg->type = type;
2497 seg->size = size;
2498 seg->area_count = 0;
2c44337b 2499 dm_list_init(&seg->areas);
165e4a11
AK
2500 seg->stripe_size = 0;
2501 seg->persistent = 0;
2502 seg->chunk_size = 0;
2503 seg->cow = NULL;
2504 seg->origin = NULL;
aa6f4e51 2505 seg->merge = NULL;
165e4a11 2506
2c44337b 2507 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2508 dnode->props.segment_count++;
2509
2510 return seg;
2511}
2512
b4f1578f 2513int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2514 uint64_t size,
2515 const char *origin_uuid)
165e4a11
AK
2516{
2517 struct load_segment *seg;
b4f1578f 2518 struct dm_tree_node *origin_node;
165e4a11 2519
b4f1578f
AK
2520 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2521 return_0;
165e4a11 2522
b4f1578f 2523 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2524 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2525 return 0;
2526 }
2527
2528 seg->origin = origin_node;
b4f1578f
AK
2529 if (!_link_tree_nodes(dnode, origin_node))
2530 return_0;
165e4a11 2531
56c28292
AK
2532 /* Resume snapshot origins after new snapshots */
2533 dnode->activation_priority = 1;
2534
165e4a11
AK
2535 return 1;
2536}
2537
aa6f4e51
MS
/*
 * Shared implementation for snapshot and snapshot-merge segments.
 * merge_uuid == NULL selects a plain SEG_SNAPSHOT; otherwise a
 * SEG_SNAPSHOT_MERGE is built and activation priorities are arranged so
 * the merging snapshot resumes after the snapshot-merge device.
 * Returns 1 on success, 0 on failure.
 */
static int _add_snapshot_target(struct dm_tree_node *node,
				uint64_t size,
				const char *origin_uuid,
				const char *cow_uuid,
				const char *merge_uuid,
				int persistent,
				uint32_t chunk_size)
{
	struct load_segment *seg;
	struct dm_tree_node *origin_node, *cow_node, *merge_node;
	unsigned seg_type;

	seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;

	if (!(seg = _add_segment(node, seg_type, size)))
		return_0;

	if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
		log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
		return 0;
	}

	seg->origin = origin_node;
	if (!_link_tree_nodes(node, origin_node))
		return_0;

	if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
		log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
		return 0;
	}

	seg->cow = cow_node;
	if (!_link_tree_nodes(node, cow_node))
		return_0;

	seg->persistent = persistent ? 1 : 0;
	seg->chunk_size = chunk_size;

	if (merge_uuid) {
		if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
			/* not a pure error, merging snapshot may have been deactivated */
			log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
		} else {
			seg->merge = merge_node;
			/* must not link merging snapshot, would undermine activation_priority below */
		}

		/* Resume snapshot-merge (acting origin) after other snapshots */
		node->activation_priority = 1;
		if (seg->merge) {
			/* Resume merging snapshot after snapshot-merge */
			seg->merge->activation_priority = 2;
		}
	}

	return 1;
}
2595
aa6f4e51
MS
2596
/* Add a plain (non-merging) snapshot segment; thin wrapper over
 * _add_snapshot_target() with no merge uuid. */
int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
				     uint64_t size,
				     const char *origin_uuid,
				     const char *cow_uuid,
				     int persistent,
				     uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    NULL, persistent, chunk_size);
}
2607
/* Add a snapshot-merge segment; merge targets are always persistent,
 * hence the hard-coded 1. */
int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *origin_uuid,
					   const char *cow_uuid,
					   const char *merge_uuid,
					   uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    merge_uuid, 1, chunk_size);
}
2618
b4f1578f 2619int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2620 uint64_t size)
165e4a11 2621{
b4f1578f
AK
2622 if (!_add_segment(node, SEG_ERROR, size))
2623 return_0;
165e4a11
AK
2624
2625 return 1;
2626}
2627
b4f1578f 2628int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2629 uint64_t size)
165e4a11 2630{
b4f1578f
AK
2631 if (!_add_segment(node, SEG_ZERO, size))
2632 return_0;
165e4a11
AK
2633
2634 return 1;
2635}
2636
b4f1578f 2637int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2638 uint64_t size)
165e4a11 2639{
b4f1578f
AK
2640 if (!_add_segment(node, SEG_LINEAR, size))
2641 return_0;
165e4a11
AK
2642
2643 return 1;
2644}
2645
b4f1578f 2646int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2647 uint64_t size,
2648 uint32_t stripe_size)
165e4a11
AK
2649{
2650 struct load_segment *seg;
2651
b4f1578f
AK
2652 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2653 return_0;
165e4a11
AK
2654
2655 seg->stripe_size = stripe_size;
2656
2657 return 1;
2658}
2659
12ca060e
MB
2660int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2661 uint64_t size,
2662 const char *cipher,
2663 const char *chainmode,
2664 const char *iv,
2665 uint64_t iv_offset,
2666 const char *key)
2667{
2668 struct load_segment *seg;
2669
2670 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2671 return_0;
2672
2673 seg->cipher = cipher;
2674 seg->chainmode = chainmode;
2675 seg->iv = iv;
2676 seg->iv_offset = iv_offset;
2677 seg->key = key;
2678
2679 return 1;
2680}
2681
b4f1578f 2682int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2683 uint32_t region_size,
08e64ce5 2684 unsigned clustered,
165e4a11 2685 const char *log_uuid,
ce7ed2c0
AK
2686 unsigned area_count,
2687 uint32_t flags)
165e4a11 2688{
908db078 2689 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2690 struct load_segment *seg;
2691
2692 if (!node->props.segment_count) {
b8175c33 2693 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2694 return 0;
2695 }
2696
2c44337b 2697 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2698
24b026e3 2699 if (log_uuid) {
67b25ed4
AK
2700 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2701 log_error("log uuid pool_strdup failed");
2702 return 0;
2703 }
df390f17
AK
2704 if ((flags & DM_CORELOG))
2705 /* For pvmove: immediate resume (for size validation) isn't needed. */
2706 node->props.delay_resume_if_new = 1;
2707 else {
9723090c
AK
2708 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2709 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2710 return 0;
2711 }
2712
566515c0
PR
2713 if (clustered)
2714 log_node->props.immediate_dev_node = 1;
2715
0a99713e
AK
2716 /* The kernel validates the size of disk logs. */
2717 /* FIXME Propagate to any devices below */
2718 log_node->props.delay_resume_if_new = 0;
2719
9723090c
AK
2720 if (!_link_tree_nodes(node, log_node))
2721 return_0;
2722 }
165e4a11
AK
2723 }
2724
2725 seg->log = log_node;
165e4a11
AK
2726 seg->region_size = region_size;
2727 seg->clustered = clustered;
2728 seg->mirror_area_count = area_count;
dbcb64b8 2729 seg->flags = flags;
165e4a11
AK
2730
2731 return 1;
2732}
2733
b4f1578f 2734int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2735 uint64_t size)
165e4a11 2736{
cbecd3cd 2737 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2738 return_0;
165e4a11
AK
2739
2740 return 1;
2741}
2742
cac52ca4
JEB
2743int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2744 uint64_t size,
2745 const char *raid_type,
2746 uint32_t region_size,
2747 uint32_t stripe_size,
f439e65b 2748 uint64_t rebuilds,
cac52ca4
JEB
2749 uint64_t reserved2)
2750{
2751 int i;
2752 struct load_segment *seg = NULL;
2753
2754 for (i = 0; dm_segtypes[i].target && !seg; i++)
2755 if (!strcmp(raid_type, dm_segtypes[i].target))
2756 if (!(seg = _add_segment(node,
2757 dm_segtypes[i].type, size)))
2758 return_0;
2759
b2fa9b43
JEB
2760 if (!seg)
2761 return_0;
2762
cac52ca4
JEB
2763 seg->region_size = region_size;
2764 seg->stripe_size = stripe_size;
2765 seg->area_count = 0;
f439e65b 2766 seg->rebuilds = rebuilds;
cac52ca4
JEB
2767
2768 return 1;
2769}
2770
b262f3e1
ZK
2771int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2772 uint64_t size,
2773 const char *rlog_uuid,
2774 const char *rlog_type,
2775 unsigned rsite_index,
2776 dm_replicator_mode_t mode,
2777 uint32_t async_timeout,
2778 uint64_t fall_behind_data,
2779 uint32_t fall_behind_ios)
2780{
2781 struct load_segment *rseg;
2782 struct replicator_site *rsite;
2783
2784 /* Local site0 - adds replicator segment and links rlog device */
2785 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2786 if (node->props.segment_count) {
2787 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2788 return 0;
2789 }
2790
2791 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2792 return_0;
2793
2794 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2795 log_error("Missing replicator log uuid %s.", rlog_uuid);
2796 return 0;
2797 }
2798
2799 if (!_link_tree_nodes(node, rseg->log))
2800 return_0;
2801
2802 if (strcmp(rlog_type, "ringbuffer") != 0) {
2803 log_error("Unsupported replicator log type %s.", rlog_type);
2804 return 0;
2805 }
2806
2807 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2808 return_0;
2809
2810 dm_list_init(&rseg->rsites);
2811 rseg->rdevice_count = 0;
2812 node->activation_priority = 1;
2813 }
2814
2815 /* Add site to segment */
2816 if (mode == DM_REPLICATOR_SYNC
2817 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2818 log_error("Async parameters passed for synchronnous replicator.");
2819 return 0;
2820 }
2821
2822 if (node->props.segment_count != 1) {
2823 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2824 return 0;
2825 }
2826
2827 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2828 if (rseg->type != SEG_REPLICATOR) {
2829 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2830 dm_segtypes[rseg->type].target);
2831 return 0;
2832 }
2833
2834 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2835 log_error("Failed to allocate remote site segment.");
2836 return 0;
2837 }
2838
2839 dm_list_add(&rseg->rsites, &rsite->list);
2840 rseg->rsite_count++;
2841
2842 rsite->mode = mode;
2843 rsite->async_timeout = async_timeout;
2844 rsite->fall_behind_data = fall_behind_data;
2845 rsite->fall_behind_ios = fall_behind_ios;
2846 rsite->rsite_index = rsite_index;
2847
2848 return 1;
2849}
2850
/* Appends device node to Replicator */
/*
 * For the local site (rsite_index 0) this creates the replicator-dev
 * segment and links it to its replicator node; for remote sites it
 * reuses the existing replicator-dev segment.  In both cases a target
 * area for rdev_uuid is appended, optionally with a sync log device.
 * Returns 1 on success, 0 on failure.
 */
int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *replicator_uuid,
					   uint64_t rdevice_index,
					   const char *rdev_uuid,
					   unsigned rsite_index,
					   const char *slog_uuid,
					   uint32_t slog_flags,
					   uint32_t slog_region_size)
{
	struct seg_area *area;
	struct load_segment *rseg;
	struct load_segment *rep_seg;

	if (rsite_index == REPLICATOR_LOCAL_SITE) {
		/* Site index for local target */
		if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
			return_0;

		if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
			log_error("Missing replicator uuid %s.", replicator_uuid);
			return 0;
		}

		/* Local slink0 for replicator must be always initialized first */
		if (rseg->replicator->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
			return 0;
		}

		rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
		if (rep_seg->type != SEG_REPLICATOR) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
				  dm_segtypes[rep_seg->type].target);
			return 0;
		}
		/* One more device hangs off the replicator. */
		rep_seg->rdevice_count++;

		if (!_link_tree_nodes(node, rseg->replicator))
			return_0;

		rseg->rdevice_index = rdevice_index;
	} else {
		/* Local slink0 for replicator must be always initialized first */
		if (node->props.segment_count != 1) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
			return 0;
		}

		rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
		if (rseg->type != SEG_REPLICATOR_DEV) {
			log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
				  dm_segtypes[rseg->type].target);
			return 0;
		}
	}

	/* A disk sync log needs a uuid; a core log does not. */
	if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
		log_error("Unspecified sync log uuid.");
		return 0;
	}

	if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
		return_0;

	/* The area just appended is the last one on the segment. */
	area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);

	if (!(slog_flags & DM_CORELOG)) {
		if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
			log_error("Couldn't find sync log uuid %s.", slog_uuid);
			return 0;
		}

		if (!_link_tree_nodes(node, area->slog))
			return_0;
	}

	area->flags = slog_flags;
	area->region_size = slog_region_size;
	area->rsite_index = rsite_index;

	return 1;
}
2935
5668fe04
ZK
2936static int _thin_validate_device_id(uint32_t device_id)
2937{
2938 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2939 log_error("Device id %u is higher then %u.",
2940 device_id, DM_THIN_MAX_DEVICE_ID);
2941 return 0;
2942 }
2943
2944 return 1;
2945}
2946
4251236e
ZK
2947int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2948 uint64_t size,
e0ea24be 2949 uint64_t transaction_id,
4251236e 2950 const char *metadata_uuid,
5668fd6a 2951 const char *pool_uuid,
4251236e 2952 uint32_t data_block_size,
e9156c2b 2953 uint64_t low_water_mark,
460c5991 2954 unsigned skip_block_zeroing)
4251236e
ZK
2955{
2956 struct load_segment *seg;
2957
3f53c059 2958 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2959 log_error("Data block size %u is lower then %u sectors.",
3f53c059 2960 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2961 return 0;
2962 }
2963
3f53c059 2964 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2965 log_error("Data block size %u is higher then %u sectors.",
3f53c059 2966 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2967 return 0;
2968 }
2969
2970 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2971 return_0;
2972
2973 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2974 log_error("Missing metadata uuid %s.", metadata_uuid);
2975 return 0;
2976 }
2977
2978 if (!_link_tree_nodes(node, seg->metadata))
2979 return_0;
2980
2981 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2982 log_error("Missing pool uuid %s.", pool_uuid);
2983 return 0;
2984 }
2985
2986 if (!_link_tree_nodes(node, seg->pool))
2987 return_0;
2988
bbcd37e4
ZK
2989 node->props.send_messages = 1;
2990 seg->transaction_id = transaction_id;
e9156c2b 2991 seg->low_water_mark = low_water_mark;
e0ea24be 2992 seg->data_block_size = data_block_size;
460c5991 2993 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
2994 dm_list_init(&seg->thin_messages);
2995
2996 return 1;
2997}
2998
/*
 * Queue a thin-pool management message (create/delete/trim/snapshot/
 * set_transaction_id) on a thin-pool node; the message is sent to the
 * kernel later, after the pool is resumed.  id1/id2 meaning depends on
 * the message type (see the switch below).
 * Returns 1 on success, 0 on validation or allocation failure.
 */
int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
				       dm_thin_message_t type,
				       uint64_t id1, uint64_t id2)
{
	struct load_segment *seg;
	struct thin_message *tm;

	if (node->props.segment_count != 1) {
		log_error("Thin pool node must have only one segment.");
		return 0;
	}

	seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL) {
		log_error("Thin pool node has segment type %s.",
			  dm_segtypes[seg->type].target);
		return 0;
	}

	if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
		log_error("Failed to allocate thin message.");
		return 0;
	}

	switch (type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		/* id1 = new snapshot device id, id2 = origin device id. */
		/* If the thin origin is active, it must be suspend first! */
		if (id1 == id2) {
			log_error("Cannot use same device id for origin and its snapshot.");
			return 0;
		}
		if (!_thin_validate_device_id(id1) ||
		    !_thin_validate_device_id(id2))
			return_0;
		tm->message.u.m_create_snap.device_id = id1;
		tm->message.u.m_create_snap.origin_id = id2;
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		/* id1 = new thin device id; id2 unused. */
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_create_thin.device_id = id1;
		/* Creating an already existing device is tolerated. */
		tm->expected_errno = EEXIST;
		break;
	case DM_THIN_MESSAGE_DELETE:
		/* id1 = device id to delete; id2 unused. */
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_delete.device_id = id1;
		/* Deleting an already removed device is tolerated. */
		tm->expected_errno = ENODATA;
		break;
	case DM_THIN_MESSAGE_TRIM:
		/* id1 = device id, id2 = new size. */
		if (!_thin_validate_device_id(id1))
			return_0;
		tm->message.u.m_trim.device_id = id1;
		tm->message.u.m_trim.new_size = id2;
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		/* id1 = current transaction id, id2 = new transaction id. */
		if ((id1 + 1) != id2) {
			log_error("New transaction id must be sequential.");
			return 0; /* FIXME: Maybe too strict here? */
		}
		if (id2 != seg->transaction_id) {
			log_error("Current transaction id is different from thin pool.");
			return 0; /* FIXME: Maybe too strict here? */
		}
		tm->message.u.m_set_transaction_id.current_id = id1;
		tm->message.u.m_set_transaction_id.new_id = id2;
		break;
	default:
		log_error("Unsupported message type %d.", (int) type);
		return 0;
	}

	tm->message.type = type;
	dm_list_add(&seg->thin_messages, &tm->list);

	return 1;
}
3076
3077int dm_tree_node_add_thin_target(struct dm_tree_node *node,
3078 uint64_t size,
4d25c81b 3079 const char *pool_uuid,
4251236e
ZK
3080 uint32_t device_id)
3081{
4d25c81b 3082 struct dm_tree_node *pool;
4251236e
ZK
3083 struct load_segment *seg;
3084
4d25c81b
ZK
3085 if (!(pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
3086 log_error("Missing thin pool uuid %s.", pool_uuid);
4251236e
ZK
3087 return 0;
3088 }
3089
4d25c81b 3090 if (!_link_tree_nodes(node, pool))
4251236e
ZK
3091 return_0;
3092
6744c143
ZK
3093 if (!_thin_validate_device_id(device_id))
3094 return_0;
4d25c81b 3095
6744c143
ZK
3096 if (!(seg = _add_segment(node, SEG_THIN, size)))
3097 return_0;
4d25c81b 3098
6744c143
ZK
3099 seg->pool = pool;
3100 seg->device_id = device_id;
1419bf1c 3101
4251236e
ZK
3102 return 1;
3103}
3104
077c4d1a
ZK
3105
3106int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
3107 struct dm_status_thin_pool **status)
3108{
3109 struct dm_status_thin_pool *s;
3110
3111 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin_pool)))) {
3112 log_error("Failed to allocate thin_pool status structure.");
3113 return 0;
3114 }
3115
5fd459f0 3116 /* FIXME: add support for held metadata root */
077c4d1a
ZK
3117 if (sscanf(params, "%" PRIu64 " %" PRIu64 "/%" PRIu64 " %" PRIu64 "/%" PRIu64,
3118 &s->transaction_id,
5fd459f0
ZK
3119 &s->used_metadata_blocks,
3120 &s->total_metadata_blocks,
077c4d1a
ZK
3121 &s->used_data_blocks,
3122 &s->total_data_blocks) != 5) {
3123 log_error("Failed to parse thin pool params: %s.", params);
3124 return 0;
3125 }
3126
3127 *status = s;
3128
3129 return 1;
3130}
3131
3132int dm_get_status_thin(struct dm_pool *mem, const char *params,
3133 struct dm_status_thin **status)
3134{
3135 struct dm_status_thin *s;
3136
3137 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin)))) {
3138 log_error("Failed to allocate thin status structure.");
3139 return 0;
3140 }
3141
9568f1b5
ZK
3142 if (strchr(params, '-')) {
3143 s->mapped_sectors = 0;
3144 s->highest_mapped_sector = 0;
3145 } else if (sscanf(params, "%" PRIu64 " %" PRIu64,
077c4d1a
ZK
3146 &s->mapped_sectors,
3147 &s->highest_mapped_sector) != 2) {
3148 log_error("Failed to parse thin params: %s.", params);
3149 return 0;
3150 }
3151
3152 *status = s;
3153
3154 return 1;
3155}
3156
b4f1578f 3157static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
3158{
3159 struct seg_area *area;
3160
b4f1578f 3161 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
3162 log_error("Failed to allocate target segment area.");
3163 return 0;
3164 }
3165
3166 area->dev_node = dev_node;
3167 area->offset = offset;
3168
2c44337b 3169 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
3170 seg->area_count++;
3171
3172 return 1;
3173}
3174
b4f1578f 3175int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
3176 const char *dev_name,
3177 const char *uuid,
3178 uint64_t offset)
165e4a11
AK
3179{
3180 struct load_segment *seg;
3181 struct stat info;
b4f1578f 3182 struct dm_tree_node *dev_node;
165e4a11
AK
3183
3184 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3185 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3186 return 0;
3187 }
3188
3189 if (uuid) {
b4f1578f 3190 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3191 log_error("Couldn't find area uuid %s.", uuid);
3192 return 0;
3193 }
b4f1578f
AK
3194 if (!_link_tree_nodes(node, dev_node))
3195 return_0;
165e4a11 3196 } else {
6d04311e 3197 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3198 log_error("Device %s not found.", dev_name);
3199 return 0;
3200 }
3201
40e5fd8b 3202 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3203 log_error("Device %s is not a block device.", dev_name);
3204 return 0;
3205 }
3206
3207 /* FIXME Check correct macro use */
cda69e17
PR
3208 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3209 MINOR(info.st_rdev), 0)))
b4f1578f 3210 return_0;
165e4a11
AK
3211 }
3212
3213 if (!node->props.segment_count) {
b8175c33 3214 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3215 return 0;
3216 }
3217
2c44337b 3218 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3219
b4f1578f
AK
3220 if (!_add_area(node, seg, dev_node, offset))
3221 return_0;
165e4a11
AK
3222
3223 return 1;
db208f51 3224}
bd90c6b2 3225
6d04311e
JEB
3226int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3227{
3228 struct load_segment *seg;
3229
3230 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3231
415c0690
AK
3232 switch (seg->type) {
3233 case SEG_RAID1:
3234 case SEG_RAID4:
3235 case SEG_RAID5_LA:
3236 case SEG_RAID5_RA:
3237 case SEG_RAID5_LS:
3238 case SEG_RAID5_RS:
3239 case SEG_RAID6_ZR:
3240 case SEG_RAID6_NR:
3241 case SEG_RAID6_NC:
3242 break;
3243 default:
3244 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3245 return 0;
3246 }
3247
6d04311e
JEB
3248 if (!_add_area(node, seg, NULL, offset))
3249 return_0;
3250
3251 return 1;
3252}
This page took 0.493654 seconds and 5 git commands to generate.