]> sourceware.org Git - lvm2.git/blame - libdm/libdm-deptree.c
Thin rename local static
[lvm2.git] / libdm / libdm-deptree.c
CommitLineData
3d0480ed 1/*
4251236e 2 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
3d0480ed
AK
3 *
4 * This file is part of the device-mapper userspace tools.
5 *
6 * This copyrighted material is made available to anyone wishing to use,
7 * modify, copy, or redistribute it subject to the terms and conditions
8 * of the GNU Lesser General Public License v.2.1.
9 *
10 * You should have received a copy of the GNU Lesser General Public License
11 * along with this program; if not, write to the Free Software Foundation,
12 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
13 */
14
3e5b6ed2 15#include "dmlib.h"
3d0480ed
AK
16#include "libdm-targets.h"
17#include "libdm-common.h"
3d0480ed 18#include "kdev_t.h"
0782ad50 19#include "dm-ioctl.h"
3d0480ed
AK
20
21#include <stdarg.h>
22#include <sys/param.h>
8f26e18c 23#include <sys/utsname.h>
3d0480ed 24
165e4a11
AK
25#define MAX_TARGET_PARAMSIZE 500000
26
b262f3e1
ZK
27#define REPLICATOR_LOCAL_SITE 0
28
165e4a11
AK
29/* Supported segment types */
30enum {
12ca060e
MB
31 SEG_CRYPT,
32 SEG_ERROR,
165e4a11
AK
33 SEG_LINEAR,
34 SEG_MIRRORED,
b262f3e1
ZK
35 SEG_REPLICATOR,
36 SEG_REPLICATOR_DEV,
165e4a11
AK
37 SEG_SNAPSHOT,
38 SEG_SNAPSHOT_ORIGIN,
aa6f4e51 39 SEG_SNAPSHOT_MERGE,
165e4a11
AK
40 SEG_STRIPED,
41 SEG_ZERO,
4251236e
ZK
42 SEG_THIN_POOL,
43 SEG_THIN,
cac52ca4
JEB
44 SEG_RAID1,
45 SEG_RAID4,
46 SEG_RAID5_LA,
47 SEG_RAID5_RA,
48 SEG_RAID5_LS,
49 SEG_RAID5_RS,
50 SEG_RAID6_ZR,
51 SEG_RAID6_NR,
52 SEG_RAID6_NC,
53 SEG_LAST,
165e4a11 54};
b4f1578f 55
165e4a11
AK
56/* FIXME Add crypt and multipath support */
57
58struct {
59 unsigned type;
60 const char *target;
61} dm_segtypes[] = {
12ca060e 62 { SEG_CRYPT, "crypt" },
165e4a11
AK
63 { SEG_ERROR, "error" },
64 { SEG_LINEAR, "linear" },
65 { SEG_MIRRORED, "mirror" },
b262f3e1
ZK
66 { SEG_REPLICATOR, "replicator" },
67 { SEG_REPLICATOR_DEV, "replicator-dev" },
165e4a11
AK
68 { SEG_SNAPSHOT, "snapshot" },
69 { SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
aa6f4e51 70 { SEG_SNAPSHOT_MERGE, "snapshot-merge" },
165e4a11
AK
71 { SEG_STRIPED, "striped" },
72 { SEG_ZERO, "zero"},
4251236e
ZK
73 { SEG_THIN_POOL, "thin-pool"},
74 { SEG_THIN, "thin"},
cac52ca4
JEB
75 { SEG_RAID1, "raid1"},
76 { SEG_RAID4, "raid4"},
77 { SEG_RAID5_LA, "raid5_la"},
78 { SEG_RAID5_RA, "raid5_ra"},
79 { SEG_RAID5_LS, "raid5_ls"},
80 { SEG_RAID5_RS, "raid5_rs"},
81 { SEG_RAID6_ZR, "raid6_zr"},
82 { SEG_RAID6_NR, "raid6_nr"},
83 { SEG_RAID6_NC, "raid6_nc"},
ee05be08
ZK
84
85 /*
86 *WARNING: Since 'raid' target overloads this 1:1 mapping table
87 * for search do not add new enum elements past them!
88 */
cac52ca4
JEB
89 { SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
90 { SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
91 { SEG_LAST, NULL },
165e4a11
AK
92};
93
94/* Some segment types have a list of areas of other devices attached */
95struct seg_area {
2c44337b 96 struct dm_list list;
165e4a11 97
b4f1578f 98 struct dm_tree_node *dev_node;
165e4a11
AK
99
100 uint64_t offset;
b262f3e1
ZK
101
102 unsigned rsite_index; /* Replicator site index */
103 struct dm_tree_node *slog; /* Replicator sync log node */
104 uint64_t region_size; /* Replicator sync log size */
105 uint32_t flags; /* Replicator sync log flags */
106};
107
2e732e96
ZK
108struct dm_thin_message {
109 dm_thin_message_t type;
110 union {
111 struct {
112 uint32_t device_id;
113 uint32_t origin_id;
114 } m_create_snap;
115 struct {
116 uint32_t device_id;
117 } m_create_thin;
118 struct {
119 uint32_t device_id;
120 } m_delete;
121 struct {
122 uint64_t current_id;
123 uint64_t new_id;
124 } m_set_transaction_id;
125 struct {
126 uint32_t device_id;
127 uint64_t new_size;
128 } m_trim;
129 } u;
130};
131
25e6ab87
ZK
132struct thin_message {
133 struct dm_list list;
134 struct dm_thin_message message;
660a42bc 135 int expected_errno;
25e6ab87
ZK
136};
137
b262f3e1
ZK
138/* Replicator-log has a list of sites */
139/* FIXME: maybe move to seg_area too? */
140struct replicator_site {
141 struct dm_list list;
142
143 unsigned rsite_index;
144 dm_replicator_mode_t mode;
145 uint32_t async_timeout;
146 uint32_t fall_behind_ios;
147 uint64_t fall_behind_data;
165e4a11
AK
148};
149
150/* Per-segment properties */
151struct load_segment {
2c44337b 152 struct dm_list list;
165e4a11
AK
153
154 unsigned type;
155
156 uint64_t size;
157
b262f3e1
ZK
158 unsigned area_count; /* Linear + Striped + Mirrored + Crypt + Replicator */
159 struct dm_list areas; /* Linear + Striped + Mirrored + Crypt + Replicator */
165e4a11 160
cac52ca4 161 uint32_t stripe_size; /* Striped + raid */
165e4a11
AK
162
163 int persistent; /* Snapshot */
164 uint32_t chunk_size; /* Snapshot */
b4f1578f
AK
165 struct dm_tree_node *cow; /* Snapshot */
166 struct dm_tree_node *origin; /* Snapshot + Snapshot origin */
aa6f4e51 167 struct dm_tree_node *merge; /* Snapshot */
165e4a11 168
b262f3e1 169 struct dm_tree_node *log; /* Mirror + Replicator */
cac52ca4 170 uint32_t region_size; /* Mirror + raid */
165e4a11
AK
171 unsigned clustered; /* Mirror */
172 unsigned mirror_area_count; /* Mirror */
dbcb64b8 173 uint32_t flags; /* Mirror log */
67b25ed4 174 char *uuid; /* Clustered mirror log */
12ca060e
MB
175
176 const char *cipher; /* Crypt */
177 const char *chainmode; /* Crypt */
178 const char *iv; /* Crypt */
179 uint64_t iv_offset; /* Crypt */
180 const char *key; /* Crypt */
b262f3e1
ZK
181
182 const char *rlog_type; /* Replicator */
183 struct dm_list rsites; /* Replicator */
184 unsigned rsite_count; /* Replicator */
185 unsigned rdevice_count; /* Replicator */
186 struct dm_tree_node *replicator;/* Replicator-dev */
187 uint64_t rdevice_index; /* Replicator-dev */
f439e65b 188
40e5fd8b 189 uint64_t rebuilds; /* raid */
4251236e
ZK
190
191 struct dm_tree_node *metadata; /* Thin_pool */
192 struct dm_tree_node *pool; /* Thin_pool, Thin */
25e6ab87 193 struct dm_list thin_messages; /* Thin_pool */
bbcd37e4 194 uint64_t transaction_id; /* Thin_pool */
e9156c2b 195 uint64_t low_water_mark; /* Thin_pool */
e0ea24be 196 uint32_t data_block_size; /* Thin_pool */
460c5991 197 unsigned skip_block_zeroing; /* Thin_pool */
4251236e
ZK
198 uint32_t device_id; /* Thin */
199
165e4a11
AK
200};
201
202/* Per-device properties */
203struct load_properties {
204 int read_only;
205 uint32_t major;
206 uint32_t minor;
207
52b84409
AK
208 uint32_t read_ahead;
209 uint32_t read_ahead_flags;
210
165e4a11 211 unsigned segment_count;
bb875bb9 212 unsigned size_changed;
2c44337b 213 struct dm_list segs;
165e4a11
AK
214
215 const char *new_name;
566515c0
PR
216
217 /* If immediate_dev_node is set to 1, try to create the dev node
218 * as soon as possible (e.g. in preload stage even during traversal
219 * and processing of dm tree). This will also flush all stacked dev
220 * node operations, synchronizing with udev.
221 */
df390f17
AK
222 unsigned immediate_dev_node;
223
224 /*
225 * If the device size changed from zero and this is set,
226 * don't resume the device immediately, even if the device
227 * has parents. This works provided the parents do not
228 * validate the device size and is required by pvmove to
229 * avoid starting the mirror resync operation too early.
230 */
231 unsigned delay_resume_if_new;
bbcd37e4
ZK
232
233 /* Send messages for this node in preload */
234 unsigned send_messages;
165e4a11
AK
235};
236
237/* Two of these used to join two nodes with uses and used_by. */
b4f1578f 238struct dm_tree_link {
2c44337b 239 struct dm_list list;
b4f1578f 240 struct dm_tree_node *node;
165e4a11
AK
241};
242
b4f1578f
AK
243struct dm_tree_node {
244 struct dm_tree *dtree;
3d0480ed 245
40e5fd8b
AK
246 const char *name;
247 const char *uuid;
248 struct dm_info info;
3d0480ed 249
40e5fd8b
AK
250 struct dm_list uses; /* Nodes this node uses */
251 struct dm_list used_by; /* Nodes that use this node */
165e4a11 252
56c28292
AK
253 int activation_priority; /* 0 gets activated first */
254
f16aea9e
PR
255 uint16_t udev_flags; /* Udev control flags */
256
165e4a11
AK
257 void *context; /* External supplied context */
258
259 struct load_properties props; /* For creation/table (re)load */
76d1aec8
ZK
260
261 /*
262 * If presuspend of child node is needed
263 * Note: only direct child is allowed
264 */
265 struct dm_tree_node *presuspend_node;
3d0480ed
AK
266};
267
b4f1578f 268struct dm_tree {
a3f6b2ce
AK
269 struct dm_pool *mem;
270 struct dm_hash_table *devs;
165e4a11 271 struct dm_hash_table *uuids;
b4f1578f 272 struct dm_tree_node root;
c55b1410 273 int skip_lockfs; /* 1 skips lockfs (for non-snapshots) */
787200ef
PR
274 int no_flush; /* 1 sets noflush (mirrors/multipath) */
275 int retry_remove; /* 1 retries remove if not successful */
bd90c6b2 276 uint32_t cookie;
3d0480ed
AK
277};
278
b4f1578f 279struct dm_tree *dm_tree_create(void)
3d0480ed 280{
0395dd22 281 struct dm_pool *dmem;
b4f1578f 282 struct dm_tree *dtree;
3d0480ed 283
0395dd22
ZK
284 if (!(dmem = dm_pool_create("dtree", 1024)) ||
285 !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
286 log_error("Failed to allocate dtree.");
287 if (dmem)
288 dm_pool_destroy(dmem);
3d0480ed
AK
289 return NULL;
290 }
291
b4f1578f 292 dtree->root.dtree = dtree;
2c44337b
AK
293 dm_list_init(&dtree->root.uses);
294 dm_list_init(&dtree->root.used_by);
c55b1410 295 dtree->skip_lockfs = 0;
b9ffd32c 296 dtree->no_flush = 0;
0395dd22 297 dtree->mem = dmem;
3d0480ed 298
b4f1578f
AK
299 if (!(dtree->devs = dm_hash_create(8))) {
300 log_error("dtree hash creation failed");
301 dm_pool_destroy(dtree->mem);
3d0480ed
AK
302 return NULL;
303 }
304
b4f1578f
AK
305 if (!(dtree->uuids = dm_hash_create(32))) {
306 log_error("dtree uuid hash creation failed");
307 dm_hash_destroy(dtree->devs);
308 dm_pool_destroy(dtree->mem);
165e4a11
AK
309 return NULL;
310 }
311
b4f1578f 312 return dtree;
3d0480ed
AK
313}
314
b4f1578f 315void dm_tree_free(struct dm_tree *dtree)
3d0480ed 316{
b4f1578f 317 if (!dtree)
3d0480ed
AK
318 return;
319
b4f1578f
AK
320 dm_hash_destroy(dtree->uuids);
321 dm_hash_destroy(dtree->devs);
322 dm_pool_destroy(dtree->mem);
3d0480ed
AK
323}
324
04bde319
ZK
325static int _nodes_are_linked(const struct dm_tree_node *parent,
326 const struct dm_tree_node *child)
3d0480ed 327{
b4f1578f 328 struct dm_tree_link *dlink;
3d0480ed 329
2c44337b 330 dm_list_iterate_items(dlink, &parent->uses)
3d0480ed
AK
331 if (dlink->node == child)
332 return 1;
3d0480ed
AK
333
334 return 0;
335}
336
2c44337b 337static int _link(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 338{
b4f1578f 339 struct dm_tree_link *dlink;
3d0480ed 340
b4f1578f
AK
341 if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
342 log_error("dtree link allocation failed");
3d0480ed
AK
343 return 0;
344 }
345
346 dlink->node = node;
2c44337b 347 dm_list_add(list, &dlink->list);
3d0480ed
AK
348
349 return 1;
350}
351
b4f1578f
AK
352static int _link_nodes(struct dm_tree_node *parent,
353 struct dm_tree_node *child)
3d0480ed
AK
354{
355 if (_nodes_are_linked(parent, child))
356 return 1;
357
358 if (!_link(&parent->uses, child))
359 return 0;
360
361 if (!_link(&child->used_by, parent))
362 return 0;
363
364 return 1;
365}
366
2c44337b 367static void _unlink(struct dm_list *list, struct dm_tree_node *node)
3d0480ed 368{
b4f1578f 369 struct dm_tree_link *dlink;
3d0480ed 370
2c44337b 371 dm_list_iterate_items(dlink, list)
3d0480ed 372 if (dlink->node == node) {
2c44337b 373 dm_list_del(&dlink->list);
3d0480ed
AK
374 break;
375 }
3d0480ed
AK
376}
377
b4f1578f
AK
378static void _unlink_nodes(struct dm_tree_node *parent,
379 struct dm_tree_node *child)
3d0480ed
AK
380{
381 if (!_nodes_are_linked(parent, child))
382 return;
383
384 _unlink(&parent->uses, child);
385 _unlink(&child->used_by, parent);
386}
387
b4f1578f 388static int _add_to_toplevel(struct dm_tree_node *node)
165e4a11 389{
b4f1578f 390 return _link_nodes(&node->dtree->root, node);
165e4a11
AK
391}
392
b4f1578f 393static void _remove_from_toplevel(struct dm_tree_node *node)
3d0480ed 394{
b1ebf028 395 _unlink_nodes(&node->dtree->root, node);
3d0480ed
AK
396}
397
b4f1578f 398static int _add_to_bottomlevel(struct dm_tree_node *node)
3d0480ed 399{
b4f1578f 400 return _link_nodes(node, &node->dtree->root);
3d0480ed
AK
401}
402
b4f1578f 403static void _remove_from_bottomlevel(struct dm_tree_node *node)
165e4a11 404{
b1ebf028 405 _unlink_nodes(node, &node->dtree->root);
165e4a11
AK
406}
407
b4f1578f 408static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
165e4a11
AK
409{
410 /* Don't link to root node if child already has a parent */
f77736ca 411 if (parent == &parent->dtree->root) {
b4f1578f 412 if (dm_tree_node_num_children(child, 1))
165e4a11
AK
413 return 1;
414 } else
415 _remove_from_toplevel(child);
416
f77736ca 417 if (child == &child->dtree->root) {
b4f1578f 418 if (dm_tree_node_num_children(parent, 0))
165e4a11
AK
419 return 1;
420 } else
421 _remove_from_bottomlevel(parent);
422
423 return _link_nodes(parent, child);
424}
425
b4f1578f 426static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
427 const char *name,
428 const char *uuid,
165e4a11 429 struct dm_info *info,
f16aea9e
PR
430 void *context,
431 uint16_t udev_flags)
3d0480ed 432{
b4f1578f 433 struct dm_tree_node *node;
3d0480ed
AK
434 uint64_t dev;
435
b4f1578f
AK
436 if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
437 log_error("_create_dm_tree_node alloc failed");
3d0480ed
AK
438 return NULL;
439 }
440
b4f1578f 441 node->dtree = dtree;
3d0480ed
AK
442
443 node->name = name;
444 node->uuid = uuid;
445 node->info = *info;
165e4a11 446 node->context = context;
f16aea9e 447 node->udev_flags = udev_flags;
56c28292 448 node->activation_priority = 0;
3d0480ed 449
2c44337b
AK
450 dm_list_init(&node->uses);
451 dm_list_init(&node->used_by);
452 dm_list_init(&node->props.segs);
3d0480ed
AK
453
454 dev = MKDEV(info->major, info->minor);
455
b4f1578f 456 if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
3d0480ed 457 sizeof(dev), node)) {
b4f1578f
AK
458 log_error("dtree node hash insertion failed");
459 dm_pool_free(dtree->mem, node);
3d0480ed
AK
460 return NULL;
461 }
462
165e4a11 463 if (uuid && *uuid &&
b4f1578f
AK
464 !dm_hash_insert(dtree->uuids, uuid, node)) {
465 log_error("dtree uuid hash insertion failed");
466 dm_hash_remove_binary(dtree->devs, (const char *) &dev,
165e4a11 467 sizeof(dev));
b4f1578f 468 dm_pool_free(dtree->mem, node);
165e4a11
AK
469 return NULL;
470 }
471
3d0480ed
AK
472 return node;
473}
474
b4f1578f 475static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
3d0480ed
AK
476 uint32_t major, uint32_t minor)
477{
478 uint64_t dev = MKDEV(major, minor);
479
b4f1578f 480 return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
3d0480ed
AK
481 sizeof(dev));
482}
483
b4f1578f 484static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
485 const char *uuid)
486{
87f98002 487 struct dm_tree_node *node;
2e5ff5d1
AK
488 const char *default_uuid_prefix;
489 size_t default_uuid_prefix_len;
87f98002
AK
490
491 if ((node = dm_hash_lookup(dtree->uuids, uuid)))
492 return node;
493
2e5ff5d1
AK
494 default_uuid_prefix = dm_uuid_prefix();
495 default_uuid_prefix_len = strlen(default_uuid_prefix);
496
497 if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
87f98002
AK
498 return NULL;
499
2e5ff5d1 500 return dm_hash_lookup(dtree->uuids, uuid + default_uuid_prefix_len);
165e4a11
AK
501}
502
a3f6b2ce 503static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
2e5ff5d1 504 const char **name, const char **uuid, unsigned inactive_table,
3d0480ed
AK
505 struct dm_info *info, struct dm_deps **deps)
506{
507 memset(info, 0, sizeof(*info));
508
509 if (!dm_is_dm_major(major)) {
2e5ff5d1
AK
510 if (name)
511 *name = "";
512 if (uuid)
513 *uuid = "";
3d0480ed
AK
514 *deps = NULL;
515 info->major = major;
516 info->minor = minor;
517 info->exists = 0;
165e4a11
AK
518 info->live_table = 0;
519 info->inactive_table = 0;
520 info->read_only = 0;
3d0480ed
AK
521 return 1;
522 }
523
524 if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
525 log_error("deps dm_task creation failed");
526 return 0;
527 }
528
b4f1578f
AK
529 if (!dm_task_set_major(*dmt, major)) {
530 log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
531 major, minor);
3d0480ed 532 goto failed;
b4f1578f 533 }
3d0480ed 534
b4f1578f
AK
535 if (!dm_task_set_minor(*dmt, minor)) {
536 log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
537 major, minor);
3d0480ed 538 goto failed;
b4f1578f 539 }
3d0480ed 540
2e5ff5d1
AK
541 if (inactive_table && !dm_task_query_inactive_table(*dmt)) {
542 log_error("_deps: failed to set inactive table for (%" PRIu32 ":%" PRIu32 ")",
543 major, minor);
544 goto failed;
545 }
546
b4f1578f
AK
547 if (!dm_task_run(*dmt)) {
548 log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
549 major, minor);
3d0480ed 550 goto failed;
b4f1578f 551 }
3d0480ed 552
b4f1578f
AK
553 if (!dm_task_get_info(*dmt, info)) {
554 log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
555 major, minor);
3d0480ed 556 goto failed;
b4f1578f 557 }
3d0480ed
AK
558
559 if (!info->exists) {
2e5ff5d1
AK
560 if (name)
561 *name = "";
562 if (uuid)
563 *uuid = "";
3d0480ed
AK
564 *deps = NULL;
565 } else {
566 if (info->major != major) {
b4f1578f 567 log_error("Inconsistent dtree major number: %u != %u",
3d0480ed
AK
568 major, info->major);
569 goto failed;
570 }
571 if (info->minor != minor) {
b4f1578f 572 log_error("Inconsistent dtree minor number: %u != %u",
3d0480ed
AK
573 minor, info->minor);
574 goto failed;
575 }
2e5ff5d1 576 if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
3d0480ed
AK
577 log_error("name pool_strdup failed");
578 goto failed;
579 }
2e5ff5d1 580 if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
3d0480ed
AK
581 log_error("uuid pool_strdup failed");
582 goto failed;
583 }
584 *deps = dm_task_get_deps(*dmt);
585 }
586
587 return 1;
588
589failed:
590 dm_task_destroy(*dmt);
591 return 0;
592}
593
b4f1578f
AK
594static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
595 struct dm_tree_node *parent,
cda69e17
PR
596 uint32_t major, uint32_t minor,
597 uint16_t udev_flags)
3d0480ed
AK
598{
599 struct dm_task *dmt = NULL;
600 struct dm_info info;
601 struct dm_deps *deps = NULL;
602 const char *name = NULL;
603 const char *uuid = NULL;
b4f1578f 604 struct dm_tree_node *node = NULL;
3d0480ed 605 uint32_t i;
3d0480ed
AK
606 int new = 0;
607
608 /* Already in tree? */
b4f1578f 609 if (!(node = _find_dm_tree_node(dtree, major, minor))) {
2e5ff5d1 610 if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, 0, &info, &deps))
b4f1578f 611 return_NULL;
3d0480ed 612
f16aea9e 613 if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
cda69e17 614 NULL, udev_flags)))
b4f1578f 615 goto_out;
3d0480ed
AK
616 new = 1;
617 }
618
165e4a11
AK
619 if (!_link_tree_nodes(parent, node)) {
620 node = NULL;
b4f1578f 621 goto_out;
165e4a11 622 }
3d0480ed
AK
623
624 /* If node was already in tree, no need to recurse. */
625 if (!new)
165e4a11 626 goto out;
3d0480ed
AK
627
628 /* Can't recurse if not a mapped device or there are no dependencies */
629 if (!node->info.exists || !deps->count) {
b4f1578f
AK
630 if (!_add_to_bottomlevel(node)) {
631 stack;
165e4a11 632 node = NULL;
b4f1578f 633 }
165e4a11 634 goto out;
3d0480ed
AK
635 }
636
637 /* Add dependencies to tree */
638 for (i = 0; i < deps->count; i++)
b4f1578f 639 if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
cda69e17 640 MINOR(deps->device[i]), udev_flags)) {
165e4a11 641 node = NULL;
b4f1578f 642 goto_out;
165e4a11 643 }
3d0480ed 644
3d0480ed
AK
645out:
646 if (dmt)
647 dm_task_destroy(dmt);
648
165e4a11
AK
649 return node;
650}
651
2e5ff5d1
AK
652// FIXME Move fn group down.
653static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
654 struct dm_info *info, struct dm_pool *mem,
655 const char **name, const char **uuid);
656static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
657 uint32_t *cookie, uint16_t udev_flags, int retry);
658static int _node_clear_table(struct dm_tree_node *dnode, uint16_t udev_flags)
165e4a11 659{
2e5ff5d1
AK
660 struct dm_task *dmt = NULL, *deps_dmt = NULL;
661 struct dm_info *info, deps_info;
662 struct dm_deps *deps = NULL;
663 const char *name, *uuid;
664 const char *default_uuid_prefix;
665 size_t default_uuid_prefix_len;
666 uint32_t i;
667 int r = 0;
165e4a11
AK
668
669 if (!(info = &dnode->info)) {
b4f1578f 670 log_error("_node_clear_table failed: missing info");
165e4a11
AK
671 return 0;
672 }
673
b4f1578f
AK
674 if (!(name = dm_tree_node_get_name(dnode))) {
675 log_error("_node_clear_table failed: missing name");
165e4a11
AK
676 return 0;
677 }
678
679 /* Is there a table? */
680 if (!info->exists || !info->inactive_table)
681 return 1;
682
2e5ff5d1
AK
683 /* Get devices used by inactive table that's about to be deleted. */
684 if (!_deps(&deps_dmt, dnode->dtree->mem, info->major, info->minor, NULL, NULL, 1, info, &deps)) {
685 log_error("Failed to obtain dependencies for %s before clearing table.", name);
686 return 0;
687 }
10d0d9c7 688
165e4a11
AK
689 log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
690 name, info->major, info->minor);
691
692 if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
165e4a11 693 log_error("Table clear dm_task creation failed for %s", name);
2e5ff5d1 694 goto_out;
165e4a11
AK
695 }
696
697 if (!dm_task_set_major(dmt, info->major) ||
698 !dm_task_set_minor(dmt, info->minor)) {
699 log_error("Failed to set device number for %s table clear", name);
2e5ff5d1 700 goto_out;
165e4a11
AK
701 }
702
703 r = dm_task_run(dmt);
704
705 if (!dm_task_get_info(dmt, info)) {
b4f1578f 706 log_error("_node_clear_table failed: info missing after running task for %s", name);
165e4a11
AK
707 r = 0;
708 }
709
2e5ff5d1
AK
710 if (!r || !deps)
711 goto_out;
712
713 /*
714 * Remove (incomplete) devices that the inactive table referred to but
715 * which are not in the tree, no longer referenced and don't have a live
716 * table.
717 */
718 default_uuid_prefix = dm_uuid_prefix();
719 default_uuid_prefix_len = strlen(default_uuid_prefix);
720
721 for (i = 0; i < deps->count; i++) {
722 /* If already in tree, assume it's under control */
723 if (_find_dm_tree_node(dnode->dtree, MAJOR(deps->device[i]), MINOR(deps->device[i])))
724 continue;
725
726 if (!_info_by_dev(MAJOR(deps->device[i]), MINOR(deps->device[i]), 1,
727 &deps_info, dnode->dtree->mem, &name, &uuid))
728 continue;
729
730 /* Proceed if device is an 'orphan' - unreferenced and without a live table. */
731 if (!deps_info.exists || deps_info.live_table || deps_info.open_count)
732 continue;
733
734 if (strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
735 continue;
736
737 /* Remove device. */
738 if (!_deactivate_node(name, deps_info.major, deps_info.minor, &dnode->dtree->cookie, udev_flags, 0)) {
739 log_error("Failed to deactivate no-longer-used device %s (%"
740 PRIu32 ":%" PRIu32 ")", name, deps_info.major, deps_info.minor);
741 } else if (deps_info.suspended)
742 dec_suspended();
743 }
744
745out:
746 if (dmt)
747 dm_task_destroy(dmt);
748
749 if (deps_dmt)
750 dm_task_destroy(deps_dmt);
165e4a11 751
3d0480ed
AK
752 return r;
753}
754
2e5ff5d1
AK
755struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
756 const char *name,
757 const char *uuid,
758 uint32_t major,
759 uint32_t minor,
760 int read_only,
761 int clear_inactive,
762 void *context,
763 uint16_t udev_flags)
165e4a11 764{
b4f1578f 765 struct dm_tree_node *dnode;
165e4a11
AK
766 struct dm_info info;
767 const char *name2;
768 const char *uuid2;
769
770 /* Do we need to add node to tree? */
b4f1578f
AK
771 if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
772 if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
165e4a11
AK
773 log_error("name pool_strdup failed");
774 return NULL;
775 }
b4f1578f 776 if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
165e4a11
AK
777 log_error("uuid pool_strdup failed");
778 return NULL;
779 }
780
781 info.major = 0;
782 info.minor = 0;
783 info.exists = 0;
784 info.live_table = 0;
785 info.inactive_table = 0;
786 info.read_only = 0;
787
f16aea9e
PR
788 if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
789 context, 0)))
b4f1578f 790 return_NULL;
165e4a11
AK
791
792 /* Attach to root node until a table is supplied */
b4f1578f
AK
793 if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
794 return_NULL;
165e4a11
AK
795
796 dnode->props.major = major;
797 dnode->props.minor = minor;
798 dnode->props.new_name = NULL;
bb875bb9 799 dnode->props.size_changed = 0;
165e4a11
AK
800 } else if (strcmp(name, dnode->name)) {
801 /* Do we need to rename node? */
b4f1578f 802 if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
165e4a11 803 log_error("name pool_strdup failed");
2e5ff5d1 804 return NULL;
165e4a11
AK
805 }
806 }
807
808 dnode->props.read_only = read_only ? 1 : 0;
52b84409
AK
809 dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
810 dnode->props.read_ahead_flags = 0;
165e4a11 811
2e5ff5d1 812 if (clear_inactive && !_node_clear_table(dnode, udev_flags))
b4f1578f 813 return_NULL;
165e4a11
AK
814
815 dnode->context = context;
2e5ff5d1 816 dnode->udev_flags = udev_flags;
165e4a11
AK
817
818 return dnode;
819}
820
/* Convenience wrapper: add a new device node with no udev flags. */
struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree, const char *name,
					 const char *uuid, uint32_t major, uint32_t minor,
					 int read_only, int clear_inactive, void *context)
{
	return dm_tree_add_new_dev_with_udev_flags(dtree, name, uuid, major, minor,
						   read_only, clear_inactive, context, 0);
}
828
83c606ae
JEB
829void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)
830
831{
832 struct dm_info *dinfo = &dnode->info;
833
834 if (udev_flags != dnode->udev_flags)
835 log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
836 ") udev_flags from 0x%x to 0x%x",
837 dnode->name, dinfo->major, dinfo->minor,
838 dnode->udev_flags, udev_flags);
839 dnode->udev_flags = udev_flags;
840}
f16aea9e 841
52b84409
AK
842void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
843 uint32_t read_ahead,
844 uint32_t read_ahead_flags)
08e64ce5 845{
52b84409
AK
846 dnode->props.read_ahead = read_ahead;
847 dnode->props.read_ahead_flags = read_ahead_flags;
848}
849
76d1aec8
ZK
850void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
851 struct dm_tree_node *presuspend_node)
852{
853 node->presuspend_node = presuspend_node;
854}
855
b4f1578f 856int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
3d0480ed 857{
cda69e17
PR
858 return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
859}
860
861int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
862 uint32_t minor, uint16_t udev_flags)
863{
864 return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
3d0480ed
AK
865}
866
04bde319 867const char *dm_tree_node_get_name(const struct dm_tree_node *node)
3d0480ed
AK
868{
869 return node->info.exists ? node->name : "";
870}
871
04bde319 872const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
3d0480ed
AK
873{
874 return node->info.exists ? node->uuid : "";
875}
876
04bde319 877const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
3d0480ed
AK
878{
879 return &node->info;
880}
881
04bde319 882void *dm_tree_node_get_context(const struct dm_tree_node *node)
165e4a11
AK
883{
884 return node->context;
885}
886
04bde319 887int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
eb91c4ee
MB
888{
889 return dnode->props.size_changed;
890}
891
04bde319 892int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
3d0480ed
AK
893{
894 if (inverted) {
b4f1578f 895 if (_nodes_are_linked(&node->dtree->root, node))
3d0480ed 896 return 0;
2c44337b 897 return dm_list_size(&node->used_by);
3d0480ed
AK
898 }
899
b4f1578f 900 if (_nodes_are_linked(node, &node->dtree->root))
3d0480ed
AK
901 return 0;
902
2c44337b 903 return dm_list_size(&node->uses);
3d0480ed
AK
904}
905
/*
 * Returns 1 if no prefix supplied
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	const char *default_uuid_prefix = dm_uuid_prefix();
	size_t default_uuid_prefix_len = strlen(default_uuid_prefix);

	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)
		return 0;

	if (!strncmp(uuid, default_uuid_prefix, default_uuid_prefix_len))
		return 0;

	if (strncmp(uuid_prefix, default_uuid_prefix, default_uuid_prefix_len))
		return 0;

	/* Retry the match with the default prefix stripped from uuid_prefix. */
	if (!strncmp(uuid, uuid_prefix + default_uuid_prefix_len, uuid_prefix_len - default_uuid_prefix_len))
		return 1;

	return 0;
}
935
690a5da2
AK
936/*
937 * Returns 1 if no children.
938 */
b4f1578f 939static int _children_suspended(struct dm_tree_node *node,
690a5da2
AK
940 uint32_t inverted,
941 const char *uuid_prefix,
942 size_t uuid_prefix_len)
943{
2c44337b 944 struct dm_list *list;
b4f1578f 945 struct dm_tree_link *dlink;
690a5da2
AK
946 const struct dm_info *dinfo;
947 const char *uuid;
948
949 if (inverted) {
b4f1578f 950 if (_nodes_are_linked(&node->dtree->root, node))
690a5da2
AK
951 return 1;
952 list = &node->used_by;
953 } else {
b4f1578f 954 if (_nodes_are_linked(node, &node->dtree->root))
690a5da2
AK
955 return 1;
956 list = &node->uses;
957 }
958
2c44337b 959 dm_list_iterate_items(dlink, list) {
b4f1578f 960 if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
690a5da2
AK
961 stack;
962 continue;
963 }
964
965 /* Ignore if it doesn't belong to this VG */
2b69db1f 966 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2
AK
967 continue;
968
76d1aec8
ZK
969 /* Ignore if parent node wants to presuspend this node */
970 if (dlink->node->presuspend_node == node)
971 continue;
972
b4f1578f
AK
973 if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
974 stack; /* FIXME Is this normal? */
690a5da2
AK
975 return 0;
976 }
977
978 if (!dinfo->suspended)
979 return 0;
980 }
981
982 return 1;
983}
984
3d0480ed
AK
985/*
986 * Set major and minor to zero for root of tree.
987 */
b4f1578f 988struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
3d0480ed
AK
989 uint32_t major,
990 uint32_t minor)
991{
992 if (!major && !minor)
b4f1578f 993 return &dtree->root;
3d0480ed 994
b4f1578f 995 return _find_dm_tree_node(dtree, major, minor);
3d0480ed
AK
996}
997
165e4a11
AK
998/*
999 * Set uuid to NULL for root of tree.
1000 */
b4f1578f 1001struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
165e4a11
AK
1002 const char *uuid)
1003{
1004 if (!uuid || !*uuid)
b4f1578f 1005 return &dtree->root;
165e4a11 1006
b4f1578f 1007 return _find_dm_tree_node_by_uuid(dtree, uuid);
165e4a11
AK
1008}
1009
3d0480ed
AK
1010/*
1011 * First time set *handle to NULL.
1012 * Set inverted to invert the tree.
1013 */
b4f1578f 1014struct dm_tree_node *dm_tree_next_child(void **handle,
04bde319
ZK
1015 const struct dm_tree_node *parent,
1016 uint32_t inverted)
3d0480ed 1017{
2c44337b 1018 struct dm_list **dlink = (struct dm_list **) handle;
04bde319 1019 const struct dm_list *use_list;
3d0480ed
AK
1020
1021 if (inverted)
1022 use_list = &parent->used_by;
1023 else
1024 use_list = &parent->uses;
1025
1026 if (!*dlink)
2c44337b 1027 *dlink = dm_list_first(use_list);
3d0480ed 1028 else
2c44337b 1029 *dlink = dm_list_next(use_list, *dlink);
3d0480ed 1030
2c44337b 1031 return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
3d0480ed
AK
1032}
1033
3e8c6b73 1034/*
a6d97ede 1035 * Deactivate a device with its dependencies if the uuid prefix matches.
3e8c6b73 1036 */
/*
 * Query the kernel for the current dm_info of device major:minor.
 *
 * with_open_count: when 0, open_count retrieval is disabled (cheaper).
 * mem/name/uuid:   when name/uuid are non-NULL, the device's current name
 *                  and uuid are duplicated into pool 'mem' and returned
 *                  through them.
 *
 * Returns 1 on success (info filled in), 0 on failure.
 */
static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
			struct dm_info *info, struct dm_pool *mem,
			const char **name, const char **uuid)
{
	struct dm_task *dmt;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
		log_error("_info_by_dev: dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("_info_by_dev: Failed to set device number");
		dm_task_destroy(dmt);
		return 0;
	}

	/* Failure to disable open_count is logged but not fatal. */
	if (!with_open_count && !dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!(r = dm_task_run(dmt)))
		goto_out;

	if (!(r = dm_task_get_info(dmt, info)))
		goto_out;

	if (name && !(*name = dm_pool_strdup(mem, dm_task_get_name(dmt)))) {
		log_error("name pool_strdup failed");
		r = 0;
		goto_out;
	}

	if (uuid && !(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(dmt)))) {
		log_error("uuid pool_strdup failed");
		r = 0;
		goto_out;
	}

out:
	dm_task_destroy(dmt);

	return r;
}
1081
4ce43894 1082static int _check_device_not_in_use(const char *name, struct dm_info *info)
125712be
PR
1083{
1084 if (!info->exists)
1085 return 1;
1086
1087 /* If sysfs is not used, use open_count information only. */
c3e5b497
PR
1088 if (!*dm_sysfs_dir()) {
1089 if (info->open_count) {
4ce43894
ZK
1090 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
1091 name, info->major, info->minor);
c3e5b497
PR
1092 return 0;
1093 }
1094
1095 return 1;
1096 }
125712be
PR
1097
1098 if (dm_device_has_holders(info->major, info->minor)) {
4ce43894
ZK
1099 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
1100 "by another device.", name, info->major, info->minor);
125712be
PR
1101 return 0;
1102 }
1103
1104 if (dm_device_has_mounted_fs(info->major, info->minor)) {
4ce43894
ZK
1105 log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
1106 "a filesystem in use.", name, info->major, info->minor);
125712be
PR
1107 return 0;
1108 }
1109
1110 return 1;
1111}
1112
f3ef15ef
ZK
/* Check if all parent nodes of given node have open_count == 0 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count: the cached value may be stale,
		 * so re-query the kernel; devices that vanished are skipped. */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
		    !info.exists)
			continue;

		/* Any parent still open means the node is not safely removable. */
		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}
1153
/*
 * Remove (deactivate) the dm device major:minor.
 *
 * cookie/udev_flags: optional udev synchronisation cookie; when cookie is
 *                    NULL no cookie is attached to the ioctl.
 * retry:             when set, enable the kernel's deferred/retried remove.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (cookie)
		if (!dm_task_set_cookie(dmt, cookie, udev_flags))
			goto out;

	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	/* Remove the /dev node ourselves unless udev rules are responsible;
	 * note this runs even if dm_task_run failed (best-effort cleanup). */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}
1195
/*
 * Rename the dm device major:minor from old_name to new_name.
 * A udev cookie is always attached.  Returns 1 on success, 0 on failure.
 */
static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}
1230
165e4a11
AK
1231/* FIXME Merge with _suspend_node? */
1232static int _resume_node(const char *name, uint32_t major, uint32_t minor,
52b84409 1233 uint32_t read_ahead, uint32_t read_ahead_flags,
f16aea9e 1234 struct dm_info *newinfo, uint32_t *cookie,
1840aa09 1235 uint16_t udev_flags, int already_suspended)
165e4a11
AK
1236{
1237 struct dm_task *dmt;
bd90c6b2 1238 int r = 0;
165e4a11
AK
1239
1240 log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);
1241
1242 if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
9a8f192a 1243 log_debug("Suspend dm_task creation failed for %s.", name);
165e4a11
AK
1244 return 0;
1245 }
1246
0b7d16bc
AK
1247 /* FIXME Kernel should fill in name on return instead */
1248 if (!dm_task_set_name(dmt, name)) {
9a8f192a 1249 log_debug("Failed to set device name for %s resumption.", name);
bd90c6b2 1250 goto out;
0b7d16bc
AK
1251 }
1252
165e4a11
AK
1253 if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
1254 log_error("Failed to set device number for %s resumption.", name);
bd90c6b2 1255 goto out;
165e4a11
AK
1256 }
1257
1258 if (!dm_task_no_open_count(dmt))
1259 log_error("Failed to disable open_count");
1260
52b84409
AK
1261 if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
1262 log_error("Failed to set read ahead");
1263
f16aea9e 1264 if (!dm_task_set_cookie(dmt, cookie, udev_flags))
9a8f192a 1265 goto_out;
bd90c6b2 1266
9a8f192a
ZK
1267 if (!(r = dm_task_run(dmt)))
1268 goto_out;
1269
1270 if (already_suspended)
1271 dec_suspended();
1272
1273 if (!(r = dm_task_get_info(dmt, newinfo)))
1274 stack;
165e4a11 1275
bd90c6b2 1276out:
165e4a11
AK
1277 dm_task_destroy(dmt);
1278
1279 return r;
1280}
1281
/*
 * Suspend the dm device major:minor and fill 'newinfo' with its refreshed
 * state.  skip_lockfs skips the filesystem sync; no_flush skips the device
 * flush.  Increments the global suspended-device counter on success.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	/* Flag-setting failures below are logged but non-fatal. */
	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt))) {
		inc_suspended();
		r = dm_task_get_info(dmt, newinfo);
	}

	dm_task_destroy(dmt);

	return r;
}
1322
25e6ab87 1323static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
e0ea24be
ZK
1324{
1325 struct dm_task *dmt;
1326 int r = 0;
1327 uint64_t start, length;
1328 char *type = NULL;
1329 char *params = NULL;
e0ea24be 1330
25e6ab87
ZK
1331 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
1332 return_0;
e0ea24be 1333
25e6ab87
ZK
1334 if (!dm_task_set_major(dmt, dnode->info.major) ||
1335 !dm_task_set_minor(dmt, dnode->info.minor)) {
1336 log_error("Failed to set major minor.");
1337 goto out;
e0ea24be
ZK
1338 }
1339
25e6ab87
ZK
1340 if (!dm_task_run(dmt))
1341 goto_out;
1342
1343 dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
1344
1345 if (type && (strcmp(type, "thin-pool") != 0)) {
c590a9cd 1346 log_error("Expected thin-pool target for %d:%d and got %s.",
25e6ab87 1347 dnode->info.major, dnode->info.minor, type);
e0ea24be
ZK
1348 goto out;
1349 }
1350
25e6ab87 1351 if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
c590a9cd 1352 log_error("Failed to parse transaction_id from %s.", params);
e0ea24be
ZK
1353 goto out;
1354 }
1355
25e6ab87 1356 log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);
e0ea24be 1357
25e6ab87
ZK
1358 r = 1;
1359out:
1360 dm_task_destroy(dmt);
e0ea24be 1361
25e6ab87
ZK
1362 return r;
1363}
e0ea24be 1364
25e6ab87
ZK
/*
 * Format one thin-pool message (create_thin, create_snap, delete, trim,
 * set_transaction_id) and send it to the pool device via the target
 * message ioctl.  Returns 1 on success, 0 on failure.
 */
static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];
	int r;

	/* Render the kernel message string for this message type. */
	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	default:
		/* Unknown message type falls through to the error below. */
		r = -1;
	}

	if (r < 0) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task */
	/* Some messages legitimately fail with a specific errno. */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
1432
11f64f0a
ZK
/*
 * Send any queued thin-pool messages for 'dnode' if it is a single-segment
 * thin-pool matching the uuid prefix.
 *
 * Protocol: the kernel's transaction_id must be exactly one behind the
 * expected value for the queued messages to apply; equal means already in
 * sync (nothing to send), anything else is an error.  On failure the node's
 * children are deactivated as a recovery measure.
 *
 * Returns 1 on success or when there is nothing to do, 0 on failure.
 */
static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	/* Only existing devices with exactly one segment are candidates. */
	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		goto_bad;

	if (trans_id == seg->transaction_id)
		return 1; /* In sync - skip messages */

	if (trans_id != (seg->transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, seg->transaction_id - 1);
		goto bad; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			goto_bad;

	return 1;
bad:
	/* Try to deactivate */
	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
		log_error("Failed to deactivate %s", dnode->name);

	return 0;
}
1481
18e0f934
AK
/*
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
/*
 * Recursively deactivate all children of 'dnode' whose uuid matches the
 * prefix.  'level' is the recursion depth: only at the top level (0) are
 * open devices reported as errors or retried; deeper, still-open internal
 * nodes are silently skipped.  Returns 1 on success, 0 if any child could
 * not be deactivated.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info, NULL, NULL, NULL) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			/* Skip internal non-toplevel opened nodes */
			if (level)
				continue;

			/* When retry is not allowed, error */
			if (!child->dtree->retry_remove) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major, info.minor);
				r = 0;
				continue;
			}

			/* Check toplevel node for holders/mounted fs */
			if (!_check_device_not_in_use(name, &info)) {
				stack;
				r = 0;
				continue;
			}
			/* Go on with retry */
		}

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		/* Retried remove is only enabled for the top level. */
		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      (level == 0) ? child->dtree->retry_remove : 0)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			dec_suspended();

		/* Recurse into grandchildren after this node is gone. */
		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}
db208f51 1583
18e0f934
AK
/* Public entry point: deactivate matching children, starting at level 0. */
int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
}
1590
c55b1410
AK
/* Request that subsequent suspends in this tree skip the filesystem sync. */
void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
{
	dnode->dtree->skip_lockfs = 1;
}
1595
b9ffd32c
AK
/* Request that subsequent suspends in this tree skip the device flush. */
void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
{
	dnode->dtree->no_flush = 1;
}
1600
787200ef
PR
/* Enable retried removal of open devices for this tree's deactivation. */
void dm_tree_retry_remove(struct dm_tree_node *dnode)
{
	dnode->dtree->retry_remove = 1;
}
1605
/*
 * Suspend all children of 'dnode' whose uuid matches the prefix, then
 * recurse into their children.  A node is only suspended once its own
 * immediate parents are already suspended.  Returns 1 on success, 0 if
 * any suspend failed (traversal continues regardless).
 */
int dm_tree_suspend_children(struct dm_tree_node *dnode,
			     const char *uuid_prefix,
			     size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info, newinfo;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	/* Suspend nodes at this level of the tree */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ensure immediate parents are already suspended */
		if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
			continue;

		/* Skip devices that vanished or are already suspended. */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info, NULL, NULL, NULL) ||
		    !info.exists || info.suspended)
			continue;

		if (!_suspend_node(name, info.major, info.minor,
				   child->dtree->skip_lockfs,
				   child->dtree->no_flush, &newinfo)) {
			log_error("Unable to suspend %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		}

		/* Update cached info */
		child->info = newinfo;
	}

	/* Then suspend any child nodes */
	handle = NULL;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	return r;
}
1681
/*
 * Activate (resume) all children of 'dnode' whose uuid matches the prefix.
 * Depth-first: grandchildren are activated before this level.  Nodes at
 * this level are then processed in ascending activation_priority order
 * (0..2), handling any pending rename before resuming devices that have an
 * inactive table or are suspended.  Returns 1 on success, 0 if any resume
 * failed; a failed rename aborts immediately.
 */
int dm_tree_activate_children(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info newinfo;
	const char *name;
	const char *uuid;
	int priority;

	/* Activate children first */
	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (dm_tree_node_num_children(child, 0))
			if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
				return_0;
	}

	handle = NULL;

	/* NOTE: a full pass of dm_tree_next_child leaves the handle back at
	 * NULL, so each priority iteration rescans from the start. */
	for (priority = 0; priority < 3; priority++) {
		while ((child = dm_tree_next_child(&handle, dnode, 0))) {
			if (priority != child->activation_priority)
				continue;

			if (!(uuid = dm_tree_node_get_uuid(child))) {
				stack;
				continue;
			}

			if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
				continue;

			if (!(name = dm_tree_node_get_name(child))) {
				stack;
				continue;
			}

			/* Rename? */
			if (child->props.new_name) {
				if (!_rename_node(name, child->props.new_name, child->info.major,
						  child->info.minor, &child->dtree->cookie,
						  child->udev_flags)) {
					log_error("Failed to rename %s (%" PRIu32
						  ":%" PRIu32 ") to %s", name, child->info.major,
						  child->info.minor, child->props.new_name);
					return 0;
				}
				child->name = child->props.new_name;
				child->props.new_name = NULL;
			}

			/* Nothing to resume unless a new table or suspension is pending. */
			if (!child->info.inactive_table && !child->info.suspended)
				continue;

			if (!_resume_node(child->name, child->info.major, child->info.minor,
					  child->props.read_ahead, child->props.read_ahead_flags,
					  &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
				log_error("Unable to resume %s (%" PRIu32
					  ":%" PRIu32 ")", child->name, child->info.major,
					  child->info.minor);
				r = 0;
				continue;
			}

			/* Update cached info */
			child->info = newinfo;
		}
	}

	handle = NULL;

	return r;
}
1765
/*
 * Create the dm device for 'dnode' (name, uuid, optional fixed device
 * number, optional read-only flag) and cache the resulting dm_info in
 * dnode->info.  Returns 1 on success, 0 on failure.
 */
static int _create_node(struct dm_tree_node *dnode)
{
	int r = 0;
	struct dm_task *dmt;

	log_verbose("Creating %s", dnode->name);

	if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
		log_error("Create dm_task creation failed for %s", dnode->name);
		return 0;
	}

	if (!dm_task_set_name(dmt, dnode->name)) {
		log_error("Failed to set device name for %s", dnode->name);
		goto out;
	}

	if (!dm_task_set_uuid(dmt, dnode->uuid)) {
		log_error("Failed to set uuid for %s", dnode->name);
		goto out;
	}

	/* A zero major means "let the kernel assign the device number". */
	if (dnode->props.major &&
	    (!dm_task_set_major(dmt, dnode->props.major) ||
	     !dm_task_set_minor(dmt, dnode->props.minor))) {
		log_error("Failed to set device number for %s creation.", dnode->name);
		goto out;
	}

	if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
		log_error("Failed to set read only flag for %s", dnode->name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if ((r = dm_task_run(dmt)))
		r = dm_task_get_info(dmt, &dnode->info);

out:
	dm_task_destroy(dmt);

	return r;
}
1811
1812
b4f1578f 1813static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1814{
1815 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1816 log_error("Failed to format %s device number for %s as dm "
1817 "target (%u,%u)",
1818 node->name, node->uuid, node->info.major, node->info.minor);
1819 return 0;
165e4a11
AK
1820 }
1821
1822 return 1;
1823}
1824
ffa9b6a5
ZK
/* Simplify string emitting code: append formatted text to 'params' at
 * offset p, advancing p; on overflow/error, log the stack and make the
 * enclosing function return -1. */
#define EMIT_PARAMS(p, str...)\
do {\
	int w;\
	if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
		stack; /* Out of space */\
		return -1;\
	}\
	p += w;\
} while (0)
ffa9b6a5 1835
3c74075f
JEB
/*
 * _emit_areas_line
 *
 * Append the per-area (device) portion of a target parameter line for
 * seg, advancing *pos.  The layout depends on the segment type:
 * replicator-dev areas carry site/log parameters, raid areas are bare
 * device names (or "-" for a missing leg), everything else is
 * "device offset" pairs.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
			    struct load_segment *seg, char *params,
			    size_t paramsize, int *pos)
{
	struct seg_area *area;
	char devbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned first_time = 1;
	const char *logtype, *synctype;
	unsigned log_parm_count;

	dm_list_iterate_items(area, &seg->areas) {
		switch (seg->type) {
		case SEG_REPLICATOR_DEV:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
			if (first_time)
				/* The first area is the local site: no log. */
				EMIT_PARAMS(*pos, " nolog 0");
			else {
				/* Remote devices */
				log_parm_count = (area->flags &
						  (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;

				if (!area->slog) {
					devbuf[0] = 0;	/* Only core log parameters */
					logtype = "core";
				} else {
					devbuf[0] = ' '; /* Extra space before device name */
					if (!_build_dev_string(devbuf + 1,
							       sizeof(devbuf) - 1,
							       area->slog))
						return_0;
					logtype = "disk";
					log_parm_count++; /* Extra sync log device name parameter */
				}

				EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
					    log_parm_count, devbuf, area->region_size);

				synctype = (area->flags & DM_NOSYNC) ?
					   " nosync" : (area->flags & DM_FORCESYNC) ?
					   " sync" : NULL;

				if (synctype)
					EMIT_PARAMS(*pos, "%s", synctype);
			}
			break;
		case SEG_RAID1:
		case SEG_RAID4:
		case SEG_RAID5_LA:
		case SEG_RAID5_RA:
		case SEG_RAID5_LS:
		case SEG_RAID5_RS:
		case SEG_RAID6_ZR:
		case SEG_RAID6_NR:
		case SEG_RAID6_NC:
			/* A missing raid leg is emitted as "-". */
			if (!area->dev_node) {
				EMIT_PARAMS(*pos, " -");
				break;
			}
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			EMIT_PARAMS(*pos, " %s", devbuf);
			break;
		default:
			if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
				return_0;

			/* "device offset" pairs, space-separated after the first. */
			EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
				    devbuf, area->offset);
		}

		first_time = 0;
	}

	return 1;
}
1920
b262f3e1
ZK
/*
 * Append the replicator target parameter line for seg: the replicator log
 * device (with the size taken from its last load segment) followed by one
 * "blockdev" entry per remote site, including its synchronisation mode and
 * optional fall-behind/timeout constraint.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
					 size_t paramsize, int *pos)
{
	const struct load_segment *rlog_seg;
	struct replicator_site *rsite;
	char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
	unsigned parm_count;

	if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
		return_0;

	rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
				struct load_segment);

	EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
		    seg->rlog_type, rlogbuf, rlog_seg->size);

	dm_list_iterate_items(rsite, &seg->rsites) {
		/* Two extra parameters when any constraint is present. */
		parm_count = (rsite->fall_behind_data
			      || rsite->fall_behind_ios
			      || rsite->async_timeout) ? 4 : 2;

		EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
			    (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");

		/* At most one constraint is emitted, in this precedence order. */
		if (rsite->fall_behind_data)
			EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
		else if (rsite->fall_behind_ios)
			EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
		else if (rsite->async_timeout)
			EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
	}

	return 1;
}
1956
/*
 * Build the dm-mirror target parameter line for seg into params.
 * The log section's syntax depends on the running kernel version
 * (block_on_error vs. handle_errors, and the userspace log type for
 * cluster mirrors), so the kernel release is parsed first.
 *
 * Returns: 1 on success, 0 on failure
 */
static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
				     char *params, size_t paramsize)
{
	int block_on_error = 0;
	int handle_errors = 0;
	int dm_log_userspace = 0;
	struct utsname uts;
	unsigned log_parm_count;
	int pos = 0, parts;
	char logbuf[DM_FORMAT_DEV_BUFSIZE];
	const char *logtype;
	unsigned kmaj = 0, kmin = 0, krel = 0;

	if (uname(&uts) == -1) {
		log_error("Cannot read kernel release version.");
		return 0;
	}

	/* Kernels with a major number of 2 always had 3 parts. */
	parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
	if (parts < 1 || (kmaj < 3 && parts < 3)) {
		log_error("Wrong kernel release version %s.", uts.release);
		return 0;
	}

	if ((seg->flags & DM_BLOCK_ON_ERROR)) {
		/*
		 * Originally, block_on_error was an argument to the log
		 * portion of the mirror CTR table.  It was renamed to
		 * "handle_errors" and now resides in the 'features'
		 * section of the mirror CTR table (i.e. at the end).
		 *
		 * We can identify whether to use "block_on_error" or
		 * "handle_errors" by the dm-mirror module's version
		 * number (>= 1.12) or by the kernel version (>= 2.6.22).
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
			handle_errors = 1;
		else
			block_on_error = 1;
	}

	if (seg->clustered) {
		/* Cluster mirrors require a UUID */
		if (!seg->uuid)
			return_0;

		/*
		 * Cluster mirrors used to have their own log
		 * types.  Now they are accessed through the
		 * userspace log type.
		 *
		 * The dm-log-userspace module was added to the
		 * 2.6.31 kernel.
		 */
		if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
			dm_log_userspace = 1;
	}

	/* Region size */
	log_parm_count = 1;

	/* [no]sync, block_on_error etc. */
	log_parm_count += hweight32(seg->flags);

	/* "handle_errors" is a feature arg now */
	if (handle_errors)
		log_parm_count--;

	/* DM_CORELOG does not count in the param list */
	if (seg->flags & DM_CORELOG)
		log_parm_count--;

	if (seg->clustered) {
		log_parm_count++; /* For UUID */

		if (!dm_log_userspace)
			EMIT_PARAMS(pos, "clustered-");
		else
			/* For clustered-* type field inserted later */
			log_parm_count++;
	}

	if (!seg->log)
		logtype = "core";
	else {
		logtype = "disk";
		log_parm_count++;
		if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
			return_0;
	}

	if (dm_log_userspace)
		EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
			    log_parm_count, seg->uuid, logtype);
	else
		EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);

	if (seg->log)
		EMIT_PARAMS(pos, " %s", logbuf);

	EMIT_PARAMS(pos, " %u", seg->region_size);

	if (seg->clustered && !dm_log_userspace)
		EMIT_PARAMS(pos, " %s", seg->uuid);

	if ((seg->flags & DM_NOSYNC))
		EMIT_PARAMS(pos, " nosync");
	else if ((seg->flags & DM_FORCESYNC))
		EMIT_PARAMS(pos, " sync");

	if (block_on_error)
		EMIT_PARAMS(pos, " block_on_error");

	EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);

	/* Mirror legs follow the log parameters. */
	if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
		return_0;

	if (handle_errors)
		EMIT_PARAMS(pos, " 1 handle_errors");

	return 1;
}
2084
cac52ca4
JEB
2085static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
2086 uint32_t minor, struct load_segment *seg,
2087 uint64_t *seg_start, char *params,
2088 size_t paramsize)
2089{
ad2432dc 2090 uint32_t i;
cac52ca4
JEB
2091 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
2092 int pos = 0;
2093
2094 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
2095 param_count++;
2096
2097 if (seg->region_size)
2098 param_count += 2;
2099
ad2432dc
MB
2100 /* rebuilds is 64-bit */
2101 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
2102 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 2103
cac52ca4
JEB
2104 if ((seg->type == SEG_RAID1) && seg->stripe_size)
2105 log_error("WARNING: Ignoring RAID1 stripe size");
2106
2107 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
2108 param_count, seg->stripe_size);
2109
2110 if (seg->flags & DM_NOSYNC)
2111 EMIT_PARAMS(pos, " nosync");
2112 else if (seg->flags & DM_FORCESYNC)
2113 EMIT_PARAMS(pos, " sync");
2114
2115 if (seg->region_size)
2116 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2117
f439e65b
JEB
2118 for (i = 0; i < (seg->area_count / 2); i++)
2119 if (seg->rebuilds & (1 << i))
2120 EMIT_PARAMS(pos, " rebuild %u", i);
2121
cac52ca4
JEB
2122 /* Print number of metadata/data device pairs */
2123 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2124
2125 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2126 return_0;
2127
2128 return 1;
2129}
2130
8f26e18c
JEB
2131static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
2132 uint32_t minor, struct load_segment *seg,
2133 uint64_t *seg_start, char *params,
2134 size_t paramsize)
2135{
2136 int pos = 0;
2137 int r;
cac52ca4 2138 int target_type_is_raid = 0;
8f26e18c 2139 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
4251236e 2140 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 2141
8f26e18c
JEB
2142 switch(seg->type) {
2143 case SEG_ERROR:
2144 case SEG_ZERO:
2145 case SEG_LINEAR:
2146 break;
2147 case SEG_MIRRORED:
2148 /* Mirrors are pretty complicated - now in separate function */
beecb1e1 2149 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
3c74075f
JEB
2150 if (!r)
2151 return_0;
165e4a11 2152 break;
b262f3e1
ZK
2153 case SEG_REPLICATOR:
2154 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
2155 &pos)) <= 0) {
2156 stack;
2157 return r;
2158 }
2159 break;
2160 case SEG_REPLICATOR_DEV:
2161 if (!seg->replicator || !_build_dev_string(originbuf,
2162 sizeof(originbuf),
2163 seg->replicator))
2164 return_0;
2165
2166 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
2167 break;
165e4a11 2168 case SEG_SNAPSHOT:
aa6f4e51 2169 case SEG_SNAPSHOT_MERGE:
b4f1578f
AK
2170 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2171 return_0;
2172 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
2173 return_0;
ffa9b6a5
ZK
2174 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
2175 seg->persistent ? 'P' : 'N', seg->chunk_size);
165e4a11
AK
2176 break;
2177 case SEG_SNAPSHOT_ORIGIN:
b4f1578f
AK
2178 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2179 return_0;
ffa9b6a5 2180 EMIT_PARAMS(pos, "%s", originbuf);
165e4a11
AK
2181 break;
2182 case SEG_STRIPED:
609faae9 2183 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
165e4a11 2184 break;
12ca060e 2185 case SEG_CRYPT:
609faae9 2186 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
12ca060e
MB
2187 seg->chainmode ? "-" : "", seg->chainmode ?: "",
2188 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
2189 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
2190 seg->iv_offset : *seg_start);
2191 break;
cac52ca4
JEB
2192 case SEG_RAID1:
2193 case SEG_RAID4:
2194 case SEG_RAID5_LA:
2195 case SEG_RAID5_RA:
2196 case SEG_RAID5_LS:
2197 case SEG_RAID5_RS:
2198 case SEG_RAID6_ZR:
2199 case SEG_RAID6_NR:
2200 case SEG_RAID6_NC:
2201 target_type_is_raid = 1;
2202 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
2203 params, paramsize);
2204 if (!r)
2205 return_0;
2206
2207 break;
4251236e
ZK
2208 case SEG_THIN_POOL:
2209 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
2210 return_0;
2211 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2212 return_0;
2213 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
e9156c2b 2214 seg->data_block_size, seg->low_water_mark,
ac08d9c0 2215 seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
4251236e
ZK
2216 break;
2217 case SEG_THIN:
2218 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2219 return_0;
2220 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
2221 break;
165e4a11
AK
2222 }
2223
2224 switch(seg->type) {
2225 case SEG_ERROR:
b262f3e1 2226 case SEG_REPLICATOR:
165e4a11
AK
2227 case SEG_SNAPSHOT:
2228 case SEG_SNAPSHOT_ORIGIN:
aa6f4e51 2229 case SEG_SNAPSHOT_MERGE:
165e4a11 2230 case SEG_ZERO:
4251236e
ZK
2231 case SEG_THIN_POOL:
2232 case SEG_THIN:
165e4a11 2233 break;
12ca060e 2234 case SEG_CRYPT:
165e4a11 2235 case SEG_LINEAR:
b262f3e1 2236 case SEG_REPLICATOR_DEV:
165e4a11
AK
2237 case SEG_STRIPED:
2238 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
2239 stack;
2240 return r;
2241 }
b6793963
AK
2242 if (!params[0]) {
2243 log_error("No parameters supplied for %s target "
2244 "%u:%u.", dm_segtypes[seg->type].target,
812e10ac 2245 major, minor);
b6793963
AK
2246 return 0;
2247 }
165e4a11
AK
2248 break;
2249 }
2250
4b2cae46
AK
2251 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
2252 " %" PRIu64 " %s %s", major, minor,
f439e65b
JEB
2253 *seg_start, seg->size, target_type_is_raid ? "raid" :
2254 dm_segtypes[seg->type].target, params);
165e4a11 2255
cac52ca4
JEB
2256 if (!dm_task_add_target(dmt, *seg_start, seg->size,
2257 target_type_is_raid ? "raid" :
2258 dm_segtypes[seg->type].target, params))
b4f1578f 2259 return_0;
165e4a11
AK
2260
2261 *seg_start += seg->size;
2262
2263 return 1;
2264}
2265
ffa9b6a5
ZK
2266#undef EMIT_PARAMS
2267
4b2cae46
AK
2268static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2269 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2270{
2271 char *params;
2272 size_t paramsize = 4096;
2273 int ret;
2274
2275 do {
2276 if (!(params = dm_malloc(paramsize))) {
2277 log_error("Insufficient space for target parameters.");
2278 return 0;
2279 }
2280
12ea7cb1 2281 params[0] = '\0';
4b2cae46
AK
2282 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2283 params, paramsize);
165e4a11
AK
2284 dm_free(params);
2285
2286 if (!ret)
2287 stack;
2288
2289 if (ret >= 0)
2290 return ret;
2291
2292 log_debug("Insufficient space in params[%" PRIsize_t
2293 "] for target parameters.", paramsize);
2294
2295 paramsize *= 2;
2296 } while (paramsize < MAX_TARGET_PARAMSIZE);
2297
2298 log_error("Target parameter size too big. Aborting.");
2299 return 0;
2300}
2301
b4f1578f 2302static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2303{
2304 int r = 0;
2305 struct dm_task *dmt;
2306 struct load_segment *seg;
df390f17 2307 uint64_t seg_start = 0, existing_table_size;
165e4a11 2308
4b2cae46
AK
2309 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2310 dnode->info.major, dnode->info.minor);
165e4a11
AK
2311
2312 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2313 log_error("Reload dm_task creation failed for %s", dnode->name);
2314 return 0;
2315 }
2316
2317 if (!dm_task_set_major(dmt, dnode->info.major) ||
2318 !dm_task_set_minor(dmt, dnode->info.minor)) {
2319 log_error("Failed to set device number for %s reload.", dnode->name);
2320 goto out;
2321 }
2322
2323 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2324 log_error("Failed to set read only flag for %s", dnode->name);
2325 goto out;
2326 }
2327
2328 if (!dm_task_no_open_count(dmt))
2329 log_error("Failed to disable open_count");
2330
2c44337b 2331 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2332 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2333 seg, &seg_start))
b4f1578f 2334 goto_out;
165e4a11 2335
ec289b64
AK
2336 if (!dm_task_suppress_identical_reload(dmt))
2337 log_error("Failed to suppress reload of identical tables.");
2338
2339 if ((r = dm_task_run(dmt))) {
165e4a11 2340 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2341 if (r && !dnode->info.inactive_table)
2342 log_verbose("Suppressed %s identical table reload.",
2343 dnode->name);
bb875bb9 2344
df390f17 2345 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2346 if ((dnode->props.size_changed =
df390f17 2347 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2348 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2349 PRIu64 " for %s", existing_table_size,
bb875bb9 2350 seg_start, dnode->name);
df390f17
AK
2351 /*
2352 * Kernel usually skips size validation on zero-length devices
2353 * now so no need to preload them.
2354 */
2355 /* FIXME In which kernel version did this begin? */
2356 if (!existing_table_size && dnode->props.delay_resume_if_new)
2357 dnode->props.size_changed = 0;
2358 }
ec289b64 2359 }
165e4a11
AK
2360
2361 dnode->props.segment_count = 0;
2362
2363out:
2364 dm_task_destroy(dmt);
2365
2366 return r;
165e4a11
AK
2367}
2368
b4f1578f 2369int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2370 const char *uuid_prefix,
2371 size_t uuid_prefix_len)
165e4a11 2372{
2ca6b865 2373 int r = 1;
165e4a11 2374 void *handle = NULL;
b4f1578f 2375 struct dm_tree_node *child;
165e4a11 2376 struct dm_info newinfo;
566515c0 2377 int update_devs_flag = 0;
165e4a11
AK
2378
2379 /* Preload children first */
b4f1578f 2380 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2381 /* Skip existing non-device-mapper devices */
2382 if (!child->info.exists && child->info.major)
2383 continue;
2384
2385 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2386 if (child->info.exists &&
2387 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2388 continue;
2389
b4f1578f 2390 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2391 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2392 return_0;
165e4a11 2393
165e4a11 2394 /* FIXME Cope if name exists with no uuid? */
3d6782b3
ZK
2395 if (!child->info.exists && !_create_node(child))
2396 return_0;
165e4a11 2397
3d6782b3
ZK
2398 if (!child->info.inactive_table &&
2399 child->props.segment_count &&
2400 !_load_node(child))
2401 return_0;
165e4a11 2402
eb91c4ee
MB
2403 /* Propagate device size change change */
2404 if (child->props.size_changed)
2405 dnode->props.size_changed = 1;
2406
bb875bb9 2407 /* Resume device immediately if it has parents and its size changed */
3776c494 2408 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2409 continue;
2410
7707ea90
AK
2411 if (!child->info.inactive_table && !child->info.suspended)
2412 continue;
2413
fc795d87 2414 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2415 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2416 &newinfo, &child->dtree->cookie, child->udev_flags,
2417 child->info.suspended)) {
165e4a11 2418 log_error("Unable to resume %s (%" PRIu32
fc795d87 2419 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2420 child->info.minor);
2ca6b865 2421 r = 0;
165e4a11
AK
2422 continue;
2423 }
2424
2425 /* Update cached info */
2426 child->info = newinfo;
bbcd37e4
ZK
2427 if (child->props.send_messages &&
2428 !(r = _node_send_messages(child, uuid_prefix, uuid_prefix_len))) {
2429 stack;
2430 continue;
2431 }
566515c0
PR
2432 /*
2433 * Prepare for immediate synchronization with udev and flush all stacked
2434 * dev node operations if requested by immediate_dev_node property. But
2435 * finish processing current level in the tree first.
2436 */
2437 if (child->props.immediate_dev_node)
2438 update_devs_flag = 1;
165e4a11
AK
2439 }
2440
bbcd37e4
ZK
2441 if (r && dnode->props.send_messages &&
2442 !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
2443 stack;
165e4a11 2444
566515c0
PR
2445 if (update_devs_flag) {
2446 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2447 stack;
2448 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2449 }
2450
11f64f0a 2451 if (r && !_node_send_messages(dnode, uuid_prefix, uuid_prefix_len)) {
25e6ab87
ZK
2452 stack;
2453 if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
2454 log_error("Failed to deactivate %s", dnode->name);
2455 r = 0;
2456 }
2457
2ca6b865 2458 return r;
165e4a11
AK
2459}
2460
165e4a11
AK
/*
 * Check whether any node in the subtree below dnode has a uuid matching
 * uuid_prefix.
 *
 * Returns 1 if a match is found or if unsure (uuid lookup failed),
 * 0 if no descendant matches.
 */
int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
			      const char *uuid_prefix,
			      size_t uuid_prefix_len)
{
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(uuid = dm_tree_node_get_uuid(child))) {
			log_error("Failed to get uuid for dtree node.");
			return 1;
		}

		if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			return 1;

		/*
		 * FIX: the recursive result was previously discarded, so a
		 * match deeper than one level was never reported.  Propagate
		 * it so the function honours its "any descendant" contract.
		 */
		if (dm_tree_node_num_children(child, 0) &&
		    dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len))
			return 1;
	}

	return 0;
}
2487
2488/*
2489 * Target functions
2490 */
b4f1578f 2491static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2492{
2493 struct load_segment *seg;
2494
b4f1578f
AK
2495 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2496 log_error("dtree node segment allocation failed");
165e4a11
AK
2497 return NULL;
2498 }
2499
2500 seg->type = type;
2501 seg->size = size;
2502 seg->area_count = 0;
2c44337b 2503 dm_list_init(&seg->areas);
165e4a11
AK
2504 seg->stripe_size = 0;
2505 seg->persistent = 0;
2506 seg->chunk_size = 0;
2507 seg->cow = NULL;
2508 seg->origin = NULL;
aa6f4e51 2509 seg->merge = NULL;
165e4a11 2510
2c44337b 2511 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2512 dnode->props.segment_count++;
2513
2514 return seg;
2515}
2516
b4f1578f 2517int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2518 uint64_t size,
2519 const char *origin_uuid)
165e4a11
AK
2520{
2521 struct load_segment *seg;
b4f1578f 2522 struct dm_tree_node *origin_node;
165e4a11 2523
b4f1578f
AK
2524 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2525 return_0;
165e4a11 2526
b4f1578f 2527 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2528 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2529 return 0;
2530 }
2531
2532 seg->origin = origin_node;
b4f1578f
AK
2533 if (!_link_tree_nodes(dnode, origin_node))
2534 return_0;
165e4a11 2535
56c28292
AK
2536 /* Resume snapshot origins after new snapshots */
2537 dnode->activation_priority = 1;
2538
165e4a11
AK
2539 return 1;
2540}
2541
aa6f4e51
MS
2542static int _add_snapshot_target(struct dm_tree_node *node,
2543 uint64_t size,
2544 const char *origin_uuid,
2545 const char *cow_uuid,
2546 const char *merge_uuid,
2547 int persistent,
2548 uint32_t chunk_size)
165e4a11
AK
2549{
2550 struct load_segment *seg;
aa6f4e51
MS
2551 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2552 unsigned seg_type;
2553
2554 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
165e4a11 2555
aa6f4e51 2556 if (!(seg = _add_segment(node, seg_type, size)))
b4f1578f 2557 return_0;
165e4a11 2558
b4f1578f 2559 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
165e4a11
AK
2560 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2561 return 0;
2562 }
2563
2564 seg->origin = origin_node;
b4f1578f
AK
2565 if (!_link_tree_nodes(node, origin_node))
2566 return_0;
165e4a11 2567
b4f1578f 2568 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
aa6f4e51 2569 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
165e4a11
AK
2570 return 0;
2571 }
2572
2573 seg->cow = cow_node;
b4f1578f
AK
2574 if (!_link_tree_nodes(node, cow_node))
2575 return_0;
165e4a11
AK
2576
2577 seg->persistent = persistent ? 1 : 0;
2578 seg->chunk_size = chunk_size;
2579
aa6f4e51
MS
2580 if (merge_uuid) {
2581 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2582 /* not a pure error, merging snapshot may have been deactivated */
2583 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2584 } else {
2585 seg->merge = merge_node;
2586 /* must not link merging snapshot, would undermine activation_priority below */
2587 }
2588
2589 /* Resume snapshot-merge (acting origin) after other snapshots */
2590 node->activation_priority = 1;
2591 if (seg->merge) {
2592 /* Resume merging snapshot after snapshot-merge */
2593 seg->merge->activation_priority = 2;
2594 }
2595 }
2596
165e4a11
AK
2597 return 1;
2598}
2599
aa6f4e51
MS
2600
2601int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2602 uint64_t size,
2603 const char *origin_uuid,
2604 const char *cow_uuid,
2605 int persistent,
2606 uint32_t chunk_size)
2607{
2608 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2609 NULL, persistent, chunk_size);
2610}
2611
/* Public wrapper: merging snapshot target (always persistent). */
int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
					   uint64_t size,
					   const char *origin_uuid,
					   const char *cow_uuid,
					   const char *merge_uuid,
					   uint32_t chunk_size)
{
	return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
				    merge_uuid, 1, chunk_size);
}
2622
b4f1578f 2623int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2624 uint64_t size)
165e4a11 2625{
b4f1578f
AK
2626 if (!_add_segment(node, SEG_ERROR, size))
2627 return_0;
165e4a11
AK
2628
2629 return 1;
2630}
2631
b4f1578f 2632int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2633 uint64_t size)
165e4a11 2634{
b4f1578f
AK
2635 if (!_add_segment(node, SEG_ZERO, size))
2636 return_0;
165e4a11
AK
2637
2638 return 1;
2639}
2640
b4f1578f 2641int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2642 uint64_t size)
165e4a11 2643{
b4f1578f
AK
2644 if (!_add_segment(node, SEG_LINEAR, size))
2645 return_0;
165e4a11
AK
2646
2647 return 1;
2648}
2649
b4f1578f 2650int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2651 uint64_t size,
2652 uint32_t stripe_size)
165e4a11
AK
2653{
2654 struct load_segment *seg;
2655
b4f1578f
AK
2656 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2657 return_0;
165e4a11
AK
2658
2659 seg->stripe_size = stripe_size;
2660
2661 return 1;
2662}
2663
12ca060e
MB
2664int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2665 uint64_t size,
2666 const char *cipher,
2667 const char *chainmode,
2668 const char *iv,
2669 uint64_t iv_offset,
2670 const char *key)
2671{
2672 struct load_segment *seg;
2673
2674 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2675 return_0;
2676
2677 seg->cipher = cipher;
2678 seg->chainmode = chainmode;
2679 seg->iv = iv;
2680 seg->iv_offset = iv_offset;
2681 seg->key = key;
2682
2683 return 1;
2684}
2685
b4f1578f 2686int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2687 uint32_t region_size,
08e64ce5 2688 unsigned clustered,
165e4a11 2689 const char *log_uuid,
ce7ed2c0
AK
2690 unsigned area_count,
2691 uint32_t flags)
165e4a11 2692{
908db078 2693 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2694 struct load_segment *seg;
2695
2696 if (!node->props.segment_count) {
b8175c33 2697 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2698 return 0;
2699 }
2700
2c44337b 2701 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2702
24b026e3 2703 if (log_uuid) {
67b25ed4
AK
2704 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2705 log_error("log uuid pool_strdup failed");
2706 return 0;
2707 }
df390f17
AK
2708 if ((flags & DM_CORELOG))
2709 /* For pvmove: immediate resume (for size validation) isn't needed. */
2710 node->props.delay_resume_if_new = 1;
2711 else {
9723090c
AK
2712 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2713 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2714 return 0;
2715 }
2716
566515c0
PR
2717 if (clustered)
2718 log_node->props.immediate_dev_node = 1;
2719
0a99713e
AK
2720 /* The kernel validates the size of disk logs. */
2721 /* FIXME Propagate to any devices below */
2722 log_node->props.delay_resume_if_new = 0;
2723
9723090c
AK
2724 if (!_link_tree_nodes(node, log_node))
2725 return_0;
2726 }
165e4a11
AK
2727 }
2728
2729 seg->log = log_node;
165e4a11
AK
2730 seg->region_size = region_size;
2731 seg->clustered = clustered;
2732 seg->mirror_area_count = area_count;
dbcb64b8 2733 seg->flags = flags;
165e4a11
AK
2734
2735 return 1;
2736}
2737
b4f1578f 2738int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2739 uint64_t size)
165e4a11 2740{
cbecd3cd 2741 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2742 return_0;
165e4a11
AK
2743
2744 return 1;
2745}
2746
cac52ca4
JEB
2747int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2748 uint64_t size,
2749 const char *raid_type,
2750 uint32_t region_size,
2751 uint32_t stripe_size,
f439e65b 2752 uint64_t rebuilds,
cac52ca4
JEB
2753 uint64_t reserved2)
2754{
2755 int i;
2756 struct load_segment *seg = NULL;
2757
2758 for (i = 0; dm_segtypes[i].target && !seg; i++)
2759 if (!strcmp(raid_type, dm_segtypes[i].target))
2760 if (!(seg = _add_segment(node,
2761 dm_segtypes[i].type, size)))
2762 return_0;
2763
b2fa9b43
JEB
2764 if (!seg)
2765 return_0;
2766
cac52ca4
JEB
2767 seg->region_size = region_size;
2768 seg->stripe_size = stripe_size;
2769 seg->area_count = 0;
f439e65b 2770 seg->rebuilds = rebuilds;
cac52ca4
JEB
2771
2772 return 1;
2773}
2774
b262f3e1
ZK
2775int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2776 uint64_t size,
2777 const char *rlog_uuid,
2778 const char *rlog_type,
2779 unsigned rsite_index,
2780 dm_replicator_mode_t mode,
2781 uint32_t async_timeout,
2782 uint64_t fall_behind_data,
2783 uint32_t fall_behind_ios)
2784{
2785 struct load_segment *rseg;
2786 struct replicator_site *rsite;
2787
2788 /* Local site0 - adds replicator segment and links rlog device */
2789 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2790 if (node->props.segment_count) {
2791 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2792 return 0;
2793 }
2794
2795 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2796 return_0;
2797
2798 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2799 log_error("Missing replicator log uuid %s.", rlog_uuid);
2800 return 0;
2801 }
2802
2803 if (!_link_tree_nodes(node, rseg->log))
2804 return_0;
2805
2806 if (strcmp(rlog_type, "ringbuffer") != 0) {
2807 log_error("Unsupported replicator log type %s.", rlog_type);
2808 return 0;
2809 }
2810
2811 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2812 return_0;
2813
2814 dm_list_init(&rseg->rsites);
2815 rseg->rdevice_count = 0;
2816 node->activation_priority = 1;
2817 }
2818
2819 /* Add site to segment */
2820 if (mode == DM_REPLICATOR_SYNC
2821 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2822 log_error("Async parameters passed for synchronnous replicator.");
2823 return 0;
2824 }
2825
2826 if (node->props.segment_count != 1) {
2827 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2828 return 0;
2829 }
2830
2831 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2832 if (rseg->type != SEG_REPLICATOR) {
2833 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2834 dm_segtypes[rseg->type].target);
2835 return 0;
2836 }
2837
2838 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2839 log_error("Failed to allocate remote site segment.");
2840 return 0;
2841 }
2842
2843 dm_list_add(&rseg->rsites, &rsite->list);
2844 rseg->rsite_count++;
2845
2846 rsite->mode = mode;
2847 rsite->async_timeout = async_timeout;
2848 rsite->fall_behind_data = fall_behind_data;
2849 rsite->fall_behind_ios = fall_behind_ios;
2850 rsite->rsite_index = rsite_index;
2851
2852 return 1;
2853}
2854
2855/* Appends device node to Replicator */
2856int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2857 uint64_t size,
2858 const char *replicator_uuid,
2859 uint64_t rdevice_index,
2860 const char *rdev_uuid,
2861 unsigned rsite_index,
2862 const char *slog_uuid,
2863 uint32_t slog_flags,
2864 uint32_t slog_region_size)
2865{
2866 struct seg_area *area;
2867 struct load_segment *rseg;
2868 struct load_segment *rep_seg;
2869
2870 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2871 /* Site index for local target */
2872 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2873 return_0;
2874
2875 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2876 log_error("Missing replicator uuid %s.", replicator_uuid);
2877 return 0;
2878 }
2879
2880 /* Local slink0 for replicator must be always initialized first */
2881 if (rseg->replicator->props.segment_count != 1) {
2882 log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
2883 return 0;
2884 }
2885
2886 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2887 if (rep_seg->type != SEG_REPLICATOR) {
2888 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2889 dm_segtypes[rep_seg->type].target);
2890 return 0;
2891 }
2892 rep_seg->rdevice_count++;
2893
2894 if (!_link_tree_nodes(node, rseg->replicator))
2895 return_0;
2896
2897 rseg->rdevice_index = rdevice_index;
2898 } else {
2899 /* Local slink0 for replicator must be always initialized first */
2900 if (node->props.segment_count != 1) {
2901 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
2902 return 0;
2903 }
2904
2905 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2906 if (rseg->type != SEG_REPLICATOR_DEV) {
2907 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
2908 dm_segtypes[rseg->type].target);
2909 return 0;
2910 }
2911 }
2912
2913 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2914 log_error("Unspecified sync log uuid.");
2915 return 0;
2916 }
2917
2918 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2919 return_0;
2920
2921 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2922
2923 if (!(slog_flags & DM_CORELOG)) {
2924 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2925 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2926 return 0;
2927 }
2928
2929 if (!_link_tree_nodes(node, area->slog))
2930 return_0;
2931 }
2932
2933 area->flags = slog_flags;
2934 area->region_size = slog_region_size;
2935 area->rsite_index = rsite_index;
2936
2937 return 1;
2938}
2939
5668fe04
ZK
2940static int _thin_validate_device_id(uint32_t device_id)
2941{
2942 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2943 log_error("Device id %u is higher then %u.",
2944 device_id, DM_THIN_MAX_DEVICE_ID);
2945 return 0;
2946 }
2947
2948 return 1;
2949}
2950
4251236e
ZK
2951int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2952 uint64_t size,
e0ea24be 2953 uint64_t transaction_id,
4251236e 2954 const char *metadata_uuid,
5668fd6a 2955 const char *pool_uuid,
4251236e 2956 uint32_t data_block_size,
e9156c2b 2957 uint64_t low_water_mark,
460c5991 2958 unsigned skip_block_zeroing)
4251236e
ZK
2959{
2960 struct load_segment *seg;
2961
3f53c059 2962 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2963 log_error("Data block size %u is lower then %u sectors.",
3f53c059 2964 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2965 return 0;
2966 }
2967
3f53c059 2968 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2969 log_error("Data block size %u is higher then %u sectors.",
3f53c059 2970 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2971 return 0;
2972 }
2973
2974 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2975 return_0;
2976
2977 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2978 log_error("Missing metadata uuid %s.", metadata_uuid);
2979 return 0;
2980 }
2981
2982 if (!_link_tree_nodes(node, seg->metadata))
2983 return_0;
2984
2985 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2986 log_error("Missing pool uuid %s.", pool_uuid);
2987 return 0;
2988 }
2989
2990 if (!_link_tree_nodes(node, seg->pool))
2991 return_0;
2992
bbcd37e4
ZK
2993 node->props.send_messages = 1;
2994 seg->transaction_id = transaction_id;
e9156c2b 2995 seg->low_water_mark = low_water_mark;
e0ea24be 2996 seg->data_block_size = data_block_size;
460c5991 2997 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
2998 dm_list_init(&seg->thin_messages);
2999
3000 return 1;
3001}
3002
3003int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
2e732e96
ZK
3004 dm_thin_message_t type,
3005 uint64_t id1, uint64_t id2)
25e6ab87
ZK
3006{
3007 struct load_segment *seg;
3008 struct thin_message *tm;
3009
3010 if (node->props.segment_count != 1) {
759b9592 3011 log_error("Thin pool node must have only one segment.");
25e6ab87
ZK
3012 return 0;
3013 }
3014
3015 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
25e6ab87 3016 if (seg->type != SEG_THIN_POOL) {
759b9592 3017 log_error("Thin pool node has segment type %s.",
25e6ab87
ZK
3018 dm_segtypes[seg->type].target);
3019 return 0;
3020 }
3021
3022 if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
3023 log_error("Failed to allocate thin message.");
3024 return 0;
3025 }
3026
2e732e96 3027 switch (type) {
25e6ab87 3028 case DM_THIN_MESSAGE_CREATE_SNAP:
759b9592 3029 /* If the thin origin is active, it must be suspend first! */
2e732e96 3030 if (id1 == id2) {
759b9592 3031 log_error("Cannot use same device id for origin and its snapshot.");
25e6ab87
ZK
3032 return 0;
3033 }
2e732e96
ZK
3034 if (!_thin_validate_device_id(id1) ||
3035 !_thin_validate_device_id(id2))
25e6ab87 3036 return_0;
2e732e96
ZK
3037 tm->message.u.m_create_snap.device_id = id1;
3038 tm->message.u.m_create_snap.origin_id = id2;
25e6ab87
ZK
3039 break;
3040 case DM_THIN_MESSAGE_CREATE_THIN:
2e732e96 3041 if (!_thin_validate_device_id(id1))
25e6ab87 3042 return_0;
2e732e96 3043 tm->message.u.m_create_thin.device_id = id1;
660a42bc 3044 tm->expected_errno = EEXIST;
25e6ab87
ZK
3045 break;
3046 case DM_THIN_MESSAGE_DELETE:
2e732e96 3047 if (!_thin_validate_device_id(id1))
25e6ab87 3048 return_0;
2e732e96 3049 tm->message.u.m_delete.device_id = id1;
660a42bc 3050 tm->expected_errno = ENODATA;
25e6ab87
ZK
3051 break;
3052 case DM_THIN_MESSAGE_TRIM:
2e732e96 3053 if (!_thin_validate_device_id(id1))
25e6ab87 3054 return_0;
2e732e96
ZK
3055 tm->message.u.m_trim.device_id = id1;
3056 tm->message.u.m_trim.new_size = id2;
25e6ab87
ZK
3057 break;
3058 case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
19e3f8c3 3059 if ((id1 + 1) != id2) {
2e732e96
ZK
3060 log_error("New transaction id must be sequential.");
3061 return 0; /* FIXME: Maybe too strict here? */
3062 }
19e3f8c3 3063 if (id2 != seg->transaction_id) {
2e732e96 3064 log_error("Current transaction id is different from thin pool.");
25e6ab87
ZK
3065 return 0; /* FIXME: Maybe too strict here? */
3066 }
2e732e96
ZK
3067 tm->message.u.m_set_transaction_id.current_id = id1;
3068 tm->message.u.m_set_transaction_id.new_id = id2;
25e6ab87
ZK
3069 break;
3070 default:
2e732e96 3071 log_error("Unsupported message type %d.", (int) type);
25e6ab87
ZK
3072 return 0;
3073 }
3074
2e732e96 3075 tm->message.type = type;
25e6ab87 3076 dm_list_add(&seg->thin_messages, &tm->list);
4251236e
ZK
3077
3078 return 1;
3079}
3080
3081int dm_tree_node_add_thin_target(struct dm_tree_node *node,
3082 uint64_t size,
4d25c81b 3083 const char *pool_uuid,
4251236e
ZK
3084 uint32_t device_id)
3085{
4d25c81b 3086 struct dm_tree_node *pool;
4251236e
ZK
3087 struct load_segment *seg;
3088
4d25c81b
ZK
3089 if (!(pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
3090 log_error("Missing thin pool uuid %s.", pool_uuid);
4251236e
ZK
3091 return 0;
3092 }
3093
4d25c81b 3094 if (!_link_tree_nodes(node, pool))
4251236e
ZK
3095 return_0;
3096
6744c143
ZK
3097 if (!_thin_validate_device_id(device_id))
3098 return_0;
4d25c81b 3099
6744c143
ZK
3100 if (!(seg = _add_segment(node, SEG_THIN, size)))
3101 return_0;
4d25c81b 3102
6744c143
ZK
3103 seg->pool = pool;
3104 seg->device_id = device_id;
1419bf1c 3105
4251236e
ZK
3106 return 1;
3107}
3108
077c4d1a
ZK
3109
3110int dm_get_status_thin_pool(struct dm_pool *mem, const char *params,
3111 struct dm_status_thin_pool **status)
3112{
3113 struct dm_status_thin_pool *s;
3114
3115 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin_pool)))) {
3116 log_error("Failed to allocate thin_pool status structure.");
3117 return 0;
3118 }
3119
3120 if (sscanf(params, "%" PRIu64 " %" PRIu64 "/%" PRIu64 " %" PRIu64 "/%" PRIu64,
3121 &s->transaction_id,
3122 &s->used_meta_blocks,
3123 &s->total_meta_blocks,
3124 &s->used_data_blocks,
3125 &s->total_data_blocks) != 5) {
3126 log_error("Failed to parse thin pool params: %s.", params);
3127 return 0;
3128 }
3129
3130 *status = s;
3131
3132 return 1;
3133}
3134
3135int dm_get_status_thin(struct dm_pool *mem, const char *params,
3136 struct dm_status_thin **status)
3137{
3138 struct dm_status_thin *s;
3139
3140 if (!(s = dm_pool_zalloc(mem, sizeof(struct dm_status_thin)))) {
3141 log_error("Failed to allocate thin status structure.");
3142 return 0;
3143 }
3144
3145 if (sscanf(params, "%" PRIu64 " %" PRIu64,
3146 &s->mapped_sectors,
3147 &s->highest_mapped_sector) != 2) {
3148 log_error("Failed to parse thin params: %s.", params);
3149 return 0;
3150 }
3151
3152 *status = s;
3153
3154 return 1;
3155}
3156
b4f1578f 3157static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
3158{
3159 struct seg_area *area;
3160
b4f1578f 3161 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
3162 log_error("Failed to allocate target segment area.");
3163 return 0;
3164 }
3165
3166 area->dev_node = dev_node;
3167 area->offset = offset;
3168
2c44337b 3169 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
3170 seg->area_count++;
3171
3172 return 1;
3173}
3174
b4f1578f 3175int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
3176 const char *dev_name,
3177 const char *uuid,
3178 uint64_t offset)
165e4a11
AK
3179{
3180 struct load_segment *seg;
3181 struct stat info;
b4f1578f 3182 struct dm_tree_node *dev_node;
165e4a11
AK
3183
3184 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3185 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3186 return 0;
3187 }
3188
3189 if (uuid) {
b4f1578f 3190 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3191 log_error("Couldn't find area uuid %s.", uuid);
3192 return 0;
3193 }
b4f1578f
AK
3194 if (!_link_tree_nodes(node, dev_node))
3195 return_0;
165e4a11 3196 } else {
6d04311e 3197 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3198 log_error("Device %s not found.", dev_name);
3199 return 0;
3200 }
3201
40e5fd8b 3202 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3203 log_error("Device %s is not a block device.", dev_name);
3204 return 0;
3205 }
3206
3207 /* FIXME Check correct macro use */
cda69e17
PR
3208 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3209 MINOR(info.st_rdev), 0)))
b4f1578f 3210 return_0;
165e4a11
AK
3211 }
3212
3213 if (!node->props.segment_count) {
b8175c33 3214 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3215 return 0;
3216 }
3217
2c44337b 3218 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3219
b4f1578f
AK
3220 if (!_add_area(node, seg, dev_node, offset))
3221 return_0;
165e4a11
AK
3222
3223 return 1;
db208f51 3224}
bd90c6b2 3225
6d04311e
JEB
3226int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3227{
3228 struct load_segment *seg;
3229
3230 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3231
415c0690
AK
3232 switch (seg->type) {
3233 case SEG_RAID1:
3234 case SEG_RAID4:
3235 case SEG_RAID5_LA:
3236 case SEG_RAID5_RA:
3237 case SEG_RAID5_LS:
3238 case SEG_RAID5_RS:
3239 case SEG_RAID6_ZR:
3240 case SEG_RAID6_NR:
3241 case SEG_RAID6_NC:
3242 break;
3243 default:
3244 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3245 return 0;
3246 }
3247
6d04311e
JEB
3248 if (!_add_area(node, seg, NULL, offset))
3249 return_0;
3250
3251 return 1;
3252}
3253
bd90c6b2
AK
3254void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
3255{
3256 node->dtree->cookie = cookie;
3257}
3258
3259uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
3260{
3261 return node->dtree->cookie;
3262}
This page took 0.5205 seconds and 5 git commands to generate.