/*
 * Copyright (C) 2005-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of the device-mapper userspace tools.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "dmlib.h"
#include "libdm-targets.h"
#include "libdm-common.h"
#include "kdev_t.h"
#include "dm-ioctl.h"

#include <stdarg.h>
#include <sys/param.h>
#include <sys/utsname.h>

#define MAX_TARGET_PARAMSIZE 500000

/* FIXME Fix interface so this is used only by LVM */
#define UUID_PREFIX "LVM-"

#define REPLICATOR_LOCAL_SITE 0

/* Supported segment types */
enum {
	SEG_CRYPT,
	SEG_ERROR,
	SEG_LINEAR,
	SEG_MIRRORED,
	SEG_REPLICATOR,
	SEG_REPLICATOR_DEV,
	SEG_SNAPSHOT,
	SEG_SNAPSHOT_ORIGIN,
	SEG_SNAPSHOT_MERGE,
	SEG_STRIPED,
	SEG_ZERO,
	SEG_THIN_POOL,
	SEG_THIN,
	SEG_RAID1,
	SEG_RAID4,
	SEG_RAID5_LA,
	SEG_RAID5_RA,
	SEG_RAID5_LS,
	SEG_RAID5_RS,
	SEG_RAID6_ZR,
	SEG_RAID6_NR,
	SEG_RAID6_NC,
	SEG_LAST,
};

/* FIXME Add crypt and multipath support */

struct {
	unsigned type;
	const char *target;
} dm_segtypes[] = {
	{ SEG_CRYPT, "crypt" },
	{ SEG_ERROR, "error" },
	{ SEG_LINEAR, "linear" },
	{ SEG_MIRRORED, "mirror" },
	{ SEG_REPLICATOR, "replicator" },
	{ SEG_REPLICATOR_DEV, "replicator-dev" },
	{ SEG_SNAPSHOT, "snapshot" },
	{ SEG_SNAPSHOT_ORIGIN, "snapshot-origin" },
	{ SEG_SNAPSHOT_MERGE, "snapshot-merge" },
	{ SEG_STRIPED, "striped" },
	{ SEG_ZERO, "zero"},
	{ SEG_THIN_POOL, "thin-pool"},
	{ SEG_THIN, "thin"},
	{ SEG_RAID1, "raid1"},
	{ SEG_RAID4, "raid4"},
	{ SEG_RAID5_LA, "raid5_la"},
	{ SEG_RAID5_RA, "raid5_ra"},
	{ SEG_RAID5_LS, "raid5_ls"},
	{ SEG_RAID5_RS, "raid5_rs"},
	{ SEG_RAID6_ZR, "raid6_zr"},
	{ SEG_RAID6_NR, "raid6_nr"},
	{ SEG_RAID6_NC, "raid6_nc"},

	/*
	 * WARNING: Since 'raid' target overloads this 1:1 mapping table
	 * for search do not add new enum elements past them!
	 */
	{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
	{ SEG_RAID6_ZR, "raid6"}, /* same as "raid6_zr" */
	{ SEG_LAST, NULL },
};

/* Some segment types have a list of areas of other devices attached */
struct seg_area {
	struct dm_list list;

	struct dm_tree_node *dev_node;

	uint64_t offset;

	unsigned rsite_index;		/* Replicator site index */
	struct dm_tree_node *slog;	/* Replicator sync log node */
	uint64_t region_size;		/* Replicator sync log size */
	uint32_t flags;			/* Replicator sync log flags */
};

struct thin_message {
	struct dm_list list;
	struct dm_thin_message message;
	int expected_errno;
};

/* Replicator-log has a list of sites */
/* FIXME: maybe move to seg_area too? */
struct replicator_site {
	struct dm_list list;

	unsigned rsite_index;
	dm_replicator_mode_t mode;
	uint32_t async_timeout;
	uint32_t fall_behind_ios;
	uint64_t fall_behind_data;
};

/* Per-segment properties */
struct load_segment {
	struct dm_list list;

	unsigned type;

	uint64_t size;

	unsigned area_count;		/* Linear + Striped + Mirrored + Crypt + Replicator */
	struct dm_list areas;		/* Linear + Striped + Mirrored + Crypt + Replicator */

	uint32_t stripe_size;		/* Striped + raid */

	int persistent;			/* Snapshot */
	uint32_t chunk_size;		/* Snapshot */
	struct dm_tree_node *cow;	/* Snapshot */
	struct dm_tree_node *origin;	/* Snapshot + Snapshot origin */
	struct dm_tree_node *merge;	/* Snapshot */

	struct dm_tree_node *log;	/* Mirror + Replicator */
	uint32_t region_size;		/* Mirror + raid */
	unsigned clustered;		/* Mirror */
	unsigned mirror_area_count;	/* Mirror */
	uint32_t flags;			/* Mirror log */
	char *uuid;			/* Clustered mirror log */

	const char *cipher;		/* Crypt */
	const char *chainmode;		/* Crypt */
	const char *iv;			/* Crypt */
	uint64_t iv_offset;		/* Crypt */
	const char *key;		/* Crypt */

	const char *rlog_type;		/* Replicator */
	struct dm_list rsites;		/* Replicator */
	unsigned rsite_count;		/* Replicator */
	unsigned rdevice_count;		/* Replicator */
	struct dm_tree_node *replicator;/* Replicator-dev */
	uint64_t rdevice_index;		/* Replicator-dev */

	uint64_t rebuilds;		/* raid */

	struct dm_tree_node *metadata;	/* Thin_pool */
	struct dm_tree_node *pool;	/* Thin_pool, Thin */
	struct dm_list thin_messages;	/* Thin_pool */
	uint64_t transaction_id;	/* Thin_pool */
	uint64_t low_water_mark;	/* Thin_pool */
	uint32_t data_block_size;	/* Thin_pool */
	unsigned skip_block_zeroing;	/* Thin_pool */
	uint32_t device_id;		/* Thin */

};

/* Per-device properties */
struct load_properties {
	int read_only;
	uint32_t major;
	uint32_t minor;

	uint32_t read_ahead;
	uint32_t read_ahead_flags;

	unsigned segment_count;
	unsigned size_changed;
	struct dm_list segs;

	const char *new_name;

	/* If immediate_dev_node is set to 1, try to create the dev node
	 * as soon as possible (e.g. in preload stage even during traversal
	 * and processing of dm tree). This will also flush all stacked dev
	 * node operations, synchronizing with udev.
	 */
	unsigned immediate_dev_node;

	/*
	 * If the device size changed from zero and this is set,
	 * don't resume the device immediately, even if the device
	 * has parents. This works provided the parents do not
	 * validate the device size and is required by pvmove to
	 * avoid starting the mirror resync operation too early.
	 */
	unsigned delay_resume_if_new;

	/* Send messages for this node in preload */
	unsigned send_messages;
};

/* Two of these used to join two nodes with uses and used_by. */
struct dm_tree_link {
	struct dm_list list;
	struct dm_tree_node *node;
};

struct dm_tree_node {
	struct dm_tree *dtree;

	const char *name;
	const char *uuid;
	struct dm_info info;

	struct dm_list uses;		/* Nodes this node uses */
	struct dm_list used_by;		/* Nodes that use this node */

	int activation_priority;	/* 0 gets activated first */

	uint16_t udev_flags;		/* Udev control flags */

	void *context;			/* External supplied context */

	struct load_properties props;	/* For creation/table (re)load */

	/*
	 * If presuspend of child node is needed
	 * Note: only direct child is allowed
	 */
	struct dm_tree_node *presuspend_node;
};

struct dm_tree {
	struct dm_pool *mem;
	struct dm_hash_table *devs;
	struct dm_hash_table *uuids;
	struct dm_tree_node root;
	int skip_lockfs;		/* 1 skips lockfs (for non-snapshots) */
	int no_flush;			/* 1 sets noflush (mirrors/multipath) */
	int retry_remove;		/* 1 retries remove if not successful */
	uint32_t cookie;
};

struct dm_tree *dm_tree_create(void)
{
	struct dm_pool *dmem;
	struct dm_tree *dtree;

	if (!(dmem = dm_pool_create("dtree", 1024)) ||
	    !(dtree = dm_pool_zalloc(dmem, sizeof(*dtree)))) {
		log_error("Failed to allocate dtree.");
		if (dmem)
			dm_pool_destroy(dmem);
		return NULL;
	}

	dtree->root.dtree = dtree;
	dm_list_init(&dtree->root.uses);
	dm_list_init(&dtree->root.used_by);
	dtree->skip_lockfs = 0;
	dtree->no_flush = 0;
	dtree->mem = dmem;

	if (!(dtree->devs = dm_hash_create(8))) {
		log_error("dtree hash creation failed");
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	if (!(dtree->uuids = dm_hash_create(32))) {
		log_error("dtree uuid hash creation failed");
		dm_hash_destroy(dtree->devs);
		dm_pool_destroy(dtree->mem);
		return NULL;
	}

	return dtree;
}

void dm_tree_free(struct dm_tree *dtree)
{
	if (!dtree)
		return;

	dm_hash_destroy(dtree->uuids);
	dm_hash_destroy(dtree->devs);
	dm_pool_destroy(dtree->mem);
}
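
/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * builds a tree, adds the device of interest by major:minor so that its
 * dependencies are pulled in via _add_dev(), and frees the pool-backed tree
 * when done.  "major"/"minor" are placeholders; error handling abbreviated.
 *
 *	struct dm_tree *dtree;
 *
 *	if (!(dtree = dm_tree_create()))
 *		return 0;
 *	if (!dm_tree_add_dev(dtree, major, minor)) {
 *		dm_tree_free(dtree);
 *		return 0;
 *	}
 *	... walk the tree with dm_tree_next_child() ...
 *	dm_tree_free(dtree);
 */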

static int _nodes_are_linked(const struct dm_tree_node *parent,
			     const struct dm_tree_node *child)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, &parent->uses)
		if (dlink->node == child)
			return 1;

	return 0;
}

static int _link(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	if (!(dlink = dm_pool_alloc(node->dtree->mem, sizeof(*dlink)))) {
		log_error("dtree link allocation failed");
		return 0;
	}

	dlink->node = node;
	dm_list_add(list, &dlink->list);

	return 1;
}

static int _link_nodes(struct dm_tree_node *parent,
		       struct dm_tree_node *child)
{
	if (_nodes_are_linked(parent, child))
		return 1;

	if (!_link(&parent->uses, child))
		return 0;

	if (!_link(&child->used_by, parent))
		return 0;

	return 1;
}

static void _unlink(struct dm_list *list, struct dm_tree_node *node)
{
	struct dm_tree_link *dlink;

	dm_list_iterate_items(dlink, list)
		if (dlink->node == node) {
			dm_list_del(&dlink->list);
			break;
		}
}

static void _unlink_nodes(struct dm_tree_node *parent,
			  struct dm_tree_node *child)
{
	if (!_nodes_are_linked(parent, child))
		return;

	_unlink(&parent->uses, child);
	_unlink(&child->used_by, parent);
}

static int _add_to_toplevel(struct dm_tree_node *node)
{
	return _link_nodes(&node->dtree->root, node);
}

static void _remove_from_toplevel(struct dm_tree_node *node)
{
	_unlink_nodes(&node->dtree->root, node);
}

static int _add_to_bottomlevel(struct dm_tree_node *node)
{
	return _link_nodes(node, &node->dtree->root);
}

static void _remove_from_bottomlevel(struct dm_tree_node *node)
{
	_unlink_nodes(node, &node->dtree->root);
}

static int _link_tree_nodes(struct dm_tree_node *parent, struct dm_tree_node *child)
{
	/* Don't link to root node if child already has a parent */
	if (parent == &parent->dtree->root) {
		if (dm_tree_node_num_children(child, 1))
			return 1;
	} else
		_remove_from_toplevel(child);

	if (child == &child->dtree->root) {
		if (dm_tree_node_num_children(parent, 0))
			return 1;
	} else
		_remove_from_bottomlevel(parent);

	return _link_nodes(parent, child);
}

static struct dm_tree_node *_create_dm_tree_node(struct dm_tree *dtree,
						 const char *name,
						 const char *uuid,
						 struct dm_info *info,
						 void *context,
						 uint16_t udev_flags)
{
	struct dm_tree_node *node;
	uint64_t dev;

	if (!(node = dm_pool_zalloc(dtree->mem, sizeof(*node)))) {
		log_error("_create_dm_tree_node alloc failed");
		return NULL;
	}

	node->dtree = dtree;

	node->name = name;
	node->uuid = uuid;
	node->info = *info;
	node->context = context;
	node->udev_flags = udev_flags;
	node->activation_priority = 0;

	dm_list_init(&node->uses);
	dm_list_init(&node->used_by);
	dm_list_init(&node->props.segs);

	dev = MKDEV(info->major, info->minor);

	if (!dm_hash_insert_binary(dtree->devs, (const char *) &dev,
				   sizeof(dev), node)) {
		log_error("dtree node hash insertion failed");
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	if (uuid && *uuid &&
	    !dm_hash_insert(dtree->uuids, uuid, node)) {
		log_error("dtree uuid hash insertion failed");
		dm_hash_remove_binary(dtree->devs, (const char *) &dev,
				      sizeof(dev));
		dm_pool_free(dtree->mem, node);
		return NULL;
	}

	return node;
}

static struct dm_tree_node *_find_dm_tree_node(struct dm_tree *dtree,
					       uint32_t major, uint32_t minor)
{
	uint64_t dev = MKDEV(major, minor);

	return dm_hash_lookup_binary(dtree->devs, (const char *) &dev,
				     sizeof(dev));
}

static struct dm_tree_node *_find_dm_tree_node_by_uuid(struct dm_tree *dtree,
						       const char *uuid)
{
	struct dm_tree_node *node;

	if ((node = dm_hash_lookup(dtree->uuids, uuid)))
		return node;

	if (strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return NULL;

	return dm_hash_lookup(dtree->uuids, uuid + sizeof(UUID_PREFIX) - 1);
}

static int _deps(struct dm_task **dmt, struct dm_pool *mem, uint32_t major, uint32_t minor,
		 const char **name, const char **uuid,
		 struct dm_info *info, struct dm_deps **deps)
{
	memset(info, 0, sizeof(*info));

	if (!dm_is_dm_major(major)) {
		*name = "";
		*uuid = "";
		*deps = NULL;
		info->major = major;
		info->minor = minor;
		info->exists = 0;
		info->live_table = 0;
		info->inactive_table = 0;
		info->read_only = 0;
		return 1;
	}

	if (!(*dmt = dm_task_create(DM_DEVICE_DEPS))) {
		log_error("deps dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(*dmt, major)) {
		log_error("_deps: failed to set major for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_set_minor(*dmt, minor)) {
		log_error("_deps: failed to set minor for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_run(*dmt)) {
		log_error("_deps: task run failed for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!dm_task_get_info(*dmt, info)) {
		log_error("_deps: failed to get info for (%" PRIu32 ":%" PRIu32 ")",
			  major, minor);
		goto failed;
	}

	if (!info->exists) {
		*name = "";
		*uuid = "";
		*deps = NULL;
	} else {
		if (info->major != major) {
			log_error("Inconsistent dtree major number: %u != %u",
				  major, info->major);
			goto failed;
		}
		if (info->minor != minor) {
			log_error("Inconsistent dtree minor number: %u != %u",
				  minor, info->minor);
			goto failed;
		}
		if (!(*name = dm_pool_strdup(mem, dm_task_get_name(*dmt)))) {
			log_error("name pool_strdup failed");
			goto failed;
		}
		if (!(*uuid = dm_pool_strdup(mem, dm_task_get_uuid(*dmt)))) {
			log_error("uuid pool_strdup failed");
			goto failed;
		}
		*deps = dm_task_get_deps(*dmt);
	}

	return 1;

failed:
	dm_task_destroy(*dmt);
	return 0;
}

static struct dm_tree_node *_add_dev(struct dm_tree *dtree,
				     struct dm_tree_node *parent,
				     uint32_t major, uint32_t minor,
				     uint16_t udev_flags)
{
	struct dm_task *dmt = NULL;
	struct dm_info info;
	struct dm_deps *deps = NULL;
	const char *name = NULL;
	const char *uuid = NULL;
	struct dm_tree_node *node = NULL;
	uint32_t i;
	int new = 0;

	/* Already in tree? */
	if (!(node = _find_dm_tree_node(dtree, major, minor))) {
		if (!_deps(&dmt, dtree->mem, major, minor, &name, &uuid, &info, &deps))
			return_NULL;

		if (!(node = _create_dm_tree_node(dtree, name, uuid, &info,
						  NULL, udev_flags)))
			goto_out;
		new = 1;
	}

	if (!_link_tree_nodes(parent, node)) {
		node = NULL;
		goto_out;
	}

	/* If node was already in tree, no need to recurse. */
	if (!new)
		goto out;

	/* Can't recurse if not a mapped device or there are no dependencies */
	if (!node->info.exists || !deps->count) {
		if (!_add_to_bottomlevel(node)) {
			stack;
			node = NULL;
		}
		goto out;
	}

	/* Add dependencies to tree */
	for (i = 0; i < deps->count; i++)
		if (!_add_dev(dtree, node, MAJOR(deps->device[i]),
			      MINOR(deps->device[i]), udev_flags)) {
			node = NULL;
			goto_out;
		}

out:
	if (dmt)
		dm_task_destroy(dmt);

	return node;
}

static int _node_clear_table(struct dm_tree_node *dnode)
{
	struct dm_task *dmt;
	struct dm_info *info;
	const char *name;
	int r;

	if (!(info = &dnode->info)) {
		log_error("_node_clear_table failed: missing info");
		return 0;
	}

	if (!(name = dm_tree_node_get_name(dnode))) {
		log_error("_node_clear_table failed: missing name");
		return 0;
	}

	/* Is there a table? */
	if (!info->exists || !info->inactive_table)
		return 1;

// FIXME Get inactive deps. If any dev referenced has 1 opener and no live table, remove it after the clear.

	log_verbose("Clearing inactive table %s (%" PRIu32 ":%" PRIu32 ")",
		    name, info->major, info->minor);

	if (!(dmt = dm_task_create(DM_DEVICE_CLEAR))) {
		log_error("Table clear dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, info->major) ||
	    !dm_task_set_minor(dmt, info->minor)) {
		log_error("Failed to set device number for %s table clear", name);
		dm_task_destroy(dmt);
		return 0;
	}

	r = dm_task_run(dmt);

	if (!dm_task_get_info(dmt, info)) {
		log_error("_node_clear_table failed: info missing after running task for %s", name);
		r = 0;
	}

	dm_task_destroy(dmt);

	return r;
}

struct dm_tree_node *dm_tree_add_new_dev(struct dm_tree *dtree,
					 const char *name,
					 const char *uuid,
					 uint32_t major, uint32_t minor,
					 int read_only,
					 int clear_inactive,
					 void *context)
{
	struct dm_tree_node *dnode;
	struct dm_info info;
	const char *name2;
	const char *uuid2;

	/* Do we need to add node to tree? */
	if (!(dnode = dm_tree_find_node_by_uuid(dtree, uuid))) {
		if (!(name2 = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return NULL;
		}
		if (!(uuid2 = dm_pool_strdup(dtree->mem, uuid))) {
			log_error("uuid pool_strdup failed");
			return NULL;
		}

		info.major = 0;
		info.minor = 0;
		info.exists = 0;
		info.live_table = 0;
		info.inactive_table = 0;
		info.read_only = 0;

		if (!(dnode = _create_dm_tree_node(dtree, name2, uuid2, &info,
						   context, 0)))
			return_NULL;

		/* Attach to root node until a table is supplied */
		if (!_add_to_toplevel(dnode) || !_add_to_bottomlevel(dnode))
			return_NULL;

		dnode->props.major = major;
		dnode->props.minor = minor;
		dnode->props.new_name = NULL;
		dnode->props.size_changed = 0;
	} else if (strcmp(name, dnode->name)) {
		/* Do we need to rename node? */
		if (!(dnode->props.new_name = dm_pool_strdup(dtree->mem, name))) {
			log_error("name pool_strdup failed");
			return 0;
		}
	}

	dnode->props.read_only = read_only ? 1 : 0;
	dnode->props.read_ahead = DM_READ_AHEAD_AUTO;
	dnode->props.read_ahead_flags = 0;

	if (clear_inactive && !_node_clear_table(dnode))
		return_NULL;

	dnode->context = context;
	dnode->udev_flags = 0;

	return dnode;
}

struct dm_tree_node *dm_tree_add_new_dev_with_udev_flags(struct dm_tree *dtree,
							  const char *name,
							  const char *uuid,
							  uint32_t major,
							  uint32_t minor,
							  int read_only,
							  int clear_inactive,
							  void *context,
							  uint16_t udev_flags)
{
	struct dm_tree_node *node;

	if ((node = dm_tree_add_new_dev(dtree, name, uuid, major, minor, read_only,
					clear_inactive, context)))
		node->udev_flags = udev_flags;

	return node;
}

void dm_tree_node_set_udev_flags(struct dm_tree_node *dnode, uint16_t udev_flags)

{
	struct dm_info *dinfo = &dnode->info;

	if (udev_flags != dnode->udev_flags)
		log_debug("Resetting %s (%" PRIu32 ":%" PRIu32
			  ") udev_flags from 0x%x to 0x%x",
			  dnode->name, dinfo->major, dinfo->minor,
			  dnode->udev_flags, udev_flags);
	dnode->udev_flags = udev_flags;
}

void dm_tree_node_set_read_ahead(struct dm_tree_node *dnode,
				 uint32_t read_ahead,
				 uint32_t read_ahead_flags)
{
	dnode->props.read_ahead = read_ahead;
	dnode->props.read_ahead_flags = read_ahead_flags;
}

void dm_tree_node_set_presuspend_node(struct dm_tree_node *node,
				      struct dm_tree_node *presuspend_node)
{
	node->presuspend_node = presuspend_node;
}

int dm_tree_add_dev(struct dm_tree *dtree, uint32_t major, uint32_t minor)
{
	return _add_dev(dtree, &dtree->root, major, minor, 0) ? 1 : 0;
}

int dm_tree_add_dev_with_udev_flags(struct dm_tree *dtree, uint32_t major,
				    uint32_t minor, uint16_t udev_flags)
{
	return _add_dev(dtree, &dtree->root, major, minor, udev_flags) ? 1 : 0;
}

const char *dm_tree_node_get_name(const struct dm_tree_node *node)
{
	return node->info.exists ? node->name : "";
}

const char *dm_tree_node_get_uuid(const struct dm_tree_node *node)
{
	return node->info.exists ? node->uuid : "";
}

const struct dm_info *dm_tree_node_get_info(const struct dm_tree_node *node)
{
	return &node->info;
}

void *dm_tree_node_get_context(const struct dm_tree_node *node)
{
	return node->context;
}

int dm_tree_node_size_changed(const struct dm_tree_node *dnode)
{
	return dnode->props.size_changed;
}

int dm_tree_node_num_children(const struct dm_tree_node *node, uint32_t inverted)
{
	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 0;
		return dm_list_size(&node->used_by);
	}

	if (_nodes_are_linked(node, &node->dtree->root))
		return 0;

	return dm_list_size(&node->uses);
}

/*
 * Returns 1 if no prefix supplied
 */
static int _uuid_prefix_matches(const char *uuid, const char *uuid_prefix, size_t uuid_prefix_len)
{
	if (!uuid_prefix)
		return 1;

	if (!strncmp(uuid, uuid_prefix, uuid_prefix_len))
		return 1;

	/* Handle transition: active device uuids might be missing the prefix */
	if (uuid_prefix_len <= 4)
		return 0;

	if (!strncmp(uuid, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	if (strncmp(uuid_prefix, UUID_PREFIX, sizeof(UUID_PREFIX) - 1))
		return 0;

	if (!strncmp(uuid, uuid_prefix + sizeof(UUID_PREFIX) - 1, uuid_prefix_len - (sizeof(UUID_PREFIX) - 1)))
		return 1;

	return 0;
}
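
/*
 * Example (illustrative values, not part of this file): with uuid_prefix
 * "LVM-abc123", an active uuid "LVM-abc123..." matches directly, and the
 * transition case above also accepts an old-style active uuid "abc123..."
 * that lacks the "LVM-" prefix.  A uuid such as "LVM-def..." does not match.
 */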

/*
 * Returns 1 if no children.
 */
static int _children_suspended(struct dm_tree_node *node,
			       uint32_t inverted,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct dm_list *list;
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	const char *uuid;

	if (inverted) {
		if (_nodes_are_linked(&node->dtree->root, node))
			return 1;
		list = &node->used_by;
	} else {
		if (_nodes_are_linked(node, &node->dtree->root))
			return 1;
		list = &node->uses;
	}

	dm_list_iterate_items(dlink, list) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Ignore if parent node wants to presuspend this node */
		if (dlink->node->presuspend_node == node)
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		if (!dinfo->suspended)
			return 0;
	}

	return 1;
}

/*
 * Set major and minor to zero for root of tree.
 */
struct dm_tree_node *dm_tree_find_node(struct dm_tree *dtree,
				       uint32_t major,
				       uint32_t minor)
{
	if (!major && !minor)
		return &dtree->root;

	return _find_dm_tree_node(dtree, major, minor);
}

/*
 * Set uuid to NULL for root of tree.
 */
struct dm_tree_node *dm_tree_find_node_by_uuid(struct dm_tree *dtree,
					       const char *uuid)
{
	if (!uuid || !*uuid)
		return &dtree->root;

	return _find_dm_tree_node_by_uuid(dtree, uuid);
}

/*
 * First time set *handle to NULL.
 * Set inverted to invert the tree.
 */
struct dm_tree_node *dm_tree_next_child(void **handle,
					const struct dm_tree_node *parent,
					uint32_t inverted)
{
	struct dm_list **dlink = (struct dm_list **) handle;
	const struct dm_list *use_list;

	if (inverted)
		use_list = &parent->used_by;
	else
		use_list = &parent->uses;

	if (!*dlink)
		*dlink = dm_list_first(use_list);
	else
		*dlink = dm_list_next(use_list, *dlink);

	return (*dlink) ? dm_list_item(*dlink, struct dm_tree_link)->node : NULL;
}
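
/*
 * Iteration sketch (illustrative only, not part of this file): visit the
 * children of a node; "parent" and process() are placeholders.
 *
 *	void *handle = NULL;
 *	struct dm_tree_node *child;
 *
 *	while ((child = dm_tree_next_child(&handle, parent, 0)))
 *		process(child);		// hypothetical per-node callback
 *
 * Pass inverted = 1 to walk the used_by list (parents) instead of uses.
 */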

/*
 * Deactivate a device with its dependencies if the uuid prefix matches.
 */
static int _info_by_dev(uint32_t major, uint32_t minor, int with_open_count,
			struct dm_info *info)
{
	struct dm_task *dmt;
	int r;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO))) {
		log_error("_info_by_dev: dm_task creation failed");
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("_info_by_dev: Failed to set device number");
		dm_task_destroy(dmt);
		return 0;
	}

	if (!with_open_count && !dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if ((r = dm_task_run(dmt)))
		r = dm_task_get_info(dmt, info);

	dm_task_destroy(dmt);

	return r;
}

static int _check_device_not_in_use(const char *name, struct dm_info *info)
{
	if (!info->exists)
		return 1;

	/* If sysfs is not used, use open_count information only. */
	if (!*dm_sysfs_dir()) {
		if (info->open_count) {
			log_error("Device %s (%" PRIu32 ":%" PRIu32 ") in use",
				  name, info->major, info->minor);
			return 0;
		}

		return 1;
	}

	if (dm_device_has_holders(info->major, info->minor)) {
		log_error("Device %s (%" PRIu32 ":%" PRIu32 ") is used "
			  "by another device.", name, info->major, info->minor);
		return 0;
	}

	if (dm_device_has_mounted_fs(info->major, info->minor)) {
		log_error("Device %s (%" PRIu32 ":%" PRIu32 ") contains "
			  "a filesystem in use.", name, info->major, info->minor);
		return 0;
	}

	return 1;
}

/* Check if all parent nodes of given node have open_count == 0 */
static int _node_has_closed_parents(struct dm_tree_node *node,
				    const char *uuid_prefix,
				    size_t uuid_prefix_len)
{
	struct dm_tree_link *dlink;
	const struct dm_info *dinfo;
	struct dm_info info;
	const char *uuid;

	/* Iterate through parents of this node */
	dm_list_iterate_items(dlink, &node->used_by) {
		if (!(uuid = dm_tree_node_get_uuid(dlink->node))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		if (!(dinfo = dm_tree_node_get_info(dlink->node))) {
			stack;	/* FIXME Is this normal? */
			return 0;
		}

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			log_debug("Node %s %d:%d has open_count %d", uuid_prefix,
				  dinfo->major, dinfo->minor, info.open_count);
			return 0;
		}
	}

	return 1;
}

static int _deactivate_node(const char *name, uint32_t major, uint32_t minor,
			    uint32_t *cookie, uint16_t udev_flags, int retry)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Removing %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_REMOVE))) {
		log_error("Deactivation dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s deactivation", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;


	if (retry)
		dm_task_retry_remove(dmt);

	r = dm_task_run(dmt);

	/* FIXME Until kernel returns actual name so dm-iface.c can handle it */
	rm_dev_node(name, dmt->cookie_set && !(udev_flags & DM_UDEV_DISABLE_DM_RULES_FLAG),
		    dmt->cookie_set && (udev_flags & DM_UDEV_DISABLE_LIBRARY_FALLBACK));

	/* FIXME Remove node from tree or mark invalid? */

out:
	dm_task_destroy(dmt);

	return r;
}

static int _rename_node(const char *old_name, const char *new_name, uint32_t major,
			uint32_t minor, uint32_t *cookie, uint16_t udev_flags)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Renaming %s (%" PRIu32 ":%" PRIu32 ") to %s", old_name, major, minor, new_name);

	if (!(dmt = dm_task_create(DM_DEVICE_RENAME))) {
		log_error("Rename dm_task creation failed for %s", old_name);
		return 0;
	}

	if (!dm_task_set_name(dmt, old_name)) {
		log_error("Failed to set name for %s rename.", old_name);
		goto out;
	}

	if (!dm_task_set_newname(dmt, new_name))
		goto_out;

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto out;

	r = dm_task_run(dmt);

out:
	dm_task_destroy(dmt);

	return r;
}

/* FIXME Merge with _suspend_node? */
static int _resume_node(const char *name, uint32_t major, uint32_t minor,
			uint32_t read_ahead, uint32_t read_ahead_flags,
			struct dm_info *newinfo, uint32_t *cookie,
			uint16_t udev_flags, int already_suspended)
{
	struct dm_task *dmt;
	int r = 0;

	log_verbose("Resuming %s (%" PRIu32 ":%" PRIu32 ")", name, major, minor);

	if (!(dmt = dm_task_create(DM_DEVICE_RESUME))) {
		log_debug("Suspend dm_task creation failed for %s.", name);
		return 0;
	}

	/* FIXME Kernel should fill in name on return instead */
	if (!dm_task_set_name(dmt, name)) {
		log_debug("Failed to set device name for %s resumption.", name);
		goto out;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s resumption.", name);
		goto out;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (!dm_task_set_read_ahead(dmt, read_ahead, read_ahead_flags))
		log_error("Failed to set read ahead");

	if (!dm_task_set_cookie(dmt, cookie, udev_flags))
		goto_out;

	if (!(r = dm_task_run(dmt)))
		goto_out;

	if (already_suspended)
		dec_suspended();

	if (!(r = dm_task_get_info(dmt, newinfo)))
		stack;

out:
	dm_task_destroy(dmt);

	return r;
}

static int _suspend_node(const char *name, uint32_t major, uint32_t minor,
			 int skip_lockfs, int no_flush, struct dm_info *newinfo)
{
	struct dm_task *dmt;
	int r;

	log_verbose("Suspending %s (%" PRIu32 ":%" PRIu32 ")%s%s",
		    name, major, minor,
		    skip_lockfs ? "" : " with filesystem sync",
		    no_flush ? "" : " with device flush");

	if (!(dmt = dm_task_create(DM_DEVICE_SUSPEND))) {
		log_error("Suspend dm_task creation failed for %s", name);
		return 0;
	}

	if (!dm_task_set_major(dmt, major) || !dm_task_set_minor(dmt, minor)) {
		log_error("Failed to set device number for %s suspension.", name);
		dm_task_destroy(dmt);
		return 0;
	}

	if (!dm_task_no_open_count(dmt))
		log_error("Failed to disable open_count");

	if (skip_lockfs && !dm_task_skip_lockfs(dmt))
		log_error("Failed to set skip_lockfs flag.");

	if (no_flush && !dm_task_no_flush(dmt))
		log_error("Failed to set no_flush flag.");

	if ((r = dm_task_run(dmt))) {
		inc_suspended();
		r = dm_task_get_info(dmt, newinfo);
	}

	dm_task_destroy(dmt);

	return r;
}

static int _thin_pool_status_transaction_id(struct dm_tree_node *dnode, uint64_t *transaction_id)
{
	struct dm_task *dmt;
	int r = 0;
	uint64_t start, length;
	char *type = NULL;
	char *params = NULL;

	if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set major minor.");
		goto out;
	}

	if (!dm_task_run(dmt))
		goto_out;

	dm_get_next_target(dmt, NULL, &start, &length, &type, &params);

	if (type && (strcmp(type, "thin-pool") != 0)) {
		log_error("Expected thin-pool target for %d:%d and got %s.",
			  dnode->info.major, dnode->info.minor, type);
		goto out;
	}

	if (!params || (sscanf(params, "%" PRIu64, transaction_id) != 1)) {
		log_error("Failed to parse transaction_id from %s.", params);
		goto out;
	}

	log_debug("Thin pool transaction id: %" PRIu64 " status: %s.", *transaction_id, params);

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}

static int _thin_pool_node_message(struct dm_tree_node *dnode, struct thin_message *tm)
{
	struct dm_task *dmt;
	struct dm_thin_message *m = &tm->message;
	char buf[64];
	int r;

	switch (m->type) {
	case DM_THIN_MESSAGE_CREATE_SNAP:
		r = dm_snprintf(buf, sizeof(buf), "create_snap %u %u",
				m->u.m_create_snap.device_id,
				m->u.m_create_snap.origin_id);
		break;
	case DM_THIN_MESSAGE_CREATE_THIN:
		r = dm_snprintf(buf, sizeof(buf), "create_thin %u",
				m->u.m_create_thin.device_id);
		break;
	case DM_THIN_MESSAGE_DELETE:
		r = dm_snprintf(buf, sizeof(buf), "delete %u",
				m->u.m_delete.device_id);
		break;
	case DM_THIN_MESSAGE_TRIM:
		r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
				m->u.m_trim.device_id,
				m->u.m_trim.new_size);
		break;
	case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
		r = dm_snprintf(buf, sizeof(buf),
				"set_transaction_id %" PRIu64 " %" PRIu64,
				m->u.m_set_transaction_id.current_id,
				m->u.m_set_transaction_id.new_id);
		break;
	}

	if (!r) {
		log_error("Failed to prepare message.");
		return 0;
	}

	r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_TARGET_MSG)))
		return_0;

	if (!dm_task_set_major(dmt, dnode->info.major) ||
	    !dm_task_set_minor(dmt, dnode->info.minor)) {
		log_error("Failed to set message major minor.");
		goto out;
	}

	if (!dm_task_set_message(dmt, buf))
		goto_out;

	/* Internal functionality of dm_task */
	dmt->expected_errno = tm->expected_errno;

	if (!dm_task_run(dmt))
		goto_out;

	r = 1;
out:
	dm_task_destroy(dmt);

	return r;
}
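
/*
 * The message strings built above follow the thin-pool kernel target's
 * message interface, e.g. (illustrative values):
 *
 *	"create_thin 0"
 *	"create_snap 1 0"
 *	"delete 1"
 *	"set_transaction_id 0 1"
 *
 * Each is sent with DM_DEVICE_TARGET_MSG against the pool's major:minor.
 */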

static int _node_send_messages(struct dm_tree_node *dnode,
			       const char *uuid_prefix,
			       size_t uuid_prefix_len)
{
	struct load_segment *seg;
	struct thin_message *tmsg;
	uint64_t trans_id;
	const char *uuid;

	if (!dnode->info.exists || (dm_list_size(&dnode->props.segs) != 1))
		return 1;

	seg = dm_list_item(dm_list_last(&dnode->props.segs), struct load_segment);
	if (seg->type != SEG_THIN_POOL)
		return 1;

	if (!(uuid = dm_tree_node_get_uuid(dnode)))
		return_0;

	if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len)) {
		log_debug("UUID \"%s\" does not match.", uuid);
		return 1;
	}

	if (!_thin_pool_status_transaction_id(dnode, &trans_id))
		goto_bad;

	if (trans_id == seg->transaction_id)
		return 1; /* In sync - skip messages */

	if (trans_id != (seg->transaction_id - 1)) {
		log_error("Thin pool transaction_id=%" PRIu64 ", while expected: %" PRIu64 ".",
			  trans_id, seg->transaction_id - 1);
		goto bad; /* Nothing to send */
	}

	dm_list_iterate_items(tmsg, &seg->thin_messages)
		if (!(_thin_pool_node_message(dnode, tmsg)))
			goto_bad;

	return 1;
bad:
	/* Try to deactivate */
	if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
		log_error("Failed to deactivate %s", dnode->name);

	return 0;
}

/*
 * FIXME Don't attempt to deactivate known internal dependencies.
 */
static int _dm_tree_deactivate_children(struct dm_tree_node *dnode,
					const char *uuid_prefix,
					size_t uuid_prefix_len,
					unsigned level)
{
	int r = 1;
	void *handle = NULL;
	struct dm_tree_node *child = dnode;
	struct dm_info info;
	const struct dm_info *dinfo;
	const char *name;
	const char *uuid;

	while ((child = dm_tree_next_child(&handle, dnode, 0))) {
		if (!(dinfo = dm_tree_node_get_info(child))) {
			stack;
			continue;
		}

		if (!(name = dm_tree_node_get_name(child))) {
			stack;
			continue;
		}

		if (!(uuid = dm_tree_node_get_uuid(child))) {
			stack;
			continue;
		}

		/* Ignore if it doesn't belong to this VG */
		if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
			continue;

		/* Refresh open_count */
		if (!_info_by_dev(dinfo->major, dinfo->minor, 1, &info) ||
		    !info.exists)
			continue;

		if (info.open_count) {
			/* Skip internal non-toplevel opened nodes */
			if (level)
				continue;

			/* When retry is not allowed, error */
			if (!child->dtree->retry_remove) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major, info.minor);
				r = 0;
				continue;
			}

			/* Check toplevel node for holders/mounted fs */
			if (!_check_device_not_in_use(name, &info)) {
				stack;
				r = 0;
				continue;
			}
			/* Go on with retry */
		}

		/* Also checking open_count in parent nodes of presuspend_node */
		if ((child->presuspend_node &&
		     !_node_has_closed_parents(child->presuspend_node,
					       uuid_prefix, uuid_prefix_len))) {
			/* Only report error from (likely non-internal) dependency at top level */
			if (!level) {
				log_error("Unable to deactivate open %s (%" PRIu32
					  ":%" PRIu32 ")", name, info.major,
					  info.minor);
				r = 0;
			}
			continue;
		}

		/* Suspend child node first if requested */
		if (child->presuspend_node &&
		    !dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
			continue;

		if (!_deactivate_node(name, info.major, info.minor,
				      &child->dtree->cookie, child->udev_flags,
				      (level == 0) ? child->dtree->retry_remove : 0)) {
			log_error("Unable to deactivate %s (%" PRIu32
				  ":%" PRIu32 ")", name, info.major,
				  info.minor);
			r = 0;
			continue;
		} else if (info.suspended)
			dec_suspended();

		if (dm_tree_node_num_children(child, 0)) {
			if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
				return_0;
		}
	}

	return r;
}

int dm_tree_deactivate_children(struct dm_tree_node *dnode,
				const char *uuid_prefix,
				size_t uuid_prefix_len)
{
	return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
}
1486 return _dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len, 0);
1487}
1488
c55b1410
AK
1489void dm_tree_skip_lockfs(struct dm_tree_node *dnode)
1490{
1491 dnode->dtree->skip_lockfs = 1;
1492}
1493
b9ffd32c
AK
1494void dm_tree_use_no_flush_suspend(struct dm_tree_node *dnode)
1495{
1496 dnode->dtree->no_flush = 1;
1497}
1498
787200ef
PR
1499void dm_tree_retry_remove(struct dm_tree_node *dnode)
1500{
1501 dnode->dtree->retry_remove = 1;
1502}
1503
b4f1578f 1504int dm_tree_suspend_children(struct dm_tree_node *dnode,
08e64ce5
ZK
1505 const char *uuid_prefix,
1506 size_t uuid_prefix_len)
db208f51 1507{
68085c93 1508 int r = 1;
db208f51 1509 void *handle = NULL;
b4f1578f 1510 struct dm_tree_node *child = dnode;
db208f51
AK
1511 struct dm_info info, newinfo;
1512 const struct dm_info *dinfo;
1513 const char *name;
1514 const char *uuid;
1515
690a5da2 1516 /* Suspend nodes at this level of the tree */
b4f1578f
AK
1517 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1518 if (!(dinfo = dm_tree_node_get_info(child))) {
db208f51
AK
1519 stack;
1520 continue;
1521 }
1522
b4f1578f 1523 if (!(name = dm_tree_node_get_name(child))) {
db208f51
AK
1524 stack;
1525 continue;
1526 }
1527
b4f1578f 1528 if (!(uuid = dm_tree_node_get_uuid(child))) {
db208f51
AK
1529 stack;
1530 continue;
1531 }
1532
1533 /* Ignore if it doesn't belong to this VG */
2b69db1f 1534 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
db208f51
AK
1535 continue;
1536
690a5da2
AK
1537 /* Ensure immediate parents are already suspended */
1538 if (!_children_suspended(child, 1, uuid_prefix, uuid_prefix_len))
1539 continue;
1540
db208f51 1541 if (!_info_by_dev(dinfo->major, dinfo->minor, 0, &info) ||
b700541f 1542 !info.exists || info.suspended)
db208f51
AK
1543 continue;
1544
c55b1410 1545 if (!_suspend_node(name, info.major, info.minor,
b9ffd32c
AK
1546 child->dtree->skip_lockfs,
1547 child->dtree->no_flush, &newinfo)) {
db208f51
AK
1548 log_error("Unable to suspend %s (%" PRIu32
1549 ":%" PRIu32 ")", name, info.major,
1550 info.minor);
68085c93 1551 r = 0;
db208f51
AK
1552 continue;
1553 }
1554
1555 /* Update cached info */
1556 child->info = newinfo;
690a5da2
AK
1557 }
1558
1559 /* Then suspend any child nodes */
1560 handle = NULL;
1561
b4f1578f
AK
1562 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1563 if (!(uuid = dm_tree_node_get_uuid(child))) {
690a5da2
AK
1564 stack;
1565 continue;
1566 }
1567
1568 /* Ignore if it doesn't belong to this VG */
87f98002 1569 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
690a5da2 1570 continue;
db208f51 1571
b4f1578f 1572 if (dm_tree_node_num_children(child, 0))
68085c93
MS
1573 if (!dm_tree_suspend_children(child, uuid_prefix, uuid_prefix_len))
1574 return_0;
db208f51
AK
1575 }
1576
68085c93 1577 return r;
db208f51
AK
1578}
1579
b4f1578f 1580int dm_tree_activate_children(struct dm_tree_node *dnode,
db208f51
AK
1581 const char *uuid_prefix,
1582 size_t uuid_prefix_len)
1583{
2ca6b865 1584 int r = 1;
db208f51 1585 void *handle = NULL;
b4f1578f 1586 struct dm_tree_node *child = dnode;
165e4a11
AK
1587 struct dm_info newinfo;
1588 const char *name;
db208f51 1589 const char *uuid;
56c28292 1590 int priority;
db208f51 1591
165e4a11 1592 /* Activate children first */
b4f1578f
AK
1593 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
1594 if (!(uuid = dm_tree_node_get_uuid(child))) {
165e4a11
AK
1595 stack;
1596 continue;
db208f51
AK
1597 }
1598
908db078
AK
1599 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1600 continue;
db208f51 1601
b4f1578f 1602 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
1603 if (!dm_tree_activate_children(child, uuid_prefix, uuid_prefix_len))
1604 return_0;
56c28292 1605 }
165e4a11 1606
56c28292 1607 handle = NULL;
165e4a11 1608
aa6f4e51 1609 for (priority = 0; priority < 3; priority++) {
56c28292 1610 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
a5a31ce9
ZK
1611 if (priority != child->activation_priority)
1612 continue;
1613
56c28292
AK
1614 if (!(uuid = dm_tree_node_get_uuid(child))) {
1615 stack;
1616 continue;
165e4a11 1617 }
165e4a11 1618
56c28292
AK
1619 if (!_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
1620 continue;
165e4a11 1621
56c28292
AK
1622 if (!(name = dm_tree_node_get_name(child))) {
1623 stack;
1624 continue;
1625 }
1626
1627 /* Rename? */
1628 if (child->props.new_name) {
bd90c6b2 1629 if (!_rename_node(name, child->props.new_name, child->info.major,
f16aea9e
PR
1630 child->info.minor, &child->dtree->cookie,
1631 child->udev_flags)) {
56c28292
AK
1632 log_error("Failed to rename %s (%" PRIu32
1633 ":%" PRIu32 ") to %s", name, child->info.major,
1634 child->info.minor, child->props.new_name);
1635 return 0;
1636 }
1637 child->name = child->props.new_name;
1638 child->props.new_name = NULL;
1639 }
1640
1641 if (!child->info.inactive_table && !child->info.suspended)
1642 continue;
1643
bafa2f39 1644 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 1645 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09 1646 &newinfo, &child->dtree->cookie, child->udev_flags, child->info.suspended)) {
56c28292 1647 log_error("Unable to resume %s (%" PRIu32
bafa2f39 1648 ":%" PRIu32 ")", child->name, child->info.major,
56c28292 1649 child->info.minor);
2ca6b865 1650 r = 0;
56c28292
AK
1651 continue;
1652 }
1653
1654 /* Update cached info */
1655 child->info = newinfo;
1656 }
db208f51
AK
1657 }
1658
165e4a11
AK
1659 handle = NULL;
1660
2ca6b865 1661 return r;
165e4a11
AK
1662}
1663
b4f1578f 1664static int _create_node(struct dm_tree_node *dnode)
165e4a11
AK
1665{
1666 int r = 0;
1667 struct dm_task *dmt;
1668
1669 log_verbose("Creating %s", dnode->name);
1670
1671 if (!(dmt = dm_task_create(DM_DEVICE_CREATE))) {
1672 log_error("Create dm_task creation failed for %s", dnode->name);
1673 return 0;
1674 }
1675
1676 if (!dm_task_set_name(dmt, dnode->name)) {
1677 log_error("Failed to set device name for %s", dnode->name);
1678 goto out;
1679 }
1680
1681 if (!dm_task_set_uuid(dmt, dnode->uuid)) {
1682 log_error("Failed to set uuid for %s", dnode->name);
1683 goto out;
1684 }
1685
1686 if (dnode->props.major &&
1687 (!dm_task_set_major(dmt, dnode->props.major) ||
1688 !dm_task_set_minor(dmt, dnode->props.minor))) {
1689 log_error("Failed to set device number for %s creation.", dnode->name);
1690 goto out;
1691 }
1692
1693 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
1694 log_error("Failed to set read only flag for %s", dnode->name);
1695 goto out;
1696 }
1697
1698 if (!dm_task_no_open_count(dmt))
1699 log_error("Failed to disable open_count");
1700
1701 if ((r = dm_task_run(dmt)))
1702 r = dm_task_get_info(dmt, &dnode->info);
1703
1704out:
1705 dm_task_destroy(dmt);
1706
1707 return r;
1708}
1709
1710
b4f1578f 1711static int _build_dev_string(char *devbuf, size_t bufsize, struct dm_tree_node *node)
165e4a11
AK
1712{
1713 if (!dm_format_dev(devbuf, bufsize, node->info.major, node->info.minor)) {
40e5fd8b
AK
1714 log_error("Failed to format %s device number for %s as dm "
1715 "target (%u,%u)",
1716 node->name, node->uuid, node->info.major, node->info.minor);
1717 return 0;
165e4a11
AK
1718 }
1719
1720 return 1;
1721}
1722
ffa9b6a5
ZK
1723/* simplify string emiting code */
1724#define EMIT_PARAMS(p, str...)\
7b6c011c
AK
1725do {\
1726 int w;\
1727 if ((w = dm_snprintf(params + p, paramsize - (size_t) p, str)) < 0) {\
1728 stack; /* Out of space */\
1729 return -1;\
1730 }\
1731 p += w;\
1732} while (0)
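
/*
 * EMIT_PARAMS appends formatted text at offset p within the params/paramsize
 * buffer of the enclosing function and advances p; on overflow it makes that
 * function return -1.  Example (illustrative only) inside an _emit_*_line()
 * helper that declares params, paramsize and a local position counter:
 *
 *	int pos = 0;
 *
 *	EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
 *	EMIT_PARAMS(pos, " %u", seg->region_size);
 */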
ffa9b6a5 1733
3c74075f
JEB
1734/*
1735 * _emit_areas_line
1736 *
1737 * Returns: 1 on success, 0 on failure
1738 */
08f1ddea 1739static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
4dcaa230
AK
1740 struct load_segment *seg, char *params,
1741 size_t paramsize, int *pos)
165e4a11
AK
1742{
1743 struct seg_area *area;
7d7d93ac 1744 char devbuf[DM_FORMAT_DEV_BUFSIZE];
609faae9 1745 unsigned first_time = 1;
db3c1ac1 1746 const char *logtype, *synctype;
b262f3e1 1747 unsigned log_parm_count;
165e4a11 1748
2c44337b 1749 dm_list_iterate_items(area, &seg->areas) {
b262f3e1
ZK
1750 switch (seg->type) {
1751 case SEG_REPLICATOR_DEV:
6d04311e
JEB
1752 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1753 return_0;
1754
b262f3e1
ZK
1755 EMIT_PARAMS(*pos, " %d 1 %s", area->rsite_index, devbuf);
1756 if (first_time)
1757 EMIT_PARAMS(*pos, " nolog 0");
1758 else {
1759 /* Remote devices */
1760 log_parm_count = (area->flags &
1761 (DM_NOSYNC | DM_FORCESYNC)) ? 2 : 1;
1762
1763 if (!area->slog) {
1764 devbuf[0] = 0; /* Only core log parameters */
1765 logtype = "core";
1766 } else {
1767 devbuf[0] = ' '; /* Extra space before device name */
1768 if (!_build_dev_string(devbuf + 1,
1769 sizeof(devbuf) - 1,
1770 area->slog))
1771 return_0;
1772 logtype = "disk";
1773 log_parm_count++; /* Extra sync log device name parameter */
1774 }
1775
1776 EMIT_PARAMS(*pos, " %s %u%s %" PRIu64, logtype,
1777 log_parm_count, devbuf, area->region_size);
1778
db3c1ac1
AK
1779 synctype = (area->flags & DM_NOSYNC) ?
1780 " nosync" : (area->flags & DM_FORCESYNC) ?
1781 " sync" : NULL;
b262f3e1 1782
db3c1ac1
AK
1783 if (synctype)
1784 EMIT_PARAMS(*pos, "%s", synctype);
b262f3e1
ZK
1785 }
1786 break;
cac52ca4
JEB
1787 case SEG_RAID1:
1788 case SEG_RAID4:
1789 case SEG_RAID5_LA:
1790 case SEG_RAID5_RA:
1791 case SEG_RAID5_LS:
1792 case SEG_RAID5_RS:
1793 case SEG_RAID6_ZR:
1794 case SEG_RAID6_NR:
1795 case SEG_RAID6_NC:
6d04311e
JEB
1796 if (!area->dev_node) {
1797 EMIT_PARAMS(*pos, " -");
1798 break;
1799 }
1800 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1801 return_0;
1802
cac52ca4
JEB
1803 EMIT_PARAMS(*pos, " %s", devbuf);
1804 break;
b262f3e1 1805 default:
6d04311e
JEB
1806 if (!_build_dev_string(devbuf, sizeof(devbuf), area->dev_node))
1807 return_0;
1808
b262f3e1
ZK
1809 EMIT_PARAMS(*pos, "%s%s %" PRIu64, first_time ? "" : " ",
1810 devbuf, area->offset);
1811 }
609faae9
AK
1812
1813 first_time = 0;
165e4a11
AK
1814 }
1815
1816 return 1;
1817}
1818
b262f3e1
ZK
1819static int _replicator_emit_segment_line(const struct load_segment *seg, char *params,
1820 size_t paramsize, int *pos)
1821{
1822 const struct load_segment *rlog_seg;
1823 struct replicator_site *rsite;
1824 char rlogbuf[DM_FORMAT_DEV_BUFSIZE];
1825 unsigned parm_count;
1826
1827 if (!seg->log || !_build_dev_string(rlogbuf, sizeof(rlogbuf), seg->log))
1828 return_0;
1829
1830 rlog_seg = dm_list_item(dm_list_last(&seg->log->props.segs),
1831 struct load_segment);
1832
1833 EMIT_PARAMS(*pos, "%s 4 %s 0 auto %" PRIu64,
1834 seg->rlog_type, rlogbuf, rlog_seg->size);
1835
1836 dm_list_iterate_items(rsite, &seg->rsites) {
1837 parm_count = (rsite->fall_behind_data
1838 || rsite->fall_behind_ios
1839 || rsite->async_timeout) ? 4 : 2;
1840
1841 EMIT_PARAMS(*pos, " blockdev %u %u %s", parm_count, rsite->rsite_index,
1842 (rsite->mode == DM_REPLICATOR_SYNC) ? "synchronous" : "asynchronous");
1843
1844 if (rsite->fall_behind_data)
1845 EMIT_PARAMS(*pos, " data %" PRIu64, rsite->fall_behind_data);
1846 else if (rsite->fall_behind_ios)
1847 EMIT_PARAMS(*pos, " ios %" PRIu32, rsite->fall_behind_ios);
1848 else if (rsite->async_timeout)
1849 EMIT_PARAMS(*pos, " timeout %" PRIu32, rsite->async_timeout);
1850 }
1851
1852 return 1;
1853}
1854
3c74075f 1855/*
3c74075f
JEB
1856 * Returns: 1 on success, 0 on failure
1857 */
beecb1e1
ZK
1858static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *seg,
1859 char *params, size_t paramsize)
165e4a11 1860{
8f26e18c
JEB
1861 int block_on_error = 0;
1862 int handle_errors = 0;
1863 int dm_log_userspace = 0;
1864 struct utsname uts;
dbcb64b8 1865 unsigned log_parm_count;
b39fdcf4 1866 int pos = 0, parts;
7d7d93ac 1867 char logbuf[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 1868 const char *logtype;
b39fdcf4 1869 unsigned kmaj = 0, kmin = 0, krel = 0;
165e4a11 1870
b39fdcf4
MB
1871 if (uname(&uts) == -1) {
1872 log_error("Cannot read kernel release version.");
1873 return 0;
1874 }
1875
1876 /* Kernels with a major number of 2 always had 3 parts. */
1877 parts = sscanf(uts.release, "%u.%u.%u", &kmaj, &kmin, &krel);
1878 if (parts < 1 || (kmaj < 3 && parts < 3)) {
1879 log_error("Wrong kernel release version %s.", uts.release);
30a65310
ZK
1880 return 0;
1881 }
67b25ed4 1882
8f26e18c
JEB
1883 if ((seg->flags & DM_BLOCK_ON_ERROR)) {
1884 /*
1885 * Originally, block_on_error was an argument to the log
1886 * portion of the mirror CTR table. It was renamed to
1887 * "handle_errors" and now resides in the 'features'
1888 * section of the mirror CTR table (i.e. at the end).
1889 *
1890 * We can identify whether to use "block_on_error" or
1891 * "handle_errors" by the dm-mirror module's version
1892 * number (>= 1.12) or by the kernel version (>= 2.6.22).
1893 */
ba61f848 1894 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 22))
8f26e18c
JEB
1895 handle_errors = 1;
1896 else
1897 block_on_error = 1;
1898 }
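/*
 * Note (assumption): KERNEL_VERSION() is taken to pack the release as
 * (major << 16) | (minor << 8) | patch, like the Linux macro of the same
 * name, so the test above reduces to a single integer comparison.
 */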
1899
1900 if (seg->clustered) {
1901 /* Cluster mirrors require a UUID */
1902 if (!seg->uuid)
1903 return_0;
1904
1905 /*
1906 * Cluster mirrors used to have their own log
1907 * types. Now they are accessed through the
1908 * userspace log type.
1909 *
1910 * The dm-log-userspace module was added to the
1911 * 2.6.31 kernel.
1912 */
ba61f848 1913 if (KERNEL_VERSION(kmaj, kmin, krel) >= KERNEL_VERSION(2, 6, 31))
8f26e18c
JEB
1914 dm_log_userspace = 1;
1915 }
1916
1917 /* Region size */
1918 log_parm_count = 1;
1919
1920 /* [no]sync, block_on_error etc. */
1921 log_parm_count += hweight32(seg->flags);
311d6d81 1922
8f26e18c
JEB
1923 /* "handle_errors" is a feature arg now */
1924 if (handle_errors)
1925 log_parm_count--;
1926
1927 /* DM_CORELOG does not count in the param list */
1928 if (seg->flags & DM_CORELOG)
1929 log_parm_count--;
1930
1931 if (seg->clustered) {
1932 log_parm_count++; /* For UUID */
1933
1934 if (!dm_log_userspace)
ffa9b6a5 1935 EMIT_PARAMS(pos, "clustered-");
49b95a5e
JEB
1936 else
1937 /* For clustered-* type field inserted later */
1938 log_parm_count++;
8f26e18c 1939 }
dbcb64b8 1940
8f26e18c
JEB
1941 if (!seg->log)
1942 logtype = "core";
1943 else {
1944 logtype = "disk";
1945 log_parm_count++;
1946 if (!_build_dev_string(logbuf, sizeof(logbuf), seg->log))
1947 return_0;
1948 }
dbcb64b8 1949
8f26e18c
JEB
1950 if (dm_log_userspace)
1951 EMIT_PARAMS(pos, "userspace %u %s clustered-%s",
1952 log_parm_count, seg->uuid, logtype);
1953 else
ffa9b6a5 1954 EMIT_PARAMS(pos, "%s %u", logtype, log_parm_count);
dbcb64b8 1955
8f26e18c
JEB
1956 if (seg->log)
1957 EMIT_PARAMS(pos, " %s", logbuf);
1958
1959 EMIT_PARAMS(pos, " %u", seg->region_size);
dbcb64b8 1960
8f26e18c
JEB
1961 if (seg->clustered && !dm_log_userspace)
1962 EMIT_PARAMS(pos, " %s", seg->uuid);
67b25ed4 1963
8f26e18c
JEB
1964 if ((seg->flags & DM_NOSYNC))
1965 EMIT_PARAMS(pos, " nosync");
1966 else if ((seg->flags & DM_FORCESYNC))
1967 EMIT_PARAMS(pos, " sync");
dbcb64b8 1968
8f26e18c
JEB
1969 if (block_on_error)
1970 EMIT_PARAMS(pos, " block_on_error");
1971
1972 EMIT_PARAMS(pos, " %u ", seg->mirror_area_count);
1973
5f3325fc 1974 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
3c74075f 1975 return_0;
dbcb64b8 1976
8f26e18c
JEB
1977 if (handle_errors)
1978 EMIT_PARAMS(pos, " 1 handle_errors");
ffa9b6a5 1979
3c74075f 1980 return 1;
8f26e18c
JEB
1981}
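/*
 * Example (hypothetical values): a parameter string produced by the
 * function above could be
 *   "disk 2 253:4 1024 2 253:5 0 253:6 0 1 handle_errors"
 * i.e. a disk log with two log arguments (log device and region size),
 * two mirror legs, and one feature argument.
 */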
1982
cac52ca4
JEB
1983static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
1984 uint32_t minor, struct load_segment *seg,
1985 uint64_t *seg_start, char *params,
1986 size_t paramsize)
1987{
ad2432dc 1988 uint32_t i;
cac52ca4
JEB
1989 int param_count = 1; /* mandatory 'chunk size'/'stripe size' arg */
1990 int pos = 0;
1991
1992 if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
1993 param_count++;
1994
1995 if (seg->region_size)
1996 param_count += 2;
1997
ad2432dc
MB
1998 /* rebuilds is 64-bit */
1999 param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
2000 param_count += 2 * hweight32(seg->rebuilds >> 32);
f439e65b 2001
cac52ca4
JEB
2002 if ((seg->type == SEG_RAID1) && seg->stripe_size)
2003 log_error("WARNING: Ignoring RAID1 stripe size");
2004
2005 EMIT_PARAMS(pos, "%s %d %u", dm_segtypes[seg->type].target,
2006 param_count, seg->stripe_size);
2007
2008 if (seg->flags & DM_NOSYNC)
2009 EMIT_PARAMS(pos, " nosync");
2010 else if (seg->flags & DM_FORCESYNC)
2011 EMIT_PARAMS(pos, " sync");
2012
2013 if (seg->region_size)
2014 EMIT_PARAMS(pos, " region_size %u", seg->region_size);
2015
f439e65b
JEB
2016 for (i = 0; i < (seg->area_count / 2); i++)
2017 if (seg->rebuilds & (1ULL << i))
2018 EMIT_PARAMS(pos, " rebuild %u", i);
2019
cac52ca4
JEB
2020 /* Print number of metadata/data device pairs */
2021 EMIT_PARAMS(pos, " %u", seg->area_count/2);
2022
2023 if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
2024 return_0;
2025
2026 return 1;
2027}
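/*
 * Example (hypothetical values): a raid parameter string for three
 * raid5_ls data devices with a 64-sector stripe and an explicit region
 * size could be
 *   "raid5_ls 3 64 region_size 1024 3 - 253:2 - 253:3 - 253:4"
 * where "-" marks an absent metadata device (see _emit_areas_line).
 */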
2028
8f26e18c
JEB
2029static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
2030 uint32_t minor, struct load_segment *seg,
2031 uint64_t *seg_start, char *params,
2032 size_t paramsize)
2033{
2034 int pos = 0;
2035 int r;
cac52ca4 2036 int target_type_is_raid = 0;
8f26e18c 2037 char originbuf[DM_FORMAT_DEV_BUFSIZE], cowbuf[DM_FORMAT_DEV_BUFSIZE];
4251236e 2038 char pool[DM_FORMAT_DEV_BUFSIZE], metadata[DM_FORMAT_DEV_BUFSIZE];
dbcb64b8 2039
8f26e18c
JEB
2040 switch(seg->type) {
2041 case SEG_ERROR:
2042 case SEG_ZERO:
2043 case SEG_LINEAR:
2044 break;
2045 case SEG_MIRRORED:
2046 /* Mirrors are pretty complicated - now in separate function */
beecb1e1 2047 r = _mirror_emit_segment_line(dmt, seg, params, paramsize);
3c74075f
JEB
2048 if (!r)
2049 return_0;
165e4a11 2050 break;
b262f3e1
ZK
2051 case SEG_REPLICATOR:
2052 if ((r = _replicator_emit_segment_line(seg, params, paramsize,
2053 &pos)) <= 0) {
2054 stack;
2055 return r;
2056 }
2057 break;
2058 case SEG_REPLICATOR_DEV:
2059 if (!seg->replicator || !_build_dev_string(originbuf,
2060 sizeof(originbuf),
2061 seg->replicator))
2062 return_0;
2063
2064 EMIT_PARAMS(pos, "%s %" PRIu64, originbuf, seg->rdevice_index);
2065 break;
165e4a11 2066 case SEG_SNAPSHOT:
aa6f4e51 2067 case SEG_SNAPSHOT_MERGE:
b4f1578f
AK
2068 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2069 return_0;
2070 if (!_build_dev_string(cowbuf, sizeof(cowbuf), seg->cow))
2071 return_0;
ffa9b6a5
ZK
2072 EMIT_PARAMS(pos, "%s %s %c %d", originbuf, cowbuf,
2073 seg->persistent ? 'P' : 'N', seg->chunk_size);
165e4a11
AK
2074 break;
2075 case SEG_SNAPSHOT_ORIGIN:
b4f1578f
AK
2076 if (!_build_dev_string(originbuf, sizeof(originbuf), seg->origin))
2077 return_0;
ffa9b6a5 2078 EMIT_PARAMS(pos, "%s", originbuf);
165e4a11
AK
2079 break;
2080 case SEG_STRIPED:
609faae9 2081 EMIT_PARAMS(pos, "%u %u ", seg->area_count, seg->stripe_size);
165e4a11 2082 break;
12ca060e 2083 case SEG_CRYPT:
609faae9 2084 EMIT_PARAMS(pos, "%s%s%s%s%s %s %" PRIu64 " ", seg->cipher,
12ca060e
MB
2085 seg->chainmode ? "-" : "", seg->chainmode ?: "",
2086 seg->iv ? "-" : "", seg->iv ?: "", seg->key,
2087 seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
2088 seg->iv_offset : *seg_start);
2089 break;
cac52ca4
JEB
2090 case SEG_RAID1:
2091 case SEG_RAID4:
2092 case SEG_RAID5_LA:
2093 case SEG_RAID5_RA:
2094 case SEG_RAID5_LS:
2095 case SEG_RAID5_RS:
2096 case SEG_RAID6_ZR:
2097 case SEG_RAID6_NR:
2098 case SEG_RAID6_NC:
2099 target_type_is_raid = 1;
2100 r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
2101 params, paramsize);
2102 if (!r)
2103 return_0;
2104
2105 break;
4251236e
ZK
2106 case SEG_THIN_POOL:
2107 if (!_build_dev_string(metadata, sizeof(metadata), seg->metadata))
2108 return_0;
2109 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2110 return_0;
2111 EMIT_PARAMS(pos, "%s %s %d %" PRIu64 " %s", metadata, pool,
e9156c2b 2112 seg->data_block_size, seg->low_water_mark,
ac08d9c0 2113 seg->skip_block_zeroing ? "1 skip_block_zeroing" : "0");
4251236e
ZK
2114 break;
2115 case SEG_THIN:
2116 if (!_build_dev_string(pool, sizeof(pool), seg->pool))
2117 return_0;
2118 EMIT_PARAMS(pos, "%s %d", pool, seg->device_id);
2119 break;
165e4a11
AK
2120 }
2121
2122 switch(seg->type) {
2123 case SEG_ERROR:
b262f3e1 2124 case SEG_REPLICATOR:
165e4a11
AK
2125 case SEG_SNAPSHOT:
2126 case SEG_SNAPSHOT_ORIGIN:
aa6f4e51 2127 case SEG_SNAPSHOT_MERGE:
165e4a11 2128 case SEG_ZERO:
4251236e
ZK
2129 case SEG_THIN_POOL:
2130 case SEG_THIN:
165e4a11 2131 break;
12ca060e 2132 case SEG_CRYPT:
165e4a11 2133 case SEG_LINEAR:
b262f3e1 2134 case SEG_REPLICATOR_DEV:
165e4a11
AK
2135 case SEG_STRIPED:
2136 if ((r = _emit_areas_line(dmt, seg, params, paramsize, &pos)) <= 0) {
2137 stack;
2138 return r;
2139 }
b6793963
AK
2140 if (!params[0]) {
2141 log_error("No parameters supplied for %s target "
2142 "%u:%u.", dm_segtypes[seg->type].target,
812e10ac 2143 major, minor);
b6793963
AK
2144 return 0;
2145 }
165e4a11
AK
2146 break;
2147 }
2148
4b2cae46
AK
2149 log_debug("Adding target to (%" PRIu32 ":%" PRIu32 "): %" PRIu64
2150 " %" PRIu64 " %s %s", major, minor,
f439e65b
JEB
2151 *seg_start, seg->size, target_type_is_raid ? "raid" :
2152 dm_segtypes[seg->type].target, params);
165e4a11 2153
cac52ca4
JEB
2154 if (!dm_task_add_target(dmt, *seg_start, seg->size,
2155 target_type_is_raid ? "raid" :
2156 dm_segtypes[seg->type].target, params))
b4f1578f 2157 return_0;
165e4a11
AK
2158
2159 *seg_start += seg->size;
2160
2161 return 1;
2162}
2163
ffa9b6a5
ZK
2164#undef EMIT_PARAMS
2165
4b2cae46
AK
2166static int _emit_segment(struct dm_task *dmt, uint32_t major, uint32_t minor,
2167 struct load_segment *seg, uint64_t *seg_start)
165e4a11
AK
2168{
2169 char *params;
2170 size_t paramsize = 4096;
2171 int ret;
2172
2173 do {
2174 if (!(params = dm_malloc(paramsize))) {
2175 log_error("Insufficient space for target parameters.");
2176 return 0;
2177 }
2178
12ea7cb1 2179 params[0] = '\0';
4b2cae46
AK
2180 ret = _emit_segment_line(dmt, major, minor, seg, seg_start,
2181 params, paramsize);
165e4a11
AK
2182 dm_free(params);
2183
2184 if (!ret)
2185 stack;
2186
2187 if (ret >= 0)
2188 return ret;
2189
2190 log_debug("Insufficient space in params[%" PRIsize_t
2191 "] for target parameters.", paramsize);
2192
2193 paramsize *= 2;
2194 } while (paramsize < MAX_TARGET_PARAMSIZE);
2195
2196 log_error("Target parameter size too big. Aborting.");
2197 return 0;
2198}
2199
b4f1578f 2200static int _load_node(struct dm_tree_node *dnode)
165e4a11
AK
2201{
2202 int r = 0;
2203 struct dm_task *dmt;
2204 struct load_segment *seg;
df390f17 2205 uint64_t seg_start = 0, existing_table_size;
165e4a11 2206
4b2cae46
AK
2207 log_verbose("Loading %s table (%" PRIu32 ":%" PRIu32 ")", dnode->name,
2208 dnode->info.major, dnode->info.minor);
165e4a11
AK
2209
2210 if (!(dmt = dm_task_create(DM_DEVICE_RELOAD))) {
2211 log_error("Reload dm_task creation failed for %s", dnode->name);
2212 return 0;
2213 }
2214
2215 if (!dm_task_set_major(dmt, dnode->info.major) ||
2216 !dm_task_set_minor(dmt, dnode->info.minor)) {
2217 log_error("Failed to set device number for %s reload.", dnode->name);
2218 goto out;
2219 }
2220
2221 if (dnode->props.read_only && !dm_task_set_ro(dmt)) {
2222 log_error("Failed to set read only flag for %s", dnode->name);
2223 goto out;
2224 }
2225
2226 if (!dm_task_no_open_count(dmt))
2227 log_error("Failed to disable open_count");
2228
2c44337b 2229 dm_list_iterate_items(seg, &dnode->props.segs)
4b2cae46
AK
2230 if (!_emit_segment(dmt, dnode->info.major, dnode->info.minor,
2231 seg, &seg_start))
b4f1578f 2232 goto_out;
165e4a11 2233
ec289b64
AK
2234 if (!dm_task_suppress_identical_reload(dmt))
2235 log_error("Failed to suppress reload of identical tables.");
2236
2237 if ((r = dm_task_run(dmt))) {
165e4a11 2238 r = dm_task_get_info(dmt, &dnode->info);
ec289b64
AK
2239 if (r && !dnode->info.inactive_table)
2240 log_verbose("Suppressed %s identical table reload.",
2241 dnode->name);
bb875bb9 2242
df390f17 2243 existing_table_size = dm_task_get_existing_table_size(dmt);
bb875bb9 2244 if ((dnode->props.size_changed =
df390f17 2245 (existing_table_size == seg_start) ? 0 : 1)) {
bb875bb9 2246 log_debug("Table size changed from %" PRIu64 " to %"
df390f17 2247 PRIu64 " for %s", existing_table_size,
bb875bb9 2248 seg_start, dnode->name);
df390f17
AK
2249 /*
2250 * Kernel usually skips size validation on zero-length devices
2251 * now so no need to preload them.
2252 */
2253 /* FIXME In which kernel version did this begin? */
2254 if (!existing_table_size && dnode->props.delay_resume_if_new)
2255 dnode->props.size_changed = 0;
2256 }
ec289b64 2257 }
165e4a11
AK
2258
2259 dnode->props.segment_count = 0;
2260
2261out:
2262 dm_task_destroy(dmt);
2263
2264 return r;
165e4a11
AK
2265}
2266
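/*
 * dm_tree_preload_children() walks the tree depth-first: it creates any
 * missing nodes, loads their tables, resumes children whose table size
 * changed so parents see the new size, sends queued thin pool messages,
 * and finally synchronizes with udev if an immediate_dev_node was seen.
 */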
b4f1578f 2267int dm_tree_preload_children(struct dm_tree_node *dnode,
bb875bb9
AK
2268 const char *uuid_prefix,
2269 size_t uuid_prefix_len)
165e4a11 2270{
2ca6b865 2271 int r = 1;
165e4a11 2272 void *handle = NULL;
b4f1578f 2273 struct dm_tree_node *child;
165e4a11 2274 struct dm_info newinfo;
566515c0 2275 int update_devs_flag = 0;
165e4a11
AK
2276
2277 /* Preload children first */
b4f1578f 2278 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
165e4a11
AK
2279 /* Skip existing non-device-mapper devices */
2280 if (!child->info.exists && child->info.major)
2281 continue;
2282
2283 /* Ignore if it doesn't belong to this VG */
87f98002
AK
2284 if (child->info.exists &&
2285 !_uuid_prefix_matches(child->uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2286 continue;
2287
b4f1578f 2288 if (dm_tree_node_num_children(child, 0))
2ca6b865
MS
2289 if (!dm_tree_preload_children(child, uuid_prefix, uuid_prefix_len))
2290 return_0;
165e4a11 2291
165e4a11 2292 /* FIXME Cope if name exists with no uuid? */
3d6782b3
ZK
2293 if (!child->info.exists && !_create_node(child))
2294 return_0;
165e4a11 2295
3d6782b3
ZK
2296 if (!child->info.inactive_table &&
2297 child->props.segment_count &&
2298 !_load_node(child))
2299 return_0;
165e4a11 2300
eb91c4ee
MB
2301 /* Propagate device size change */
2302 if (child->props.size_changed)
2303 dnode->props.size_changed = 1;
2304
bb875bb9 2305 /* Resume device immediately if it has parents and its size changed */
3776c494 2306 if (!dm_tree_node_num_children(child, 1) || !child->props.size_changed)
165e4a11
AK
2307 continue;
2308
7707ea90
AK
2309 if (!child->info.inactive_table && !child->info.suspended)
2310 continue;
2311
fc795d87 2312 if (!_resume_node(child->name, child->info.major, child->info.minor,
bd90c6b2 2313 child->props.read_ahead, child->props.read_ahead_flags,
1840aa09
AK
2314 &newinfo, &child->dtree->cookie, child->udev_flags,
2315 child->info.suspended)) {
165e4a11 2316 log_error("Unable to resume %s (%" PRIu32
fc795d87 2317 ":%" PRIu32 ")", child->name, child->info.major,
165e4a11 2318 child->info.minor);
2ca6b865 2319 r = 0;
165e4a11
AK
2320 continue;
2321 }
2322
2323 /* Update cached info */
2324 child->info = newinfo;
bbcd37e4
ZK
2325 if (child->props.send_messages &&
2326 !(r = _node_send_messages(child, uuid_prefix, uuid_prefix_len))) {
2327 stack;
2328 continue;
2329 }
566515c0
PR
2330 /*
2331 * Prepare for immediate synchronization with udev and flush all stacked
2332 * dev node operations if requested by immediate_dev_node property. But
2333 * finish processing current level in the tree first.
2334 */
2335 if (child->props.immediate_dev_node)
2336 update_devs_flag = 1;
165e4a11
AK
2337 }
2338
bbcd37e4
ZK
2339 if (r && dnode->props.send_messages &&
2340 !(r = _node_send_messages(dnode, uuid_prefix, uuid_prefix_len)))
2341 stack;
165e4a11 2342
566515c0
PR
2343 if (update_devs_flag) {
2344 if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
2345 stack;
2346 dm_tree_set_cookie(dnode, 0);
566515c0
PR
2347 }
2348
11f64f0a 2349 if (r && !_node_send_messages(dnode, uuid_prefix, uuid_prefix_len)) {
25e6ab87
ZK
2350 stack;
2351 if (!(dm_tree_deactivate_children(dnode, uuid_prefix, uuid_prefix_len)))
2352 log_error("Failed to deactivate %s", dnode->name);
2353 r = 0;
2354 }
2355
2ca6b865 2356 return r;
165e4a11
AK
2357}
2358
165e4a11
AK
2359/*
2360 * Returns 1 if unsure.
2361 */
b4f1578f 2362int dm_tree_children_use_uuid(struct dm_tree_node *dnode,
165e4a11
AK
2363 const char *uuid_prefix,
2364 size_t uuid_prefix_len)
2365{
2366 void *handle = NULL;
b4f1578f 2367 struct dm_tree_node *child = dnode;
165e4a11
AK
2368 const char *uuid;
2369
b4f1578f
AK
2370 while ((child = dm_tree_next_child(&handle, dnode, 0))) {
2371 if (!(uuid = dm_tree_node_get_uuid(child))) {
2372 log_error("Failed to get uuid for dtree node.");
165e4a11
AK
2373 return 1;
2374 }
2375
87f98002 2376 if (_uuid_prefix_matches(uuid, uuid_prefix, uuid_prefix_len))
165e4a11
AK
2377 return 1;
2378
b4f1578f
AK
2379 if (dm_tree_node_num_children(child, 0))
2380 dm_tree_children_use_uuid(child, uuid_prefix, uuid_prefix_len);
165e4a11
AK
2381 }
2382
2383 return 0;
2384}
2385
2386/*
2387 * Target functions
2388 */
b4f1578f 2389static struct load_segment *_add_segment(struct dm_tree_node *dnode, unsigned type, uint64_t size)
165e4a11
AK
2390{
2391 struct load_segment *seg;
2392
b4f1578f
AK
2393 if (!(seg = dm_pool_zalloc(dnode->dtree->mem, sizeof(*seg)))) {
2394 log_error("dtree node segment allocation failed");
165e4a11
AK
2395 return NULL;
2396 }
2397
2398 seg->type = type;
2399 seg->size = size;
2400 seg->area_count = 0;
2c44337b 2401 dm_list_init(&seg->areas);
165e4a11
AK
2402 seg->stripe_size = 0;
2403 seg->persistent = 0;
2404 seg->chunk_size = 0;
2405 seg->cow = NULL;
2406 seg->origin = NULL;
aa6f4e51 2407 seg->merge = NULL;
165e4a11 2408
2c44337b 2409 dm_list_add(&dnode->props.segs, &seg->list);
165e4a11
AK
2410 dnode->props.segment_count++;
2411
2412 return seg;
2413}
2414
b4f1578f 2415int dm_tree_node_add_snapshot_origin_target(struct dm_tree_node *dnode,
40e5fd8b
AK
2416 uint64_t size,
2417 const char *origin_uuid)
165e4a11
AK
2418{
2419 struct load_segment *seg;
b4f1578f 2420 struct dm_tree_node *origin_node;
165e4a11 2421
b4f1578f
AK
2422 if (!(seg = _add_segment(dnode, SEG_SNAPSHOT_ORIGIN, size)))
2423 return_0;
165e4a11 2424
b4f1578f 2425 if (!(origin_node = dm_tree_find_node_by_uuid(dnode->dtree, origin_uuid))) {
165e4a11
AK
2426 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2427 return 0;
2428 }
2429
2430 seg->origin = origin_node;
b4f1578f
AK
2431 if (!_link_tree_nodes(dnode, origin_node))
2432 return_0;
165e4a11 2433
56c28292
AK
2434 /* Resume snapshot origins after new snapshots */
2435 dnode->activation_priority = 1;
2436
165e4a11
AK
2437 return 1;
2438}
2439
aa6f4e51
MS
2440static int _add_snapshot_target(struct dm_tree_node *node,
2441 uint64_t size,
2442 const char *origin_uuid,
2443 const char *cow_uuid,
2444 const char *merge_uuid,
2445 int persistent,
2446 uint32_t chunk_size)
165e4a11
AK
2447{
2448 struct load_segment *seg;
aa6f4e51
MS
2449 struct dm_tree_node *origin_node, *cow_node, *merge_node;
2450 unsigned seg_type;
2451
2452 seg_type = !merge_uuid ? SEG_SNAPSHOT : SEG_SNAPSHOT_MERGE;
165e4a11 2453
aa6f4e51 2454 if (!(seg = _add_segment(node, seg_type, size)))
b4f1578f 2455 return_0;
165e4a11 2456
b4f1578f 2457 if (!(origin_node = dm_tree_find_node_by_uuid(node->dtree, origin_uuid))) {
165e4a11
AK
2458 log_error("Couldn't find snapshot origin uuid %s.", origin_uuid);
2459 return 0;
2460 }
2461
2462 seg->origin = origin_node;
b4f1578f
AK
2463 if (!_link_tree_nodes(node, origin_node))
2464 return_0;
165e4a11 2465
b4f1578f 2466 if (!(cow_node = dm_tree_find_node_by_uuid(node->dtree, cow_uuid))) {
aa6f4e51 2467 log_error("Couldn't find snapshot COW device uuid %s.", cow_uuid);
165e4a11
AK
2468 return 0;
2469 }
2470
2471 seg->cow = cow_node;
b4f1578f
AK
2472 if (!_link_tree_nodes(node, cow_node))
2473 return_0;
165e4a11
AK
2474
2475 seg->persistent = persistent ? 1 : 0;
2476 seg->chunk_size = chunk_size;
2477
aa6f4e51
MS
2478 if (merge_uuid) {
2479 if (!(merge_node = dm_tree_find_node_by_uuid(node->dtree, merge_uuid))) {
2480 /* not a pure error, merging snapshot may have been deactivated */
2481 log_verbose("Couldn't find merging snapshot uuid %s.", merge_uuid);
2482 } else {
2483 seg->merge = merge_node;
2484 /* must not link merging snapshot, would undermine activation_priority below */
2485 }
2486
2487 /* Resume snapshot-merge (acting origin) after other snapshots */
2488 node->activation_priority = 1;
2489 if (seg->merge) {
2490 /* Resume merging snapshot after snapshot-merge */
2491 seg->merge->activation_priority = 2;
2492 }
2493 }
2494
165e4a11
AK
2495 return 1;
2496}
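/*
 * Example (hypothetical values): the snapshot segment registered here is
 * later emitted as "<origin> <COW> <P|N> <chunk_size>", for instance
 * "253:2 253:3 P 16" for a persistent snapshot with 16-sector chunks.
 */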
2497
aa6f4e51
MS
2498
2499int dm_tree_node_add_snapshot_target(struct dm_tree_node *node,
2500 uint64_t size,
2501 const char *origin_uuid,
2502 const char *cow_uuid,
2503 int persistent,
2504 uint32_t chunk_size)
2505{
2506 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2507 NULL, persistent, chunk_size);
2508}
2509
2510int dm_tree_node_add_snapshot_merge_target(struct dm_tree_node *node,
2511 uint64_t size,
2512 const char *origin_uuid,
2513 const char *cow_uuid,
2514 const char *merge_uuid,
2515 uint32_t chunk_size)
2516{
2517 return _add_snapshot_target(node, size, origin_uuid, cow_uuid,
2518 merge_uuid, 1, chunk_size);
2519}
2520
b4f1578f 2521int dm_tree_node_add_error_target(struct dm_tree_node *node,
40e5fd8b 2522 uint64_t size)
165e4a11 2523{
b4f1578f
AK
2524 if (!_add_segment(node, SEG_ERROR, size))
2525 return_0;
165e4a11
AK
2526
2527 return 1;
2528}
2529
b4f1578f 2530int dm_tree_node_add_zero_target(struct dm_tree_node *node,
40e5fd8b 2531 uint64_t size)
165e4a11 2532{
b4f1578f
AK
2533 if (!_add_segment(node, SEG_ZERO, size))
2534 return_0;
165e4a11
AK
2535
2536 return 1;
2537}
2538
b4f1578f 2539int dm_tree_node_add_linear_target(struct dm_tree_node *node,
40e5fd8b 2540 uint64_t size)
165e4a11 2541{
b4f1578f
AK
2542 if (!_add_segment(node, SEG_LINEAR, size))
2543 return_0;
165e4a11
AK
2544
2545 return 1;
2546}
2547
b4f1578f 2548int dm_tree_node_add_striped_target(struct dm_tree_node *node,
40e5fd8b
AK
2549 uint64_t size,
2550 uint32_t stripe_size)
165e4a11
AK
2551{
2552 struct load_segment *seg;
2553
b4f1578f
AK
2554 if (!(seg = _add_segment(node, SEG_STRIPED, size)))
2555 return_0;
165e4a11
AK
2556
2557 seg->stripe_size = stripe_size;
2558
2559 return 1;
2560}
2561
12ca060e
MB
2562int dm_tree_node_add_crypt_target(struct dm_tree_node *node,
2563 uint64_t size,
2564 const char *cipher,
2565 const char *chainmode,
2566 const char *iv,
2567 uint64_t iv_offset,
2568 const char *key)
2569{
2570 struct load_segment *seg;
2571
2572 if (!(seg = _add_segment(node, SEG_CRYPT, size)))
2573 return_0;
2574
2575 seg->cipher = cipher;
2576 seg->chainmode = chainmode;
2577 seg->iv = iv;
2578 seg->iv_offset = iv_offset;
2579 seg->key = key;
2580
2581 return 1;
2582}
2583
b4f1578f 2584int dm_tree_node_add_mirror_target_log(struct dm_tree_node *node,
165e4a11 2585 uint32_t region_size,
08e64ce5 2586 unsigned clustered,
165e4a11 2587 const char *log_uuid,
ce7ed2c0
AK
2588 unsigned area_count,
2589 uint32_t flags)
165e4a11 2590{
908db078 2591 struct dm_tree_node *log_node = NULL;
165e4a11
AK
2592 struct load_segment *seg;
2593
2594 if (!node->props.segment_count) {
b8175c33 2595 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
2596 return 0;
2597 }
2598
2c44337b 2599 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 2600
24b026e3 2601 if (log_uuid) {
67b25ed4
AK
2602 if (!(seg->uuid = dm_pool_strdup(node->dtree->mem, log_uuid))) {
2603 log_error("log uuid pool_strdup failed");
2604 return 0;
2605 }
df390f17
AK
2606 if ((flags & DM_CORELOG))
2607 /* For pvmove: immediate resume (for size validation) isn't needed. */
2608 node->props.delay_resume_if_new = 1;
2609 else {
9723090c
AK
2610 if (!(log_node = dm_tree_find_node_by_uuid(node->dtree, log_uuid))) {
2611 log_error("Couldn't find mirror log uuid %s.", log_uuid);
2612 return 0;
2613 }
2614
566515c0
PR
2615 if (clustered)
2616 log_node->props.immediate_dev_node = 1;
2617
0a99713e
AK
2618 /* The kernel validates the size of disk logs. */
2619 /* FIXME Propagate to any devices below */
2620 log_node->props.delay_resume_if_new = 0;
2621
9723090c
AK
2622 if (!_link_tree_nodes(node, log_node))
2623 return_0;
2624 }
165e4a11
AK
2625 }
2626
2627 seg->log = log_node;
165e4a11
AK
2628 seg->region_size = region_size;
2629 seg->clustered = clustered;
2630 seg->mirror_area_count = area_count;
dbcb64b8 2631 seg->flags = flags;
165e4a11
AK
2632
2633 return 1;
2634}
2635
b4f1578f 2636int dm_tree_node_add_mirror_target(struct dm_tree_node *node,
40e5fd8b 2637 uint64_t size)
165e4a11 2638{
cbecd3cd 2639 if (!_add_segment(node, SEG_MIRRORED, size))
b4f1578f 2640 return_0;
165e4a11
AK
2641
2642 return 1;
2643}
2644
cac52ca4
JEB
2645int dm_tree_node_add_raid_target(struct dm_tree_node *node,
2646 uint64_t size,
2647 const char *raid_type,
2648 uint32_t region_size,
2649 uint32_t stripe_size,
f439e65b 2650 uint64_t rebuilds,
cac52ca4
JEB
2651 uint64_t reserved2)
2652{
2653 int i;
2654 struct load_segment *seg = NULL;
2655
2656 for (i = 0; dm_segtypes[i].target && !seg; i++)
2657 if (!strcmp(raid_type, dm_segtypes[i].target))
2658 if (!(seg = _add_segment(node,
2659 dm_segtypes[i].type, size)))
2660 return_0;
2661
b2fa9b43
JEB
2662 if (!seg)
2663 return_0;
2664
cac52ca4
JEB
2665 seg->region_size = region_size;
2666 seg->stripe_size = stripe_size;
2667 seg->area_count = 0;
f439e65b 2668 seg->rebuilds = rebuilds;
cac52ca4
JEB
2669
2670 return 1;
2671}
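/*
 * Usage sketch (assumed caller values): raid_type is matched against the
 * dm_segtypes[] target names, e.g.
 *   dm_tree_node_add_raid_target(node, size, "raid1", region_size, 0, 0, 0);
 * with stripe_size and rebuilds both zero for a raid1 array that needs
 * no device rebuilt.
 */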
2672
b262f3e1
ZK
2673int dm_tree_node_add_replicator_target(struct dm_tree_node *node,
2674 uint64_t size,
2675 const char *rlog_uuid,
2676 const char *rlog_type,
2677 unsigned rsite_index,
2678 dm_replicator_mode_t mode,
2679 uint32_t async_timeout,
2680 uint64_t fall_behind_data,
2681 uint32_t fall_behind_ios)
2682{
2683 struct load_segment *rseg;
2684 struct replicator_site *rsite;
2685
2686 /* Local site0 - adds replicator segment and links rlog device */
2687 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2688 if (node->props.segment_count) {
2689 log_error(INTERNAL_ERROR "Attempt to add replicator segment to already used node.");
2690 return 0;
2691 }
2692
2693 if (!(rseg = _add_segment(node, SEG_REPLICATOR, size)))
2694 return_0;
2695
2696 if (!(rseg->log = dm_tree_find_node_by_uuid(node->dtree, rlog_uuid))) {
2697 log_error("Missing replicator log uuid %s.", rlog_uuid);
2698 return 0;
2699 }
2700
2701 if (!_link_tree_nodes(node, rseg->log))
2702 return_0;
2703
2704 if (strcmp(rlog_type, "ringbuffer") != 0) {
2705 log_error("Unsupported replicator log type %s.", rlog_type);
2706 return 0;
2707 }
2708
2709 if (!(rseg->rlog_type = dm_pool_strdup(node->dtree->mem, rlog_type)))
2710 return_0;
2711
2712 dm_list_init(&rseg->rsites);
2713 rseg->rdevice_count = 0;
2714 node->activation_priority = 1;
2715 }
2716
2717 /* Add site to segment */
2718 if (mode == DM_REPLICATOR_SYNC
2719 && (async_timeout || fall_behind_ios || fall_behind_data)) {
2720 log_error("Async parameters passed for synchronnous replicator.");
2721 return 0;
2722 }
2723
2724 if (node->props.segment_count != 1) {
2725 log_error(INTERNAL_ERROR "Attempt to add remote site area before setting replicator log.");
2726 return 0;
2727 }
2728
2729 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2730 if (rseg->type != SEG_REPLICATOR) {
2731 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2732 dm_segtypes[rseg->type].target);
2733 return 0;
2734 }
2735
2736 if (!(rsite = dm_pool_zalloc(node->dtree->mem, sizeof(*rsite)))) {
2737 log_error("Failed to allocate remote site segment.");
2738 return 0;
2739 }
2740
2741 dm_list_add(&rseg->rsites, &rsite->list);
2742 rseg->rsite_count++;
2743
2744 rsite->mode = mode;
2745 rsite->async_timeout = async_timeout;
2746 rsite->fall_behind_data = fall_behind_data;
2747 rsite->fall_behind_ios = fall_behind_ios;
2748 rsite->rsite_index = rsite_index;
2749
2750 return 1;
2751}
2752
2753/* Appends device node to Replicator */
2754int dm_tree_node_add_replicator_dev_target(struct dm_tree_node *node,
2755 uint64_t size,
2756 const char *replicator_uuid,
2757 uint64_t rdevice_index,
2758 const char *rdev_uuid,
2759 unsigned rsite_index,
2760 const char *slog_uuid,
2761 uint32_t slog_flags,
2762 uint32_t slog_region_size)
2763{
2764 struct seg_area *area;
2765 struct load_segment *rseg;
2766 struct load_segment *rep_seg;
2767
2768 if (rsite_index == REPLICATOR_LOCAL_SITE) {
2769 /* Site index for local target */
2770 if (!(rseg = _add_segment(node, SEG_REPLICATOR_DEV, size)))
2771 return_0;
2772
2773 if (!(rseg->replicator = dm_tree_find_node_by_uuid(node->dtree, replicator_uuid))) {
2774 log_error("Missing replicator uuid %s.", replicator_uuid);
2775 return 0;
2776 }
2777
2778 /* Local slink0 for replicator must always be initialized first */
2779 if (rseg->replicator->props.segment_count != 1) {
2780 log_error(INTERNAL_ERROR "Attempt to use non replicator segment.");
2781 return 0;
2782 }
2783
2784 rep_seg = dm_list_item(dm_list_last(&rseg->replicator->props.segs), struct load_segment);
2785 if (rep_seg->type != SEG_REPLICATOR) {
2786 log_error(INTERNAL_ERROR "Attempt to use non replicator segment %s.",
2787 dm_segtypes[rep_seg->type].target);
2788 return 0;
2789 }
2790 rep_seg->rdevice_count++;
2791
2792 if (!_link_tree_nodes(node, rseg->replicator))
2793 return_0;
2794
2795 rseg->rdevice_index = rdevice_index;
2796 } else {
2797 /* Local slink0 for replicator must always be initialized first */
2798 if (node->props.segment_count != 1) {
2799 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment.");
2800 return 0;
2801 }
2802
2803 rseg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
2804 if (rseg->type != SEG_REPLICATOR_DEV) {
2805 log_error(INTERNAL_ERROR "Attempt to use non replicator-dev segment %s.",
2806 dm_segtypes[rseg->type].target);
2807 return 0;
2808 }
2809 }
2810
2811 if (!(slog_flags & DM_CORELOG) && !slog_uuid) {
2812 log_error("Unspecified sync log uuid.");
2813 return 0;
2814 }
2815
2816 if (!dm_tree_node_add_target_area(node, NULL, rdev_uuid, 0))
2817 return_0;
2818
2819 area = dm_list_item(dm_list_last(&rseg->areas), struct seg_area);
2820
2821 if (!(slog_flags & DM_CORELOG)) {
2822 if (!(area->slog = dm_tree_find_node_by_uuid(node->dtree, slog_uuid))) {
2823 log_error("Couldn't find sync log uuid %s.", slog_uuid);
2824 return 0;
2825 }
2826
2827 if (!_link_tree_nodes(node, area->slog))
2828 return_0;
2829 }
2830
2831 area->flags = slog_flags;
2832 area->region_size = slog_region_size;
2833 area->rsite_index = rsite_index;
2834
2835 return 1;
2836}
2837
5668fe04
ZK
2838static int _thin_validate_device_id(uint32_t device_id)
2839{
2840 if (device_id > DM_THIN_MAX_DEVICE_ID) {
2841 log_error("Device id %u is higher than %u.",
2842 device_id, DM_THIN_MAX_DEVICE_ID);
2843 return 0;
2844 }
2845
2846 return 1;
2847}
2848
4251236e
ZK
2849int dm_tree_node_add_thin_pool_target(struct dm_tree_node *node,
2850 uint64_t size,
e0ea24be 2851 uint64_t transaction_id,
4251236e 2852 const char *metadata_uuid,
5668fd6a 2853 const char *pool_uuid,
4251236e 2854 uint32_t data_block_size,
e9156c2b 2855 uint64_t low_water_mark,
460c5991 2856 unsigned skip_block_zeroing)
4251236e
ZK
2857{
2858 struct load_segment *seg;
2859
3f53c059 2860 if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
565a4bfc 2861 log_error("Data block size %u is lower than %u sectors.",
3f53c059 2862 data_block_size, DM_THIN_MIN_DATA_BLOCK_SIZE);
4251236e
ZK
2863 return 0;
2864 }
2865
3f53c059 2866 if (data_block_size > DM_THIN_MAX_DATA_BLOCK_SIZE) {
565a4bfc 2867 log_error("Data block size %u is higher than %u sectors.",
3f53c059 2868 data_block_size, DM_THIN_MAX_DATA_BLOCK_SIZE);
4251236e
ZK
2869 return 0;
2870 }
2871
2872 if (!(seg = _add_segment(node, SEG_THIN_POOL, size)))
2873 return_0;
2874
2875 if (!(seg->metadata = dm_tree_find_node_by_uuid(node->dtree, metadata_uuid))) {
2876 log_error("Missing metadata uuid %s.", metadata_uuid);
2877 return 0;
2878 }
2879
2880 if (!_link_tree_nodes(node, seg->metadata))
2881 return_0;
2882
2883 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
2884 log_error("Missing pool uuid %s.", pool_uuid);
2885 return 0;
2886 }
2887
2888 if (!_link_tree_nodes(node, seg->pool))
2889 return_0;
2890
bbcd37e4
ZK
2891 node->props.send_messages = 1;
2892 seg->transaction_id = transaction_id;
e9156c2b 2893 seg->low_water_mark = low_water_mark;
e0ea24be 2894 seg->data_block_size = data_block_size;
460c5991 2895 seg->skip_block_zeroing = skip_block_zeroing;
25e6ab87
ZK
2896 dm_list_init(&seg->thin_messages);
2897
2898 return 1;
2899}
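/*
 * Example (hypothetical values): the resulting thin pool table parameters
 * are "<metadata dev> <data dev> <data block size> <low water mark>
 * <#features> [skip_block_zeroing]", for instance
 *   "253:2 253:3 128 8192 1 skip_block_zeroing"
 */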
2900
2901int dm_tree_node_add_thin_pool_message(struct dm_tree_node *node,
7b199dc5 2902 const struct dm_thin_message *message)
25e6ab87
ZK
2903{
2904 struct load_segment *seg;
2905 struct thin_message *tm;
2906
2907 if (node->props.segment_count != 1) {
759b9592 2908 log_error("Thin pool node must have only one segment.");
25e6ab87
ZK
2909 return 0;
2910 }
2911
2912 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
25e6ab87 2913 if (seg->type != SEG_THIN_POOL) {
759b9592 2914 log_error("Thin pool node has segment type %s.",
25e6ab87
ZK
2915 dm_segtypes[seg->type].target);
2916 return 0;
2917 }
2918
2919 if (!(tm = dm_pool_zalloc(node->dtree->mem, sizeof (*tm)))) {
2920 log_error("Failed to allocate thin message.");
2921 return 0;
2922 }
2923
2924 switch (message->type) {
2925 case DM_THIN_MESSAGE_CREATE_SNAP:
759b9592 2926 /* If the thin origin is active, it must be suspended first! */
25e6ab87 2927 if (message->u.m_create_snap.device_id == message->u.m_create_snap.origin_id) {
759b9592 2928 log_error("Cannot use same device id for origin and its snapshot.");
25e6ab87
ZK
2929 return 0;
2930 }
2931 if (!_thin_validate_device_id(message->u.m_create_snap.device_id) ||
2932 !_thin_validate_device_id(message->u.m_create_snap.origin_id))
2933 return_0;
2a0d806b 2934 tm->message.u.m_create_snap = message->u.m_create_snap;
25e6ab87
ZK
2935 break;
2936 case DM_THIN_MESSAGE_CREATE_THIN:
2937 if (!_thin_validate_device_id(message->u.m_create_thin.device_id))
2938 return_0;
2a0d806b 2939 tm->message.u.m_create_thin = message->u.m_create_thin;
660a42bc 2940 tm->expected_errno = EEXIST;
25e6ab87
ZK
2941 break;
2942 case DM_THIN_MESSAGE_DELETE:
2943 if (!_thin_validate_device_id(message->u.m_delete.device_id))
2944 return_0;
2a0d806b 2945 tm->message.u.m_delete = message->u.m_delete;
660a42bc 2946 tm->expected_errno = ENODATA;
25e6ab87
ZK
2947 break;
2948 case DM_THIN_MESSAGE_TRIM:
2949 if (!_thin_validate_device_id(message->u.m_trim.device_id))
2950 return_0;
2a0d806b 2951 tm->message.u.m_trim = message->u.m_trim;
25e6ab87
ZK
2952 break;
2953 case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
2954 if (message->u.m_set_transaction_id.current_id !=
2955 (message->u.m_set_transaction_id.new_id - 1)) {
2956 log_error("New transaction_id must be sequential.");
2957 return 0; /* FIXME: Maybe too strict here? */
2958 }
2a0d806b 2959 tm->message.u.m_set_transaction_id = message->u.m_set_transaction_id;
25e6ab87
ZK
2960 break;
2961 default:
2962 log_error("Unsupported message type %d.", (int) message->type);
2963 return 0;
2964 }
2965
2966 tm->message.type = message->type;
2967 dm_list_add(&seg->thin_messages, &tm->list);
4251236e
ZK
2968
2969 return 1;
2970}
2971
2972int dm_tree_node_add_thin_target(struct dm_tree_node *node,
2973 uint64_t size,
4251236e
ZK
2974 const char *thin_pool_uuid,
2975 uint32_t device_id)
2976{
2977 struct load_segment *seg;
2978
5668fe04
ZK
2979 if (!_thin_validate_device_id(device_id))
2980 return_0;
4251236e
ZK
2981
2982 if (!(seg = _add_segment(node, SEG_THIN, size)))
2983 return_0;
2984
2985 if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, thin_pool_uuid))) {
2986 log_error("Missing thin pool uuid %s.", thin_pool_uuid);
2987 return 0;
2988 }
2989
2990 if (!_link_tree_nodes(node, seg->pool))
2991 return_0;
2992
1419bf1c
ZK
2993 seg->device_id = device_id;
2994
4251236e
ZK
2995 return 1;
2996}
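/*
 * Example (hypothetical values): the thin device table built from this
 * segment is simply "<pool dev> <device id>", e.g. "253:4 1".
 */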
2997
b4f1578f 2998static int _add_area(struct dm_tree_node *node, struct load_segment *seg, struct dm_tree_node *dev_node, uint64_t offset)
165e4a11
AK
2999{
3000 struct seg_area *area;
3001
b4f1578f 3002 if (!(area = dm_pool_zalloc(node->dtree->mem, sizeof (*area)))) {
165e4a11
AK
3003 log_error("Failed to allocate target segment area.");
3004 return 0;
3005 }
3006
3007 area->dev_node = dev_node;
3008 area->offset = offset;
3009
2c44337b 3010 dm_list_add(&seg->areas, &area->list);
165e4a11
AK
3011 seg->area_count++;
3012
3013 return 1;
3014}
3015
b4f1578f 3016int dm_tree_node_add_target_area(struct dm_tree_node *node,
40e5fd8b
AK
3017 const char *dev_name,
3018 const char *uuid,
3019 uint64_t offset)
165e4a11
AK
3020{
3021 struct load_segment *seg;
3022 struct stat info;
b4f1578f 3023 struct dm_tree_node *dev_node;
165e4a11
AK
3024
3025 if ((!dev_name || !*dev_name) && (!uuid || !*uuid)) {
b4f1578f 3026 log_error("dm_tree_node_add_target_area called without device");
165e4a11
AK
3027 return 0;
3028 }
3029
3030 if (uuid) {
b4f1578f 3031 if (!(dev_node = dm_tree_find_node_by_uuid(node->dtree, uuid))) {
165e4a11
AK
3032 log_error("Couldn't find area uuid %s.", uuid);
3033 return 0;
3034 }
b4f1578f
AK
3035 if (!_link_tree_nodes(node, dev_node))
3036 return_0;
165e4a11 3037 } else {
6d04311e 3038 if (stat(dev_name, &info) < 0) {
165e4a11
AK
3039 log_error("Device %s not found.", dev_name);
3040 return 0;
3041 }
3042
40e5fd8b 3043 if (!S_ISBLK(info.st_mode)) {
165e4a11
AK
3044 log_error("Device %s is not a block device.", dev_name);
3045 return 0;
3046 }
3047
3048 /* FIXME Check correct macro use */
cda69e17
PR
3049 if (!(dev_node = _add_dev(node->dtree, node, MAJOR(info.st_rdev),
3050 MINOR(info.st_rdev), 0)))
b4f1578f 3051 return_0;
165e4a11
AK
3052 }
3053
3054 if (!node->props.segment_count) {
b8175c33 3055 log_error(INTERNAL_ERROR "Attempt to add target area to missing segment.");
165e4a11
AK
3056 return 0;
3057 }
3058
2c44337b 3059 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
165e4a11 3060
b4f1578f
AK
3061 if (!_add_area(node, seg, dev_node, offset))
3062 return_0;
165e4a11
AK
3063
3064 return 1;
db208f51 3065}
bd90c6b2 3066
6d04311e
JEB
3067int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
3068{
3069 struct load_segment *seg;
3070
3071 seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
3072
415c0690
AK
3073 switch (seg->type) {
3074 case SEG_RAID1:
3075 case SEG_RAID4:
3076 case SEG_RAID5_LA:
3077 case SEG_RAID5_RA:
3078 case SEG_RAID5_LS:
3079 case SEG_RAID5_RS:
3080 case SEG_RAID6_ZR:
3081 case SEG_RAID6_NR:
3082 case SEG_RAID6_NC:
3083 break;
3084 default:
3085 log_error("dm_tree_node_add_null_area() called on an unsupported segment type");
3086 return 0;
3087 }
3088
6d04311e
JEB
3089 if (!_add_area(node, seg, NULL, offset))
3090 return_0;
3091
3092 return 1;
3093}
3094
bd90c6b2
AK
3095void dm_tree_set_cookie(struct dm_tree_node *node, uint32_t cookie)
3096{
3097 node->dtree->cookie = cookie;
3098}
3099
3100uint32_t dm_tree_get_cookie(struct dm_tree_node *node)
3101{
3102 return node->dtree->cookie;
3103}