]> sourceware.org Git - lvm2.git/blame - lib/activate/activate.c
alloc: fix raid --alloc anywhere double allocs
[lvm2.git] / lib / activate / activate.c
CommitLineData
b1713d28 1/*
67cdbd7e 2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
a18dcfb5 3 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
b1713d28 4 *
6606c3ae
AK
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
be684599 9 * of the GNU Lesser General Public License v.2.1.
6606c3ae 10 *
be684599 11 * You should have received a copy of the GNU Lesser General Public License
6606c3ae
AK
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
b1713d28
JT
14 */
15
d1d9800e 16#include "lib.h"
a381c45a 17#include "metadata.h"
b1713d28 18#include "activate.h"
914c9723 19#include "memlock.h"
78125be9 20#include "display.h"
f7a14956 21#include "fs.h"
f894b4b1 22#include "lvm-exec.h"
7d1552c9 23#include "lvm-file.h"
41b2fd5f 24#include "lvm-string.h"
413cc918 25#include "toolcontext.h"
de6c9183 26#include "dev_manager.h"
de17d760 27#include "str_list.h"
4922197a 28#include "config.h"
352a99b9 29#include "filter.h"
15d91f5a 30#include "segtype.h"
d1e8046f 31#include "sharedlib.h"
12137231
JT
32
33#include <limits.h>
5986ec94 34#include <fcntl.h>
914c9723 35#include <unistd.h>
12137231 36
/* Log a "Skipping: ..." message at very-verbose level. */
#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
b1713d28 38
7d1552c9
AK
39int lvm1_present(struct cmd_context *cmd)
40{
900f5f81 41 static char path[PATH_MAX];
7d1552c9 42
0550c1b6 43 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
7d1552c9
AK
44 < 0) {
45 log_error("LVM1 proc global snprintf failed");
46 return 0;
47 }
48
49 if (path_exists(path))
50 return 1;
51 else
52 return 0;
53}
54
/*
 * Collect the kernel module names required to activate segment @seg
 * (and anything it references) into @modules.
 *
 * Walks: the segment's own segtype, the COW LVs of an origin's
 * snapshots, the snapshot segment of a COW LV, and recurses into any
 * AREA_LV sub-segments.  Returns 1 on success, 0 on allocation failure.
 */
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	/* Modules needed by this segment's own target type. */
	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	/* An origin additionally needs the modules of every snapshot COW LV. */
	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	/* A COW LV needs the modules of its snapshot segment's target type. */
	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	/* Recurse into stacked LVs used as segment areas. */
	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}
102
103int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
2c44337b 104 struct dm_list *modules)
6c81ed26
AK
105{
106 struct lv_segment *seg;
107
2c44337b 108 dm_list_iterate_items(seg, &lv->segments)
6c81ed26
AK
109 if (!list_segment_modules(mem, seg, modules))
110 return_0;
111
112 return 1;
113}
114
#ifndef DEVMAPPER_SUPPORT
/*
 * Stub implementations used when LVM2 is built without libdevmapper.
 * Query functions report "nothing active" (0); activation entry points
 * succeed as no-ops (1) so callers proceed without kernel interaction.
 */
void set_activation(int act)
{
	static int warned = 0;

	/* Warn only once, and only when someone tries to enable activation. */
	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
			struct logical_volume *lv, struct lvinfo *info)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
			 percent_t *percent)
{
	return 0;
}
int lv_thin_percent(const struct logical_volume *lv, int mapped,
		    percent_t *percent)
{
	return 0;
}
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
				uint64_t *transaction_id)
{
	return 0;
}
int lvs_in_vg_activated(const struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive, unsigned revert)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int lv_is_active(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
	return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
/* fs.c */
void fs_unlock(void)
{
}
/* dev_manager.c */
#include "targets.h"
int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
		   struct dm_tree_node *node, uint32_t start_area,
		   uint32_t areas)
{
	return 0;
}
int device_is_usable(struct device *dev)
{
	return 0;
}
int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
		       const char *layer, const char *target_type)
{
	return 0;
}
199e490e
AK
308#else /* DEVMAPPER_SUPPORT */
309
/* Global flag: non-zero while device-mapper activation is permitted. */
static int _activation = 1;

/*
 * Enable or disable activation.  A no-op when the requested state
 * already matches; otherwise log the transition.
 */
void set_activation(int act)
{
	if (act == _activation)
		return;

	_activation = act;

	if (!_activation) {
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
		return;
	}

	log_verbose("Activation enabled. Device-mapper kernel "
		    "driver will be used.");
}

/* Report whether activation is currently enabled. */
int activation(void)
{
	return _activation;
}
330
/*
 * Test @lv against a config list @cn (e.g. activation/volume_list).
 * Each list entry may be "@*" (match any host tag against LV/VG tags),
 * "@tag" (match a specific LV/VG tag), "vgname", or "vgname/lvname".
 * @config_path is used only in log messages.
 * Returns 1 on the first match, 0 if nothing matches.
 */
int lv_passes_volumes_filter(struct cmd_context *cmd, struct logical_volume *lv,
			     const struct dm_config_node *cn, const char *config_path)
{
	const struct dm_config_value *cv;
	const char *str;
	static char path[PATH_MAX];

	log_verbose("%s configuration setting defined: "
		    "Checking the list to match %s/%s",
		    config_path, lv->vg->name, lv->name);

	for (cv = cn->v; cv; cv = cv->next) {
		/* Non-string and empty entries are diagnosed and skipped. */
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in config file %s",
				  config_path);
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file %s",
				  config_path);
			continue;
		}


		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "%s", config_path);
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in %s configuration setting "
		    "matches %s/%s", config_path, lv->vg->name, lv->name);

	return 0;
}
403
a18dcfb5
AK
404static int _passes_activation_filter(struct cmd_context *cmd,
405 struct logical_volume *lv)
406{
407 const struct dm_config_node *cn;
408
409 if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
410 log_verbose("activation/volume_list configuration setting "
411 "not defined: Checking only host tags for %s/%s",
412 lv->vg->name, lv->name);
413
414 /* If no host tags defined, activate */
415 if (dm_list_empty(&cmd->tags))
416 return 1;
417
418 /* If any host tag matches any LV or VG tag, activate */
419 if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
420 str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
421 return 1;
422
423 log_verbose("No host tag matches %s/%s",
424 lv->vg->name, lv->name);
425
426 /* Don't activate */
427 return 0;
428 }
429
95ced7a7 430 return lv_passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
a18dcfb5
AK
431}
432
/*
 * Return 1 when @lv matches activation/read_only_volume_list,
 * 0 when the setting is absent or nothing matches.
 */
static int _passes_readonly_filter(struct cmd_context *cmd,
				   struct logical_volume *lv)
{
	const struct dm_config_node *cn =
		find_config_tree_node(cmd, "activation/read_only_volume_list");

	if (!cn)
		return 0;

	return lv_passes_volumes_filter(cmd, lv, cn,
					"activation/read_only_volume_list");
}
443
444
/*
 * Return 1 when @lv may be auto-activated: either
 * activation/auto_activation_volume_list is unset (everything
 * auto-activates) or the LV matches the list.
 */
int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	cn = find_config_tree_node(cmd, "activation/auto_activation_volume_list");
	if (!cn) {
		log_verbose("activation/auto_activation_volume_list configuration setting "
			    "not defined: All logical volumes will be auto-activated.");
		return 1;
	}

	return lv_passes_volumes_filter(cmd, lv, cn, "activation/auto_activation_volume_list");
}
457
/* Fetch the libdevmapper version string; 0 when activation is disabled. */
int library_version(char *version, size_t size)
{
	return activation() ? dm_get_library_version(version, size) : 0;
}
465
/* Fetch the device-mapper driver version; 0 when activation is disabled. */
int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}
475
/*
 * Look up the kernel target @target_name and return its version in
 * *maj/*min/*patchlevel.  Returns 1 if the target was found (or if
 * DM_DEVICE_LIST_VERSIONS appears unsupported, in which case the
 * version is reported as 0.0.0), else 0.
 */
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_versions *target, *last_target;

	log_very_verbose("Getting target version for %s", target_name);
	if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
		return_0;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_debug("Failed to get %s target version", target_name);
		/* Assume this was because LIST_VERSIONS isn't supported */
		*maj = 0;
		*min = 0;
		*patchlevel = 0;
		r = 1;
		goto out;
	}

	target = dm_task_get_versions(dmt);

	/* The versions list is a packed buffer terminated when the
	 * next-offset walk stops advancing. */
	do {
		last_target = target;

		if (!strcmp(target_name, target->name)) {
			r = 1;
			*maj = target->version[0];
			*min = target->version[1];
			*patchlevel = target->version[2];
			goto out;
		}

		target = (struct dm_versions *)((char *) target + target->next);
	} while (last_target != target);

      out:
	dm_task_destroy(dmt);

	return r;
}
521
a3390bb5 522int lvm_dm_prefix_check(int major, int minor, const char *prefix)
07113bee
MB
523{
524 struct dm_task *dmt;
525 const char *uuid;
526 int r;
527
528 if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
d1b36fbe 529 return_0;
07113bee
MB
530
531 if (!dm_task_set_minor(dmt, minor) ||
532 !dm_task_set_major(dmt, major) ||
533 !dm_task_run(dmt) ||
534 !(uuid = dm_task_get_uuid(dmt))) {
535 dm_task_destroy(dmt);
536 return 0;
537 }
538
539 r = strncasecmp(uuid, prefix, strlen(prefix));
540 dm_task_destroy(dmt);
541
d1b36fbe 542 return r ? 0 : 1;
07113bee
MB
543}
544
/*
 * Attempt to load the kernel module "dm-<target_name>" via modprobe.
 * Returns the exec_cmd result, or 0 when modprobe is unavailable or
 * the module name does not fit.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3] = { MODPROBE_CMD, NULL, NULL };

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[1] = module;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
566
/*
 * Return 1 when the kernel target @target_name is available.
 * With @use_modprobe set (and MODPROBE_CMD configured), try loading the
 * module if the target is not yet registered, then re-check.
 */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Already registered with the kernel? */
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		/* Not present: try to load its module before re-checking. */
		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}
587
/*
 * Returns 1 if info structure populated, else 0 on failure.
 *
 * @use_layer selects the internal layered device for thin pools
 * ("tpool") or snapshot origins ("real") instead of the top-level LV.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;
	const char *layer;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested and we have to be sure our own udev
	 * transactions are finished
	 * For non-clustered locking type we are only interested for non-delete operation
	 * in progress - as only those could lead to opened files
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	/* Pick the hidden layer device when requested and applicable. */
	if (use_layer && lv_is_thin_pool(lv))
		layer = "tpool";
	else if (use_layer && lv_is_origin(lv))
		layer = "real";
	else
		layer = NULL;

	if (!dev_manager_info(lv->vg->cmd->mem, lv, layer, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	/* Copy the dm_info fields callers rely on into the lvinfo. */
	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
a62ee8ad 634
e8905d98 635int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
a6b22cf3 636 struct lvinfo *info, int with_open_count, int with_read_ahead)
4bd9480d 637{
0548bcc2 638 int r;
4bd9480d
AK
639 struct logical_volume *lv;
640
7a593325 641 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
4bd9480d
AK
642 return 0;
643
e8905d98 644 r = lv_info(cmd, lv, use_layer, info, with_open_count, with_read_ahead);
077a6755 645 release_vg(lv->vg);
0548bcc2
MB
646
647 return r;
4bd9480d
AK
648}
649
125712be
PR
650int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
651 struct logical_volume *lv, struct lvinfo *info)
652{
653 if (!info->exists)
654 return 1;
655
656 /* If sysfs is not used, use open_count information only. */
c3e5b497
PR
657 if (!*dm_sysfs_dir()) {
658 if (info->open_count) {
659 log_error("Logical volume %s/%s in use.",
660 lv->vg->name, lv->name);
661 return 0;
662 }
663
664 return 1;
665 }
125712be
PR
666
667 if (dm_device_has_holders(info->major, info->minor)) {
668 log_error("Logical volume %s/%s is used by another device.",
669 lv->vg->name, lv->name);
670 return 0;
671 }
672
673 if (dm_device_has_mounted_fs(info->major, info->minor)) {
674 log_error("Logical volume %s/%s contains a filesystem in use.",
675 lv->vg->name, lv->name);
676 return 0;
677 }
678
679 return 1;
680}
681
/*
 * Refresh the transient (error) status of @lv from the kernel.
 * Returns 1 on success, else 0 on failure.
 * (The previous comment claimed "percent set" — copied from the percent
 * helpers; this function queries transient status, not a percentage.)
 */
int lv_check_transient(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_transient(dm, lv)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
705
1951dba9
AL
706/*
707 * Returns 1 if percent set, else 0 on failure.
708 */
8191fe4f 709int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
1951dba9
AL
710{
711 int r;
712 struct dev_manager *dm;
713
d1d9800e
AK
714 if (!activation())
715 return 0;
716
7df72b3c
AK
717 log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);
718
df390f17 719 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
5f4b2acf 720 return_0;
1951dba9 721
8191fe4f 722 if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
1951dba9 723 stack;
c826c0d1 724
1951dba9
AL
725 dev_manager_destroy(dm);
726
727 return r;
728}
729
/* FIXME Merge with snapshot_percent */
/*
 * Query mirror sync progress for @lv.
 * Returns 1 if *percent was set (and optionally *event_nr), else 0.
 * @wait: passed through to dev_manager_mirror_percent.
 */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If mirrored LV is temporarily shrinked to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	/* No device present: nothing to measure. */
	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
766
/* RAID sync progress shares the mirror status path; no wait, no event_nr. */
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
	return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}
771
34507894 772/*
63368983 773 * Returns data or metadata percent usage, depends on metadata 0/1.
34507894
ZK
774 * Returns 1 if percent set, else 0 on failure.
775 */
63368983
ZK
776int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
777 percent_t *percent)
34507894
ZK
778{
779 int r;
780 struct dev_manager *dm;
781
782 if (!activation())
783 return 0;
784
63368983
ZK
785 log_debug("Checking thin %sdata percent for LV %s/%s",
786 (metadata) ? "meta" : "", lv->vg->name, lv->name);
34507894
ZK
787
788 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
789 return_0;
790
63368983 791 if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
34507894
ZK
792 stack;
793
794 dev_manager_destroy(dm);
795
796 return r;
797}
798
76ee0899
ZK
799/*
800 * Returns 1 if percent set, else 0 on failure.
801 */
802int lv_thin_percent(const struct logical_volume *lv,
803 int mapped, percent_t *percent)
804{
805 int r;
806 struct dev_manager *dm;
807
808 if (!activation())
809 return 0;
810
811 log_debug("Checking thin percent for LV %s/%s",
812 lv->vg->name, lv->name);
813
814 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
815 return_0;
816
817 if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
818 stack;
819
820 dev_manager_destroy(dm);
821
822 return r;
823}
824
bdba904d
ZK
825/*
826 * Returns 1 if transaction_id set, else 0 on failure.
827 */
828int lv_thin_pool_transaction_id(const struct logical_volume *lv,
829 uint64_t *transaction_id)
830{
831 int r;
832 struct dev_manager *dm;
833 struct dm_status_thin_pool *status;
834
835 if (!activation())
836 return 0;
837
838 log_debug("Checking thin percent for LV %s/%s",
839 lv->vg->name, lv->name);
840
841 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
842 return_0;
843
844 if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
845 stack;
846 else
847 *transaction_id = status->transaction_id;
848
849 dev_manager_destroy(dm);
850
851 return r;
852}
853
499a1616 854static int _lv_active(struct cmd_context *cmd, const struct logical_volume *lv)
2ba80b43 855{
199e490e 856 struct lvinfo info;
2ba80b43 857
2d6fcbf6 858 if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
2ba80b43 859 stack;
de6c9183 860 return -1;
2ba80b43
JT
861 }
862
de6c9183 863 return info.exists;
2ba80b43
JT
864}
865
f894b4b1 866static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
5986ec94 867{
199e490e 868 struct lvinfo info;
5986ec94 869
2d6fcbf6 870 if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
5986ec94 871 stack;
de6c9183 872 return -1;
5986ec94
JT
873 }
874
de6c9183 875 return info.open_count;
5986ec94
JT
876}
877
81beded3 878static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
b1713d28 879{
6d52fb46 880 int r;
de6c9183 881 struct dev_manager *dm;
b1713d28 882
df390f17 883 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
5f4b2acf 884 return_0;
ae2bb665 885
81beded3 886 if (!(r = dev_manager_activate(dm, lv, laopts)))
6d52fb46 887 stack;
ae2bb665 888
de6c9183 889 dev_manager_destroy(dm);
ae2bb665 890 return r;
b1713d28 891}
a381c45a 892
81beded3
ZK
893static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
894 int *flush_required)
0a5e4a14 895{
a18dcfb5 896 int r = 0;
de6c9183 897 struct dev_manager *dm;
a18dcfb5
AK
898 int old_readonly = laopts->read_only;
899
900 laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);
37ed70b9 901
df390f17 902 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
a18dcfb5 903 goto_out;
5f4b2acf 904
81beded3 905 if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
6d52fb46 906 stack;
5f4b2acf
AK
907
908 dev_manager_destroy(dm);
a18dcfb5
AK
909
910 laopts->read_only = old_readonly;
911out:
5f4b2acf
AK
912 return r;
913}
914
915static int _lv_deactivate(struct logical_volume *lv)
916{
917 int r;
918 struct dev_manager *dm;
919
df390f17 920 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
5f4b2acf 921 return_0;
37ed70b9 922
de6c9183 923 if (!(r = dev_manager_deactivate(dm, lv)))
37ed70b9 924 stack;
37ed70b9 925
de6c9183
JT
926 dev_manager_destroy(dm);
927 return r;
37ed70b9
JT
928}
929
81beded3
ZK
930static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
931 int lockfs, int flush_required)
4a624ca0 932{
20c5fcf7
AK
933 int r;
934 struct dev_manager *dm;
c2d72fd4 935
a18dcfb5
AK
936 laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);
937
df390f17
AK
938 /*
939 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
940 * device directly, we don't want to touch the devices that use it.
941 */
942 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
5f4b2acf 943 return_0;
0a5e4a14 944
81beded3 945 if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
37ed70b9 946 stack;
0a5e4a14 947
20c5fcf7
AK
948 dev_manager_destroy(dm);
949 return r;
6d52fb46 950}
4a624ca0 951
8c013da4 952/*
f75c11ed 953 * These two functions return the number of visible LVs in the state,
7df72b3c 954 * or -1 on error. FIXME Check this.
8c013da4 955 */
499a1616 956int lvs_in_vg_activated(const struct volume_group *vg)
f047219b 957{
60f13f01 958 struct lv_list *lvl;
94b8220f 959 int count = 0;
37ed70b9 960
d1d9800e
AK
961 if (!activation())
962 return 0;
963
7df72b3c 964 dm_list_iterate_items(lvl, &vg->lvs)
59d8429c 965 if (lv_is_visible(lvl->lv))
ab9663f3 966 count += (_lv_active(vg->cmd, lvl->lv) == 1);
7df72b3c
AK
967
968 log_debug("Counted %d active LVs in VG %s", count, vg->name);
37ed70b9
JT
969
970 return count;
f047219b 971}
2ba80b43 972
08c9ff43 973int lvs_in_vg_opened(const struct volume_group *vg)
2ba80b43 974{
08c9ff43 975 const struct lv_list *lvl;
94b8220f 976 int count = 0;
2ba80b43 977
d1d9800e
AK
978 if (!activation())
979 return 0;
980
7df72b3c 981 dm_list_iterate_items(lvl, &vg->lvs)
87371d48 982 if (lv_is_visible(lvl->lv))
f894b4b1 983 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
7df72b3c
AK
984
985 log_debug("Counted %d open LVs in VG %s", count, vg->name);
2ba80b43
JT
986
987 return count;
988}
413cc918 989
/*
 * _lv_is_active
 * @lv: logical volume being queried
 * @locally: set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	    0		not active
 *	   1	   0	    0		active remotely
 *	   1	   0	    1		exclusive remotely
 *	   1	   1	    0		active locally and possibly remotely
 *	   1	   1	    1		exclusive locally (or local && !cluster)
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(const struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	/* Local device presence check. */
	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		if (l)
			e = 1;  /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	/* Ask the cluster lock manager about remote holders/exclusivity. */
	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	log_error("Unable to determine exclusivity of %s", lv->name);

	e = 0;

	/*
	 * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
	 * but it's unreliable.
	 */

out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}
1066
/* Return 1 if @lv is active anywhere (locally or remotely). */
int lv_is_active(const struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}
1071
499a1616 1072int lv_is_active_but_not_locally(const struct logical_volume *lv)
27ff8813
JEB
1073{
1074 int l;
9e277b9e 1075 return _lv_is_active(lv, &l, NULL) && !l;
27ff8813 1076}
27ff8813 1077
499a1616 1078int lv_is_active_exclusive(const struct logical_volume *lv)
b19f0121
JEB
1079{
1080 int e;
1081
1082 return _lv_is_active(lv, NULL, &e) && e;
1083}
1084
/* Return 1 if @lv is active exclusively on this host. */
int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
	int locally, excl;

	if (!_lv_is_active(lv, &locally, &excl))
		return 0;

	return locally && excl;
}
1091
/* Return 1 if @lv is active exclusively on some other host. */
int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
	int locally, excl;

	if (!_lv_is_active(lv, &locally, &excl))
		return 0;

	return !locally && excl;
}
1098
d1e8046f
AK
1099#ifdef DMEVENTD
/*
 * Allocate and populate a dmeventd event handler for one dm device.
 *
 * dmuuid  - dm uuid of the device to monitor
 * dso     - dmeventd plugin shared object to register
 * timeout - timeout in seconds (0 = no timeout event)
 * mask    - event mask to monitor
 *
 * Returns NULL on any failure; a partially built handler is destroyed.
 */
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							 const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	/* Honour a non-default dmeventd executable from the config tree. */
	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}
1126
1127char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
1128{
1129 char *path;
1130
1131 if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
1132 log_error("Failed to allocate dmeventd library path.");
1133 return NULL;
1134 }
1135
1136 get_shared_library_path(cmd, libpath, path, PATH_MAX);
1137
1138 return path;
1139}
1140
2bc1d759
ZK
1141static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
1142{
1143 const char *layer;
1144
1145 if (lv_is_thin_pool(lv))
1146 layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
1147 else if (lv_is_origin(lv))
1148 layer = "real"; /* Monitor "real" for "snapshot-origin". */
1149 else
1150 layer = NULL;
1151
1152 return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
1153}
1154
f92b4f94
AK
/*
 * Query dmeventd for the registration state of the LV's device.
 *
 * Returns the registered event mask (0 if not registered, on error, or if
 * no dso was supplied).  *pending is set to 1 when a (un)registration is
 * still in flight; the pending bit is stripped from the returned mask.
 */
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	/* Non-zero means the device is not registered with dmeventd. */
	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}
1187
/*
 * Register (set=1) or unregister (set=0) the LV's device with dmeventd.
 * A non-zero timeout additionally requests DM_EVENT_TIMEOUT events.
 * Returns 1 on success, 0 on failure.
 */
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = _build_target_uuid(cmd, lv)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}
1217
1218#endif
1219
3e3d5d85 1220/*
8a37910d
AK
1221 * Returns 0 if an attempt to (un)monitor the device failed.
1222 * Returns 1 otherwise.
3e3d5d85 1223 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };
	/* thinopts requests the open_count check when unmonitoring a thin pool. */
	static const struct lv_activate_opts thinopts = { .skip_in_use = 1 };
	struct lvinfo info;

	/* NULL laopts means "default options". */
	if (!laopts)
		laopts = &zlaopts;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * Allow to unmonitor thin pool via explicit pool unmonitor
	 * or unmonitor before the last thin pool user deactivation
	 * Skip unmonitor, if invoked via unmonitor of thin volume
	 * and there is another thin pool user (open_count > 1)
	 */
	if (laopts->skip_in_use && lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) &&
	    (info.open_count != 1)) {
		log_debug("Skipping unmonitor of opened %s (open:%d)",
			  lv->name, info.open_count);
		return 1;
	}

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		/*
		 * If requested unmonitoring of thin volume, request test
		 * if there is no other thin pool user
		 *
		 * FIXME: code here looks like _lv_postorder()
		 */
		if (seg->pool_lv &&
		    !monitor_dev_for_events(cmd, seg->pool_lv,
					    (!monitor) ? &thinopts : NULL, monitor))
			r = 0;

		if (seg->metadata_lv &&
		    !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor))
			r = 0;

		/* Segments under pvmove control are never (un)monitored here. */
		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue;  /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		/* Pick the segtype's (un)monitor hook only if a change is needed. */
		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		/* Once any segment failed, keep reporting failure for the LV. */
		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}
15d91f5a 1394
0f2a4ca2
AK
1395struct detached_lv_data {
1396 struct logical_volume *lv_pre;
1397 struct lv_activate_opts *laopts;
1398 int *flush_required;
1399};
1400
1401static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
1402{
1403 struct detached_lv_data *detached = data;
1404 struct lv_list *lvl_pre;
1405
1406 if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
ee840ff1 1407 if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
0f2a4ca2
AK
1408 !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
1409 return_0;
1410 }
1411
1412 return 1;
1413}
1414
/*
 * Suspend an LV in preparation for a metadata update.
 *
 * Looks up both the committed and the precommitted copy of the LV,
 * preloads the new device-mapper tables (including detached sub-LVs,
 * removed snapshots and pvmove stacks), stops dmeventd monitoring, and
 * suspends the device inside a critical section.
 *
 * error_if_not_suspended == 0 makes "LV absent or already suspended"
 * count as success.  Returns 1 on success, 0 on failure.
 */
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			/* Keep the critical-section count balanced with resume. */
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * Preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if (!(lv_pre->status & LOCKED) &&
	    (lv->status & LOCKED) &&
	    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
		/* Preload all the LVs above the PVMOVE LV */
		dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
				log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		}
		/* Now preload the PVMOVE LV itself */
		if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
			log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
			goto out;
		}
		if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
			goto_out;
	} else {
		if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;

		/*
		 * Search for existing LVs that have become detached and preload them.
		 */
		detached.lv_pre = lv_pre;
		detached.laopts = laopts;
		detached.flush_required = &flush_required;

		if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
			goto_out;

		/*
		 * Preload any snapshots that are being removed.
		 */
		if (!laopts->origin_only && lv_is_origin(lv)) {
			dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
				if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
					log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
						  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
					goto out;
				}
				if (!lv_is_cow(lvl_pre->lv) &&
				    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
		}
	}

	/* Unmonitor before suspending so dmeventd does not race with us. */
	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	if (laopts->origin_only && lv_is_thin_volume(lv) && lv_is_thin_volume(lv_pre))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		release_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1573
fd7d09e3
AK
1574/*
1575 * In a cluster, set exclusive to indicate that only one node is using the
1576 * device. Any preloaded tables may then use non-clustered targets.
1577 *
1578 * Returns success if the device is not active
1579 */
25d14105 1580int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
658b5812 1581{
25d14105
JEB
1582 struct lv_activate_opts laopts = {
1583 .origin_only = origin_only,
1584 .exclusive = exclusive
1585 };
81beded3
ZK
1586
1587 return _lv_suspend(cmd, lvid_s, &laopts, 0);
658b5812
AK
1588}
1589
2d6fcbf6
AK
1590/* No longer used */
1591/***********
658b5812
AK
1592int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1593{
1594 return _lv_suspend(cmd, lvid_s, 1);
1595}
2d6fcbf6 1596***********/
658b5812
AK
1597
/*
 * Resume a previously suspended LV.
 *
 * For a thin pool with origin_only set, only pool messages are involved
 * (messages_only); otherwise origin_only is cleared unless the LV is an
 * origin or thin volume.  Re-activates the device, leaves the critical
 * section and restarts dmeventd monitoring.
 *
 * error_if_not_active == 0 makes "LV absent or not suspended" count as
 * success.  Returns 1 on success, 0 on failure.
 */
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      struct lv_activate_opts *laopts, int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;
	int messages_only = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	if (lv_is_thin_pool(lv) && laopts->origin_only)
		messages_only = 1;

	if (!lv_is_origin(lv) && !lv_is_thin_volume(lv))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
		      laopts->revert ? " (reverting)" : "");
		r = 1;
		goto out;
	}

	log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
		  error_if_not_active ? "" : " if active",
		  laopts->origin_only ? " without snapshots" : "",
		  laopts->revert ? " (reverting)" : "");

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !(info.suspended || messages_only)) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		/* Balance the critical-section count taken at suspend time. */
		if (!info.suspended)
			critical_section_dec(cmd, "already resumed");
		goto out;
	}

	laopts->read_only = _passes_readonly_filter(cmd, lv);

	if (!_lv_activate_lv(lv, laopts))
		goto_out;

	critical_section_dec(cmd, "resumed");

	if (!monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

	r = 1;
out:
	if (lv)
		release_vg(lv->vg);

	return r;
}
1659
fd7d09e3
AK
1660/*
1661 * In a cluster, set exclusive to indicate that only one node is using the
1662 * device. Any tables loaded may then use non-clustered targets.
1663 *
5c8b1486
ZK
1664 * @origin_only
1665 * @exclusive This parameter only has an affect in cluster-context.
1666 * It forces local target type to be used (instead of
1667 * cluster-aware type).
fd7d09e3
AK
1668 * Returns success if the device is not active
1669 */
c054e7cc 1670int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
fd7d09e3
AK
1671 unsigned origin_only, unsigned exclusive,
1672 unsigned revert)
658b5812 1673{
81beded3
ZK
1674 struct lv_activate_opts laopts = {
1675 .origin_only = origin_only,
10d0d9c7
AK
1676 .exclusive = exclusive,
1677 .revert = revert
81beded3
ZK
1678 };
1679
1680 return _lv_resume(cmd, lvid_s, &laopts, 0);
658b5812
AK
1681}
1682
2d6fcbf6 1683int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
658b5812 1684{
81beded3
ZK
1685 struct lv_activate_opts laopts = { .origin_only = origin_only, };
1686
1687 return _lv_resume(cmd, lvid_s, &laopts, 1);
658b5812
AK
1688}
1689
64a95010
AK
1690static int _lv_has_open_snapshots(struct logical_volume *lv)
1691{
1692 struct lv_segment *snap_seg;
1693 struct lvinfo info;
1694 int r = 0;
1695
1696 dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
2d6fcbf6 1697 if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
64a95010
AK
1698 r = 1;
1699 continue;
1700 }
1701
1702 if (info.exists && info.open_count) {
1703 log_error("LV %s/%s has open snapshot %s: "
1704 "not deactivating", lv->vg->name, lv->name,
1705 snap_seg->cow->name);
1706 r = 1;
1707 }
1708 }
1709
1710 return r;
1711}
1712
/*
 * Deactivate an LV.
 *
 * Refuses if a visible LV is in use or an origin has open snapshots.
 * Unmonitors first, removes the device inside a critical section and
 * verifies afterwards that the device is really gone.
 *
 * Returns 1 on success (including "LV not present"), 0 on failure.
 */
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	/* Nothing to do if the device does not exist. */
	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (!lv_check_not_in_use(cmd, lv, &info))
			goto_out;

		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	/* Deactivation failed if the device is still present afterwards. */
	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
1771
658b5812
AK
1772/* Test if LV passes filter */
1773int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1774 int *activate_lv)
1775{
1776 struct logical_volume *lv;
095bbca6 1777 int r = 0;
658b5812 1778
095bbca6
MB
1779 if (!activation()) {
1780 *activate_lv = 1;
1781 return 1;
1782 }
658b5812 1783
424dd43e 1784 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
095bbca6 1785 goto out;
658b5812
AK
1786
1787 if (!_passes_activation_filter(cmd, lv)) {
f7e3a19f
PR
1788 log_verbose("Not activating %s/%s since it does not pass "
1789 "activation filter.", lv->vg->name, lv->name);
658b5812 1790 *activate_lv = 0;
095bbca6
MB
1791 } else
1792 *activate_lv = 1;
1793 r = 1;
1794out:
1795 if (lv)
077a6755 1796 release_vg(lv->vg);
658b5812 1797
095bbca6 1798 return r;
658b5812
AK
1799}
1800
/*
 * Activate an LV.
 *
 * With filter set, the activation and read-only config filters are
 * consulted first.  Partial LVs (without --partial) and LVs containing
 * unknown segment types are refused.  If the device already exists with
 * a live table, the correct read-only mode and is not suspended, this
 * is a no-op.  Loads the table inside a critical section and then starts
 * dmeventd monitoring.  Returns 1 on success, 0 on failure.
 */
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	if (filter)
		laopts->read_only = _passes_readonly_filter(cmd, lv);

	log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "",
		  laopts->read_only ? " read-only" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	/*
	 * Nothing to do?
	 */
	if (info.exists && !info.suspended && info.live_table &&
	    (info.read_only == read_only_lv(lv, laopts))) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	/* Only monitor a device that was successfully activated. */
	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		release_vg(lv->vg);
	}

	return r;
}
199e490e 1878
658b5812 1879/* Activate LV */
07d31831 1880int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
658b5812 1881{
81beded3
ZK
1882 struct lv_activate_opts laopts = { .exclusive = exclusive };
1883
1884 if (!_lv_activate(cmd, lvid_s, &laopts, 0))
75b37a11
AK
1885 return_0;
1886
1887 return 1;
658b5812
AK
1888}
1889
1890/* Activate LV only if it passes filter */
07d31831 1891int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
658b5812 1892{
81beded3
ZK
1893 struct lv_activate_opts laopts = { .exclusive = exclusive };
1894
1895 if (!_lv_activate(cmd, lvid_s, &laopts, 1))
75b37a11
AK
1896 return_0;
1897
1898 return 1;
658b5812
AK
1899}
1900
f7dd6d84
AK
1901int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1902{
f7dd6d84
AK
1903 int r = 1;
1904
8b076648 1905 if (!lv) {
2262b320 1906 r = dm_mknodes(NULL);
8b076648
AK
1907 fs_unlock();
1908 return r;
1909 }
1910
ab9663f3
MB
1911 if (!activation())
1912 return 1;
f7dd6d84 1913
ab9663f3 1914 r = dev_manager_mknodes(lv);
f7dd6d84
AK
1915
1916 fs_unlock();
1917
1918 return r;
1919}
1920
352a99b9
AK
1921/*
1922 * Does PV use VG somewhere in its construction?
1923 * Returns 1 on failure.
1924 */
898e6f8e 1925int pv_uses_vg(struct physical_volume *pv,
3e3d5d85 1926 struct volume_group *vg)
352a99b9 1927{
dae08226 1928 if (!activation() || !pv->dev)
352a99b9
AK
1929 return 0;
1930
1931 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1932 return 0;
1933
898e6f8e 1934 return dev_manager_device_uses_vg(pv->dev, vg);
352a99b9
AK
1935}
1936
2293567c
AK
/* Release resources held by the dev_manager layer. */
void activation_release(void)
{
	dev_manager_release();
}
1941
914c9723
AK
/* Final dev_manager teardown at command exit. */
void activation_exit(void)
{
	dev_manager_exit();
}
199e490e 1946#endif
This page took 0.365639 seconds and 5 git commands to generate.