/*
 * lib/activate/activate.c (LVM2)
 */
1 /*
2 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
3 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
4 *
5 * This file is part of LVM2.
6 *
7 * This copyrighted material is made available to anyone wishing to use,
8 * modify, copy, or redistribute it subject to the terms and conditions
9 * of the GNU Lesser General Public License v.2.1.
10 *
11 * You should have received a copy of the GNU Lesser General Public License
12 * along with this program; if not, write to the Free Software Foundation,
13 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
14 */
15
16 #include "lib.h"
17 #include "metadata.h"
18 #include "activate.h"
19 #include "memlock.h"
20 #include "display.h"
21 #include "fs.h"
22 #include "lvm-exec.h"
23 #include "lvm-file.h"
24 #include "lvm-string.h"
25 #include "toolcontext.h"
26 #include "dev_manager.h"
27 #include "str_list.h"
28 #include "config.h"
29 #include "filter.h"
30 #include "segtype.h"
31 #include "sharedlib.h"
32
33 #include <limits.h>
34 #include <fcntl.h>
35 #include <unistd.h>
36
/* Log (very-verbose) that an action is being skipped, e.g. in test mode. */
#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
38
39 int lvm1_present(struct cmd_context *cmd)
40 {
41 char path[PATH_MAX];
42
43 if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
44 < 0) {
45 log_error("LVM1 proc global snprintf failed");
46 return 0;
47 }
48
49 if (path_exists(path))
50 return 1;
51 else
52 return 0;
53 }
54
/*
 * Append the kernel module names needed to activate segment 'seg' to
 * the 'modules' string list, recursing through any LVs stacked under it.
 *
 * Covers: the segment's own segtype, every snapshot COW LV when this LV
 * is a snapshot origin, the snapshot segment when this LV is a COW
 * device, and each AREA_LV area recursively.
 *
 * Returns 1 on success, 0 on allocation failure.
 */
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	/* Origin: also collect modules for every snapshot's COW LV. */
	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	/* COW device: also collect modules for the snapshot segment. */
	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	/* Recurse into stacked LVs; PV/unassigned areas need no modules. */
	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}
102
103 int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
104 struct dm_list *modules)
105 {
106 struct lv_segment *seg;
107
108 dm_list_iterate_items(seg, &lv->segments)
109 if (!list_segment_modules(mem, seg, modules))
110 return_0;
111
112 return 1;
113 }
114
#ifndef DEVMAPPER_SUPPORT
/*
 * Built without libdevmapper: every activation entry point below is a
 * no-op stub.  Query functions report nothing active/open (return 0);
 * action functions report success (return 1) so that callers proceed
 * without attempting any device-mapper interaction.
 *
 * NOTE(review): there is no stub for lv_is_active_but_not_locally(),
 * which the DEVMAPPER_SUPPORT build defines — confirm the
 * non-devmapper build still links.
 */
void set_activation(int act)
{
	/* Warn only once, and only when someone tries to *enable* activation. */
	static int warned = 0;

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
*******/
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}
int lv_is_active(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	return 0;
}
int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
/*
 * Stub: without libdevmapper there is nothing to (un)monitor; report success.
 * Note: 'laopts' is const-qualified to match the DEVMAPPER_SUPPORT
 * definition of this function (and hence the shared header prototype);
 * the non-const stub conflicted with it.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
250 #else /* DEVMAPPER_SUPPORT */
251
/* Global activation switch; activation is enabled by default. */
static int _activation = 1;
253
254 void set_activation(int act)
255 {
256 if (act == _activation)
257 return;
258
259 _activation = act;
260 if (_activation)
261 log_verbose("Activation enabled. Device-mapper kernel "
262 "driver will be used.");
263 else
264 log_warn("WARNING: Activation disabled. No device-mapper "
265 "interaction will be attempted.");
266 }
267
/* Report whether device-mapper activation is currently enabled. */
int activation(void)
{
	return _activation;
}
272
/*
 * Decide whether 'lv' may be activated on this host.
 *
 * If activation/volume_list is not configured, activation is allowed
 * when no host tags are defined, or when any host tag matches an LV or
 * VG tag.  Otherwise each list entry is matched in turn: "@*" (any host
 * tag matches an LV/VG tag), "@tag" (tag matches an LV/VG tag),
 * "vgname", or "vgname/lvname".
 *
 * Returns 1 to allow activation, 0 to refuse it.
 */
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct config_node *cn;
	const struct config_value *cv;
	const char *str;
	char path[PATH_MAX];

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined, checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}
	else
		log_verbose("activation/volume_list configuration setting "
			    "defined, checking the list to match %s/%s",
			    lv->vg->name, lv->name);

	/* Walk the volume_list entries; invalid/empty entries are skipped. */
	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != CFG_STRING) {
			log_error("Ignoring invalid string in config file "
				  "activation/volume_list");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file "
				  "activation/volume_list");
			continue;
		}


		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "activation/volume_list");
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;
				else
					continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;
			else
				continue;
		}
		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;
			else
				continue;
		}
		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in activation/volume_list configuration "
		    "setting matches %s/%s", lv->vg->name, lv->name);

	return 0;
}
367
368 int library_version(char *version, size_t size)
369 {
370 if (!activation())
371 return 0;
372
373 return dm_get_library_version(version, size);
374 }
375
/*
 * Fetch the device-mapper kernel driver version string into 'version'.
 * Returns 0 when activation is disabled, otherwise the result of
 * dm_driver_version().
 */
int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}
385
386 int target_version(const char *target_name, uint32_t *maj,
387 uint32_t *min, uint32_t *patchlevel)
388 {
389 int r = 0;
390 struct dm_task *dmt;
391 struct dm_versions *target, *last_target;
392
393 log_very_verbose("Getting target version for %s", target_name);
394 if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
395 return_0;
396
397 if (!dm_task_run(dmt)) {
398 log_debug("Failed to get %s target version", target_name);
399 /* Assume this was because LIST_VERSIONS isn't supported */
400 return 1;
401 }
402
403 target = dm_task_get_versions(dmt);
404
405 do {
406 last_target = target;
407
408 if (!strcmp(target_name, target->name)) {
409 r = 1;
410 *maj = target->version[0];
411 *min = target->version[1];
412 *patchlevel = target->version[2];
413 goto out;
414 }
415
416 target = (struct dm_versions *)((char *) target + target->next);
417 } while (last_target != target);
418
419 out:
420 dm_task_destroy(dmt);
421
422 return r;
423 }
424
/*
 * Attempt to load the kernel module "dm-<target_name>" via modprobe.
 * Returns the result of exec_cmd() when MODPROBE_CMD is configured;
 * always returns 0 when built without MODPROBE_CMD, or when the module
 * name does not fit the buffer.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
446
/*
 * Report whether kernel target 'target_name' is available, optionally
 * attempting to modprobe it first (when built with MODPROBE_CMD and
 * 'use_modprobe' is set).  Returns 0 when activation is disabled.
 */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Already loaded? */
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	/* Re-query after (possible) module load. */
	return target_version(target_name, &maj, &min, &patchlevel);
}
467
/*
 * Returns 1 if info structure populated, else 0 on failure.
 *
 * Queries the dev manager for the LV's device status ("real" device
 * when origin_only is set) and copies the fields into *info.
 * Returns 0 when activation is disabled.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;
	/*
	 * If open_count info is requested and we have to be sure our own udev
	 * transactions are finished
	 * For non-clustered locking type we are only interested for non-delete operation
	 * in progress - as only those could lead to opened files
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, origin_only ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	/* Copy the dm_info fields we expose through struct lvinfo. */
	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
506
507 int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
508 unsigned origin_only,
509 struct lvinfo *info, int with_open_count, int with_read_ahead)
510 {
511 int r;
512 struct logical_volume *lv;
513
514 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
515 return 0;
516
517 if (!lv_is_origin(lv))
518 origin_only = 0;
519
520 r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
521 free_vg(lv->vg);
522
523 return r;
524 }
525
/*
 * Returns 1 if transient status was checked, else 0 on failure.
 * (The previous comment said "percent set" — copy/paste from the
 * percent helpers below.)
 */
int lv_check_transient(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_transient(dm, lv)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
549
550 /*
551 * Returns 1 if percent set, else 0 on failure.
552 */
553 int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
554 {
555 int r;
556 struct dev_manager *dm;
557
558 if (!activation())
559 return 0;
560
561 log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);
562
563 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
564 return_0;
565
566 if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
567 stack;
568
569 dev_manager_destroy(dm);
570
571 return r;
572 }
573
/* FIXME Merge with snapshot_percent */
/*
 * Query the sync percentage of a mirrored LV.
 * Returns 1 if *percent was set, else 0 on failure.
 */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If mirrored LV is temporarily shrunk to 1 area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	/* Not an error: the device simply isn't there to query. */
	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
610
611 static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
612 {
613 struct lvinfo info;
614
615 if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
616 stack;
617 return -1;
618 }
619
620 return info.exists;
621 }
622
623 static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
624 {
625 struct lvinfo info;
626
627 if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
628 stack;
629 return -1;
630 }
631
632 return info.open_count;
633 }
634
635 static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
636 {
637 int r;
638 struct dev_manager *dm;
639
640 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
641 return_0;
642
643 if (!(r = dev_manager_activate(dm, lv, laopts)))
644 stack;
645
646 dev_manager_destroy(dm);
647 return r;
648 }
649
650 static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
651 int *flush_required)
652 {
653 int r;
654 struct dev_manager *dm;
655
656 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
657 return_0;
658
659 if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
660 stack;
661
662 dev_manager_destroy(dm);
663 return r;
664 }
665
666 static int _lv_deactivate(struct logical_volume *lv)
667 {
668 int r;
669 struct dev_manager *dm;
670
671 if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
672 return_0;
673
674 if (!(r = dev_manager_deactivate(dm, lv)))
675 stack;
676
677 dev_manager_destroy(dm);
678 return r;
679 }
680
/*
 * Suspend the LV's device via the dev manager.  'lockfs' and
 * 'flush_required' are passed through to dev_manager_suspend().
 * Returns 1 on success, 0 on failure.
 */
static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
700
701 /*
702 * These two functions return the number of visible LVs in the state,
703 * or -1 on error. FIXME Check this.
704 */
705 int lvs_in_vg_activated(struct volume_group *vg)
706 {
707 struct lv_list *lvl;
708 int count = 0;
709
710 if (!activation())
711 return 0;
712
713 dm_list_iterate_items(lvl, &vg->lvs)
714 if (lv_is_visible(lvl->lv))
715 count += (_lv_active(vg->cmd, lvl->lv) == 1);
716
717 log_debug("Counted %d active LVs in VG %s", count, vg->name);
718
719 return count;
720 }
721
722 int lvs_in_vg_opened(const struct volume_group *vg)
723 {
724 const struct lv_list *lvl;
725 int count = 0;
726
727 if (!activation())
728 return 0;
729
730 dm_list_iterate_items(lvl, &vg->lvs)
731 if (lv_is_visible(lvl->lv))
732 count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
733
734 log_debug("Counted %d open LVs in VG %s", count, vg->name);
735
736 return count;
737 }
738
/*
 * _lv_is_active
 * @lv: logical volume being queried
 * @locally: set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	    0		not active
 *	   1	   0	    0		active remotely
 *	   1	   0	    1		exclusive remotely
 *	   1	   1	    0		active locally and possibly remotely
 *	   1	   1	    1		exclusive locally (or local && !cluster)
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		e = 1;  /* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	/* NOTE(review): remote_lock_held() presumably returns <0 when the
	 * lock query is unsupported; >=0 means the query succeeded. */
	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	if (l) {
		if (exclusive)
			log_error("Unable to determine exclusivity of %s",
				  lv->name);
		goto out;
	}

	/* FIXME: Is this fallback alright? */
	if (activate_lv_excl(lv->vg->cmd, lv)) {
		if (!deactivate_lv(lv->vg->cmd, lv))
			stack;
		/* FIXME: locally & exclusive are undefined. */
		return 0;
	}
	/* FIXME: Check exclusive value here. */
out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}
820
/* True if the LV is active anywhere (locally or remotely). */
int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}

/* True if the LV is active, but not on this node. */
int lv_is_active_but_not_locally(struct logical_volume *lv)
{
	int l;
	return _lv_is_active(lv, &l, NULL) && !l;
}

/* True if the LV is active exclusively on this node. */
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}

/* True if the LV is active exclusively on a remote node. */
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}
845
846 #ifdef DMEVENTD
/*
 * Build a dm_event_handler for device 'dmuuid' monitored by plugin
 * 'dso' with the given timeout and event mask.
 * Returns NULL on failure.  The dm_event_handler_set_* calls return
 * non-zero on error.
 */
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							 const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);
	return NULL;
}
873
874 char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
875 {
876 char *path;
877
878 if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
879 log_error("Failed to allocate dmeventd library path.");
880 return NULL;
881 }
882
883 get_shared_library_path(cmd, libpath, path, PATH_MAX);
884
885 return path;
886 }
887
/*
 * Query whether the LV is registered with dmeventd via plugin 'dso'.
 * Returns the registered event mask (0 when not registered or on
 * failure); *pending is set if registration is still pending.
 */
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	/* Strip the pending bit out of the reported mask. */
	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}
922
/*
 * Register ('set' non-zero) or unregister the LV with dmeventd via
 * plugin 'dso'.  A non-zero 'timeout' additionally requests timeout
 * events.  Returns 1 on success, 0 on failure.
 */
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}
952
953 #endif
954
/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 *
 * 'monitor' non-zero registers the LV (and its dependent LVs, snapshots,
 * mirror log and stacked sub-LVs, via recursion) with dmeventd;
 * zero unregisters them.  'laopts' may be NULL (defaults used).
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };

	if (!laopts)
		laopts = &zlaopts;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue;  /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		/* Pick the (un)registration callback only when a state
		 * change is actually needed. */
		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		/* Only downgrade r; never clear an earlier failure. */
		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}
1099
/* Context passed to _preload_detached_lv() through for_each_sub_lv(). */
struct detached_lv_data {
	struct logical_volume *lv_pre;	/* LV from the precommitted metadata */
	struct lv_activate_opts *laopts;
	int *flush_required;		/* set by _lv_preload() as needed */
};
1105
1106 static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
1107 {
1108 struct detached_lv_data *detached = data;
1109 struct lv_list *lvl_pre;
1110
1111 if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
1112 if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) &&
1113 !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
1114 return_0;
1115 }
1116
1117 return 1;
1118 }
1119
/*
 * Suspend the LV identified by 'lvid_s', preloading any precommitted
 * tables first (including PVMOVE-related LVs and newly-visible detached
 * LVs) and unmonitoring the device before entering the critical section.
 *
 * With error_if_not_suspended == 0, an LV that does not exist or is
 * already suspended counts as success.
 * Returns 1 on success, 0 on failure.
 */
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;
	struct detached_lv_data detached;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_origin(lv) || !lv_is_origin(lv_pre))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	/* Not present or already suspended: success unless caller insists. */
	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * If VG was precommitted, preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED)) {
		if (!(lv_pre->status & LOCKED) &&
		    (lv->status & LOCKED) &&
		    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
			/* Preload all the LVs above the PVMOVE LV */
			dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
				if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
					/* FIXME Internal error? */
					log_error("LV %s missing from preload metadata", sl->seg->lv->name);
					goto out;
				}
				if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}
			/* Now preload the PVMOVE LV itself */
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
				/* FIXME Internal error? */
				log_error("LV %s missing from preload metadata", pvmove_lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		} else {
			if (!_lv_preload(lv_pre, laopts, &flush_required))
				/* FIXME Revert preloading */
				goto_out;

			/*
			 * Search for existing LVs that have become detached and preload them.
			 */
			detached.lv_pre = lv_pre;
			detached.laopts = laopts;
			detached.flush_required = &flush_required;

			if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
				goto_out;
		}
	}

	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	/* Sync/lock the filesystem when snapshots are involved. */
	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		free_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
1262
1263 /* Returns success if the device is not active */
1264 int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
1265 {
1266 struct lv_activate_opts laopts = { .origin_only = origin_only };
1267
1268 return _lv_suspend(cmd, lvid_s, &laopts, 0);
1269 }
1270
1271 /* No longer used */
1272 /***********
1273 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1274 {
1275 return _lv_suspend(cmd, lvid_s, 1);
1276 }
1277 ***********/
1278
/*
 * _lv_resume
 *
 * Resume (reactivate) the LV identified by @lvid_s and balance the
 * critical-section counter that the matching suspend incremented.
 *
 * @cmd
 * @lvid_s
 * @laopts: activation options.  laopts->origin_only is cleared here unless
 *          the LV is a snapshot origin.  laopts->exclusive only has an
 *          effect in cluster-context: it forces the local target type to
 *          be used (instead of the cluster-aware type).
 * @error_if_not_active: when set, fail (return 0) if the LV is not
 *          currently suspended; when clear, report success instead.
 *
 * Returns 1 on success (or when activation is disabled), 0 on failure.
 */
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      struct lv_activate_opts *laopts, int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	/* Activation disabled in config: nothing to resume, report success. */
	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* origin_only is only meaningful for a snapshot origin. */
	if (!lv_is_origin(lv))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s.", lv->name, laopts->origin_only ? " without snapshots" : "");
		r = 1;
		goto out;
	}

	log_debug("Resuming LV %s/%s%s%s.", lv->vg->name, lv->name,
		  error_if_not_active ? "" : " if active",
		  laopts->origin_only ? " without snapshots" : "");

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !info.suspended) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		/* Keep the critical-section counter balanced even though
		 * no actual resume was needed. */
		if (!info.suspended)
			critical_section_dec(cmd, "already resumed");
		goto out;
	}

	if (!_lv_activate_lv(lv, laopts))
		goto_out;

	/* Device resumed: undo the increment made when it was suspended. */
	critical_section_dec(cmd, "resumed");

	/* Re-register monitoring; failure is logged but not fatal. */
	if (!monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

	r = 1;
out:
	if (lv)
		free_vg(lv->vg);

	return r;
}
1342
1343 /* Returns success if the device is not active */
1344 int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
1345 unsigned origin_only, unsigned exclusive)
1346 {
1347 struct lv_activate_opts laopts = {
1348 .origin_only = origin_only,
1349 /*
1350 * When targets are activated exclusively in a cluster, the
1351 * non-clustered target should be used. This only happens
1352 * if exclusive is set.
1353 */
1354 .exclusive = exclusive
1355 };
1356
1357 return _lv_resume(cmd, lvid_s, &laopts, 0);
1358 }
1359
1360 int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
1361 {
1362 struct lv_activate_opts laopts = { .origin_only = origin_only, };
1363
1364 return _lv_resume(cmd, lvid_s, &laopts, 1);
1365 }
1366
1367 static int _lv_has_open_snapshots(struct logical_volume *lv)
1368 {
1369 struct lv_segment *snap_seg;
1370 struct lvinfo info;
1371 int r = 0;
1372
1373 dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
1374 if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
1375 r = 1;
1376 continue;
1377 }
1378
1379 if (info.exists && info.open_count) {
1380 log_error("LV %s/%s has open snapshot %s: "
1381 "not deactivating", lv->vg->name, lv->name,
1382 snap_seg->cow->name);
1383 r = 1;
1384 }
1385 }
1386
1387 return r;
1388 }
1389
/*
 * Deactivate the LV identified by @lvid_s.
 *
 * Refuses to deactivate a visible LV that is open, or an origin with open
 * snapshots.  Success is only reported if the device is confirmed gone
 * after the deactivation attempt.
 *
 * Returns 1 on success (including "already inactive" and test mode),
 * 0 on failure.
 */
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	/* Activation disabled in config: nothing to do. */
	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	/* Already inactive: success. */
	if (!info.exists) {
		r = 1;
		goto out;
	}

	/* In-use checks apply only to user-visible LVs. */
	if (lv_is_visible(lv)) {
		if (info.open_count) {
			log_error("LV %s/%s in use: not deactivating",
				  lv->vg->name, lv->name);
			goto out;
		}
		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto_out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/* Stop monitoring before tearing the device down; non-fatal. */
	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	/* Verify the device really disappeared; otherwise report failure. */
	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
1450
1451 /* Test if LV passes filter */
1452 int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
1453 int *activate_lv)
1454 {
1455 struct logical_volume *lv;
1456 int r = 0;
1457
1458 if (!activation()) {
1459 *activate_lv = 1;
1460 return 1;
1461 }
1462
1463 if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
1464 goto out;
1465
1466 if (!_passes_activation_filter(cmd, lv)) {
1467 log_verbose("Not activating %s/%s since it does not pass "
1468 "activation filter.", lv->vg->name, lv->name);
1469 *activate_lv = 0;
1470 } else
1471 *activate_lv = 1;
1472 r = 1;
1473 out:
1474 if (lv)
1475 free_vg(lv->vg);
1476
1477 return r;
1478 }
1479
/*
 * _lv_activate
 *
 * Activate the LV identified by @lvid_s.  If @filter is set, the LV must
 * pass the activation filter or activation fails with an error.  Refuses
 * partial LVs (unless --partial was given) and LVs containing segment
 * types this build does not recognise.
 *
 * Returns 1 on success (including "already active" and test mode),
 * 0 on failure.
 */
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	/* Activation disabled in config: nothing to do, report success. */
	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	/* Missing PVs make activation unsafe unless explicitly overridden. */
	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto_out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto_out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Activating %s/%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	/* Already active with a live table and not suspended: nothing to do. */
	if (info.exists && !info.suspended && info.live_table) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	/* Only monitor a device that actually came up; non-fatal on failure. */
	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
1549
1550 /* Activate LV */
1551 int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1552 {
1553 struct lv_activate_opts laopts = { .exclusive = exclusive };
1554
1555 if (!_lv_activate(cmd, lvid_s, &laopts, 0))
1556 return_0;
1557
1558 return 1;
1559 }
1560
1561 /* Activate LV only if it passes filter */
1562 int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
1563 {
1564 struct lv_activate_opts laopts = { .exclusive = exclusive };
1565
1566 if (!_lv_activate(cmd, lvid_s, &laopts, 1))
1567 return_0;
1568
1569 return 1;
1570 }
1571
1572 int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
1573 {
1574 int r = 1;
1575
1576 if (!lv) {
1577 r = dm_mknodes(NULL);
1578 fs_unlock();
1579 return r;
1580 }
1581
1582 if (!activation())
1583 return 1;
1584
1585 r = dev_manager_mknodes(lv);
1586
1587 fs_unlock();
1588
1589 return r;
1590 }
1591
1592 /*
1593 * Does PV use VG somewhere in its construction?
1594 * Returns 1 on failure.
1595 */
1596 int pv_uses_vg(struct physical_volume *pv,
1597 struct volume_group *vg)
1598 {
1599 if (!activation())
1600 return 0;
1601
1602 if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
1603 return 0;
1604
1605 return dev_manager_device_uses_vg(pv->dev, vg);
1606 }
1607
/* Release resources held by the dev_manager layer (thin delegating wrapper). */
void activation_release(void)
{
	dev_manager_release();
}
1612
/* Final dev_manager shutdown at process exit (thin delegating wrapper). */
void activation_exit(void)
{
	dev_manager_exit();
}
1617 #endif
This page took 0.10616 seconds and 6 git commands to generate.