/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "sharedlib.h"

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
int lvm1_present(struct cmd_context *cmd)
{
        static char path[PATH_MAX];

        if (dm_snprintf(path, sizeof(path), "%s/lvm/global",
                        cmd->proc_dir) < 0) {
                log_error("LVM1 proc global snprintf failed");
                return 0;
        }

        if (path_exists(path))
                return 1;

        return 0;
}
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
                         struct dm_list *modules)
{
        unsigned int s;
        struct lv_segment *seg2, *snap_seg;
        struct dm_list *snh;

        if (seg->segtype->ops->modules_needed &&
            !seg->segtype->ops->modules_needed(mem, seg, modules)) {
                log_error("module string allocation failed");
                return 0;
        }

        if (lv_is_origin(seg->lv))
                dm_list_iterate(snh, &seg->lv->snapshot_segs)
                        if (!list_lv_modules(mem,
                                             dm_list_struct_base(snh,
                                                                 struct lv_segment,
                                                                 origin_list)->cow,
                                             modules))
                                return_0;

        if (lv_is_cow(seg->lv)) {
                snap_seg = find_cow(seg->lv);
                if (snap_seg->segtype->ops->modules_needed &&
                    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
                                                            modules)) {
                        log_error("snap_seg module string allocation failed");
                        return 0;
                }
        }

        for (s = 0; s < seg->area_count; s++) {
                switch (seg_type(seg, s)) {
                case AREA_LV:
                        seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
                        if (seg2 && !list_segment_modules(mem, seg2, modules))
                                return_0;
                        break;
                case AREA_PV:
                case AREA_UNASSIGNED:
                        ;
                }
        }

        return 1;
}
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
                    struct dm_list *modules)
{
        struct lv_segment *seg;

        dm_list_iterate_items(seg, &lv->segments)
                if (!list_segment_modules(mem, seg, modules))
                        return_0;

        return 1;
}
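
/*
 * Illustrative sketch (not part of the original file): a caller collecting
 * the kernel module names an LV depends on.  It assumes the list entries
 * are str_list items (struct str_list with a "str" member), which is how
 * other callers in this tree consume such lists.
 */
static void _example_log_lv_modules(struct dm_pool *mem,
                                    const struct logical_volume *lv)
{
        struct dm_list modules;
        struct str_list *sl;

        dm_list_init(&modules);

        if (!list_lv_modules(mem, lv, &modules))
                return;

        dm_list_iterate_items(sl, &modules)
                log_very_verbose("LV %s needs module %s", lv->name, sl->str);
}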
#ifndef DEVMAPPER_SUPPORT

void set_activation(int act)
{
        static int warned = 0;

        if (warned || !act)
                return;

        log_error("Compiled without libdevmapper support. "
                  "Can't enable activation.");

        warned = 1;
}
/*
 * Stubs used when built without device-mapper support: queries report
 * failure (0) and activation actions succeed as no-ops (1).
 */
int library_version(char *version, size_t size)
{
        return 0;
}

int driver_version(char *version, size_t size)
{
        return 0;
}

int target_version(const char *target_name, uint32_t *maj,
                   uint32_t *min, uint32_t *patchlevel)
{
        return 0;
}

int target_present(struct cmd_context *cmd, const char *target_name,
                   int use_modprobe)
{
        return 0;
}

int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
        return 0;
}

int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
            struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        return 0;
}

int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
                    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        return 0;
}

int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
                        struct logical_volume *lv, struct lvinfo *info)
{
        return 0;
}

int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
        return 0;
}

int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
                      int wait, percent_t *percent, uint32_t *event_nr)
{
        return 0;
}

int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
        return 0;
}

int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
                         percent_t *percent)
{
        return 0;
}

int lv_thin_percent(const struct logical_volume *lv, int mapped,
                    percent_t *percent)
{
        return 0;
}

int lv_thin_pool_transaction_id(const struct logical_volume *lv,
                                uint64_t *transaction_id)
{
        return 0;
}

int lvs_in_vg_activated(const struct volume_group *vg)
{
        return 0;
}

int lvs_in_vg_opened(const struct volume_group *vg)
{
        return 0;
}

int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
        return 1;
}

int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
        return 1;
}

int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
        return 1;
}

int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
                        unsigned origin_only, unsigned exclusive, unsigned revert)
{
        return 1;
}

int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
        return 1;
}

int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
                         int *activate_lv)
{
        return 1;
}

int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        return 1;
}

int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        return 1;
}

int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
        return 1;
}

int pv_uses_vg(struct physical_volume *pv,
               struct volume_group *vg)
{
        return 0;
}

void activation_release(void)
{
}

void activation_exit(void)
{
}

int lv_is_active(const struct logical_volume *lv)
{
        return 0;
}

int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
        return 0;
}

int lv_is_active_exclusive(const struct logical_volume *lv)
{
        return 0;
}

int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
        return 0;
}

int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
        return 0;
}

int lv_check_transient(struct logical_volume *lv)
{
        return 1;
}

int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
                           const struct lv_activate_opts *laopts, int monitor)
{
        return 1;
}

int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
                   struct dm_tree_node *node, uint32_t start_area,
                   uint32_t areas)
{
        return 0;
}

int device_is_usable(struct device *dev)
{
        return 0;
}

int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
                       const char *layer, const char *target_type)
{
        return 0;
}
#else /* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
        if (act == _activation)
                return;

        _activation = act;
        if (_activation)
                log_verbose("Activation enabled. Device-mapper kernel "
                            "driver will be used.");
        else
                log_warn("WARNING: Activation disabled. No device-mapper "
                         "interaction will be attempted.");
}
int lv_passes_volumes_filter(struct cmd_context *cmd, struct logical_volume *lv,
                             const struct dm_config_node *cn, const char *config_path)
{
        const struct dm_config_value *cv;
        const char *str;
        static char path[PATH_MAX];

        log_verbose("%s configuration setting defined: "
                    "Checking the list to match %s/%s",
                    config_path, lv->vg->name, lv->name);

        for (cv = cn->v; cv; cv = cv->next) {
                if (cv->type != DM_CFG_STRING) {
                        log_error("Ignoring invalid string in config file %s",
                                  config_path);
                        continue;
                }
                str = cv->v.str;
                if (!*str) {
                        log_error("Ignoring empty string in config file %s",
                                  config_path);
                        continue;
                }

                /* Tag? */
                if (*str == '@') {
                        str++;
                        if (!*str) {
                                log_error("Ignoring empty tag in config file "
                                          "%s", config_path);
                                continue;
                        }
                        /* If any host tag matches any LV or VG tag, activate */
                        if (!strcmp(str, "*")) {
                                if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
                                    || str_list_match_list(&cmd->tags,
                                                           &lv->vg->tags, NULL))
                                        return 1;

                                continue;
                        }
                        /* If supplied tag matches LV or VG tag, activate */
                        if (str_list_match_item(&lv->tags, str) ||
                            str_list_match_item(&lv->vg->tags, str))
                                return 1;

                        continue;
                }

                if (!strchr(str, '/')) {
                        /* vgname supplied */
                        if (!strcmp(str, lv->vg->name))
                                return 1;

                        continue;
                }

                /* vgname/lvname */
                if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
                                lv->name) < 0) {
                        log_error("dm_snprintf error from %s/%s", lv->vg->name,
                                  lv->name);
                        continue;
                }
                if (!strcmp(path, str))
                        return 1;
        }

        log_verbose("No item supplied in %s configuration setting "
                    "matches %s/%s", config_path, lv->vg->name, lv->name);

        return 0;
}
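
/*
 * For reference, a hypothetical activation/volume_list setting in lvm.conf
 * that this filter would match:
 *
 *      activation {
 *              volume_list = [ "vg1", "vg2/lvol1", "@mytag", "@*" ]
 *      }
 *
 * "vg1" matches every LV in that VG, "vg2/lvol1" matches one specific LV,
 * "@mytag" matches any LV or VG tagged "mytag", and "@*" matches any LV
 * whose tags (or whose VG's tags) overlap the host's tags.
 */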
static int _passes_activation_filter(struct cmd_context *cmd,
                                     struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
                log_verbose("activation/volume_list configuration setting "
                            "not defined: Checking only host tags for %s/%s",
                            lv->vg->name, lv->name);

                /* If no host tags defined, activate */
                if (dm_list_empty(&cmd->tags))
                        return 1;

                /* If any host tag matches any LV or VG tag, activate */
                if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
                    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
                        return 1;

                log_verbose("No host tag matches %s/%s",
                            lv->vg->name, lv->name);

                /* Don't activate */
                return 0;
        }

        return lv_passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
}
static int _passes_readonly_filter(struct cmd_context *cmd,
                                   struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/read_only_volume_list")))
                return 0;

        return lv_passes_volumes_filter(cmd, lv, cn, "activation/read_only_volume_list");
}
int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv)
{
        const struct dm_config_node *cn;

        if (!(cn = find_config_tree_node(cmd, "activation/auto_activation_volume_list"))) {
                log_verbose("activation/auto_activation_volume_list configuration setting "
                            "not defined: All logical volumes will be auto-activated.");
                return 1;
        }

        return lv_passes_volumes_filter(cmd, lv, cn, "activation/auto_activation_volume_list");
}
int library_version(char *version, size_t size)
{
        if (!activation())
                return 0;

        return dm_get_library_version(version, size);
}

int driver_version(char *version, size_t size)
{
        if (!activation())
                return 0;

        log_very_verbose("Getting driver version");

        return dm_driver_version(version, size);
}
int target_version(const char *target_name, uint32_t *maj,
                   uint32_t *min, uint32_t *patchlevel)
{
        int r = 0;
        struct dm_task *dmt;
        struct dm_versions *target, *last_target;

        log_very_verbose("Getting target version for %s", target_name);
        if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
                return_0;

        if (activation_checks() && !dm_task_enable_checks(dmt))
                goto_out;

        if (!dm_task_run(dmt)) {
                log_debug("Failed to get %s target version", target_name);
                /* Assume this was because LIST_VERSIONS isn't supported */
                r = 1;
                *maj = 0;
                *min = 0;
                *patchlevel = 0;
                goto out;
        }

        target = dm_task_get_versions(dmt);

        do {
                last_target = target;

                if (!strcmp(target_name, target->name)) {
                        r = 1;
                        *maj = target->version[0];
                        *min = target->version[1];
                        *patchlevel = target->version[2];
                        goto out;
                }

                target = (struct dm_versions *)((char *) target + target->next);
        } while (last_target != target);

out:
        dm_task_destroy(dmt);

        return r;
}
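
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * checks whether a kernel target is recent enough before relying on a
 * feature.  The target name and version numbers are example values.
 */
static int _example_snapshot_target_at_least(uint32_t want_maj, uint32_t want_min)
{
        uint32_t maj, min, patchlevel;

        if (!target_version("snapshot", &maj, &min, &patchlevel))
                return 0;

        return (maj > want_maj) || (maj == want_maj && min >= want_min);
}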
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
        struct dm_task *dmt;
        const char *uuid;
        int r;

        if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
                return_0;

        if (!dm_task_set_minor(dmt, minor) ||
            !dm_task_set_major(dmt, major) ||
            !dm_task_run(dmt) ||
            !(uuid = dm_task_get_uuid(dmt))) {
                dm_task_destroy(dmt);
                return 0;
        }

        r = strncasecmp(uuid, prefix, strlen(prefix));
        dm_task_destroy(dmt);

        return r ? 0 : 1;
}
int module_present(struct cmd_context *cmd, const char *target_name)
{
        int ret = 0;
#ifdef MODPROBE_CMD
        char module[128];
        const char *argv[3];

        if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
                log_error("module_present module name too long: %s",
                          target_name);
                return 0;
        }

        argv[0] = MODPROBE_CMD;
        argv[1] = module;
        argv[2] = NULL;

        ret = exec_cmd(cmd, argv, NULL, 0);
#endif
        return ret;
}
int target_present(struct cmd_context *cmd, const char *target_name,
                   int use_modprobe)
{
        uint32_t maj, min, patchlevel;

        if (!activation())
                return 0;

#ifdef MODPROBE_CMD
        if (use_modprobe) {
                if (target_version(target_name, &maj, &min, &patchlevel))
                        return 1;

                if (!module_present(cmd, target_name))
                        return_0;
        }
#endif

        return target_version(target_name, &maj, &min, &patchlevel);
}
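
/*
 * For reference (a hedged sketch, not from this file): segment-type
 * implementations typically wrap this call when probing for their kernel
 * target, e.g.:
 *
 *      if (!target_present(cmd, "mirror", 1)) {
 *              log_error("mirror target not available in kernel");
 *              return 0;
 *      }
 *
 * Passing use_modprobe = 1 lets the check attempt to load the dm-mirror
 * module before giving up.
 */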
/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
            struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        struct dm_info dminfo;
        const char *layer = NULL;

        if (!activation())
                return 0;

        /*
         * If open_count info is requested, we have to be sure our own udev
         * transactions are finished.
         * For a non-clustered locking type we are only interested in
         * non-delete operations in progress - as only those could lead
         * to opened files.
         */
        if (with_open_count) {
                if (locking_is_clustered())
                        sync_local_dev_names(cmd); /* Wait to have udev in sync */
                else if (fs_has_non_delete_ops())
                        fs_unlock(); /* For non clustered - wait if there are non-delete ops */
        }

        if (use_layer && lv_is_thin_pool(lv))
                layer = "tpool";
        else if (use_layer && lv_is_origin(lv))
                layer = "real";

        if (!dev_manager_info(lv->vg->cmd->mem, lv, layer, with_open_count,
                              with_read_ahead, &dminfo, &info->read_ahead))
                return_0;

        info->exists = dminfo.exists;
        info->suspended = dminfo.suspended;
        info->open_count = dminfo.open_count;
        info->major = dminfo.major;
        info->minor = dminfo.minor;
        info->read_only = dminfo.read_only;
        info->live_table = dminfo.live_table;
        info->inactive_table = dminfo.inactive_table;

        return 1;
}
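
/*
 * Illustrative sketch (not part of the original file): the common calling
 * pattern for lv_info() - fill a struct lvinfo on the stack, then test
 * info.exists (and optionally info.open_count).
 */
static int _example_lv_is_active_and_open(struct cmd_context *cmd,
                                          const struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 1, 0))
                return 0;

        return info.exists && info.open_count > 0;
}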
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
                    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
        int r;
        struct logical_volume *lv;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                return 0;

        r = lv_info(cmd, lv, use_layer, info, with_open_count, with_read_ahead);
        release_vg(lv->vg);

        return r;
}
int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
                        struct logical_volume *lv, struct lvinfo *info)
{
        if (!info->exists)
                return 1;

        /* If sysfs is not used, use open_count information only. */
        if (!*dm_sysfs_dir()) {
                if (info->open_count) {
                        log_error("Logical volume %s/%s in use.",
                                  lv->vg->name, lv->name);
                        return 0;
                }
                return 1;
        }

        if (dm_device_has_holders(info->major, info->minor)) {
                log_error("Logical volume %s/%s is used by another device.",
                          lv->vg->name, lv->name);
                return 0;
        }

        if (dm_device_has_mounted_fs(info->major, info->minor)) {
                log_error("Logical volume %s/%s contains a filesystem in use.",
                          lv->vg->name, lv->name);
                return 0;
        }

        return 1;
}
/*
 * Returns 1 if transient status checked, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_transient(dm, lv)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
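
/*
 * Illustrative sketch (not part of the original file): reporting snapshot
 * usage.  percent_to_float() is assumed here to be the available helper
 * for converting percent_t into a displayable value.
 */
static void _example_log_snapshot_usage(const struct logical_volume *cow_lv)
{
        percent_t percent;

        if (lv_snapshot_percent(cow_lv, &percent) &&
            percent != PERCENT_INVALID)
                log_print("Snapshot %s is %.2f%% full", cow_lv->name,
                          percent_to_float(percent));
}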
/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
                      int wait, percent_t *percent, uint32_t *event_nr)
{
        int r;
        struct dev_manager *dm;
        struct lvinfo info;

        /* If a mirrored LV is temporarily shrunk to one area (= linear),
         * it should be considered in-sync. */
        if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
                *percent = PERCENT_100;
                return 1;
        }

        if (!activation())
                return 0;

        log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

        if (!lv_info(cmd, lv, 0, &info, 0, 0))
                return_0;

        if (!info.exists)
                return 0;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
{
        return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
}
/*
 * Returns data or metadata percent usage, depending on whether metadata is 0 or 1.
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
                         percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking thin %sdata percent for LV %s/%s",
                  (metadata) ? "meta" : "", lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_thin_percent(const struct logical_volume *lv,
                    int mapped, percent_t *percent)
{
        int r;
        struct dev_manager *dm;

        if (!activation())
                return 0;

        log_debug("Checking thin percent for LV %s/%s",
                  lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
/*
 * Returns 1 if transaction_id set, else 0 on failure.
 */
int lv_thin_pool_transaction_id(const struct logical_volume *lv,
                                uint64_t *transaction_id)
{
        int r;
        struct dev_manager *dm;
        struct dm_status_thin_pool *status;

        if (!activation())
                return 0;

        log_debug("Checking thin-pool transaction id for LV %s/%s",
                  lv->vg->name, lv->name);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
                stack;
        else
                *transaction_id = status->transaction_id;

        dev_manager_destroy(dm);

        return r;
}
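
/*
 * Illustrative sketch (not part of the original file): thin-pool handling
 * compares the kernel's transaction_id against the one recorded in the LVM
 * metadata to detect a mismatched pool.  The transaction_id member of the
 * pool's first segment is assumed here.
 */
static int _example_pool_transaction_matches(const struct logical_volume *pool_lv)
{
        uint64_t txid;

        if (!lv_thin_pool_transaction_id(pool_lv, &txid))
                return 0;

        return txid == first_seg(pool_lv)->transaction_id;
}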
static int _lv_active(struct cmd_context *cmd, const struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
                stack;
                return -1;
        }

        return info.exists;
}
static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
        struct lvinfo info;

        if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
                stack;
                return -1;
        }

        return info.open_count;
}
static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
        int r;
        struct dev_manager *dm;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_activate(dm, lv, laopts)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
                       int *flush_required)
{
        int r;
        struct dev_manager *dm;
        int old_readonly = laopts->read_only;

        laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
                stack;

        dev_manager_destroy(dm);

        laopts->read_only = old_readonly;

        return r;
}
static int _lv_deactivate(struct logical_volume *lv)
{
        int r;
        struct dev_manager *dm;

        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
                return_0;

        if (!(r = dev_manager_deactivate(dm, lv)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
                          int lockfs, int flush_required)
{
        int r;
        struct dev_manager *dm;

        laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);

        /*
         * When we are asked to manipulate (normally suspend/resume) the PVMOVE
         * device directly, we don't want to touch the devices that use it.
         */
        if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
                return_0;

        if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
                stack;

        dev_manager_destroy(dm);

        return r;
}
/*
 * These two functions return the number of visible LVs in the given state,
 * or -1 on error.  FIXME Check this.
 */
int lvs_in_vg_activated(const struct volume_group *vg)
{
        struct lv_list *lvl;
        int count = 0;

        if (!activation())
                return 0;

        dm_list_iterate_items(lvl, &vg->lvs)
                if (lv_is_visible(lvl->lv))
                        count += (_lv_active(vg->cmd, lvl->lv) == 1);

        log_debug("Counted %d active LVs in VG %s", count, vg->name);

        return count;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
        const struct lv_list *lvl;
        int count = 0;

        if (!activation())
                return 0;

        dm_list_iterate_items(lvl, &vg->lvs)
                if (lv_is_visible(lvl->lv))
                        count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

        log_debug("Counted %d open LVs in VG %s", count, vg->name);

        return count;
}
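
/*
 * Illustrative sketch (not part of the original file): tools typically use
 * these counters as guards, e.g. refusing to proceed while any visible LV
 * in the VG is still open or active.
 */
static int _example_vg_is_quiescent(const struct volume_group *vg)
{
        return !lvs_in_vg_opened(vg) && !lvs_in_vg_activated(vg);
}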
/*
 * _lv_is_active
 * @lv:        logical volume being queried
 * @locally:   set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code, which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *
 *      return  locally exclusively     status
 *      ======  ======= ===========     ======
 *         0       0        0           not active
 *         1       0        0           active remotely
 *         1       0        1           exclusive remotely
 *         1       1        0           active locally and possibly remotely
 *         1       1        1           exclusive locally (or local && !cluster)
 *
 * The VG lock must be held to call this function.
 *
 * Returns: 0 or 1
 */
static int _lv_is_active(const struct logical_volume *lv,
                         int *locally, int *exclusive)
{
        int r, l, e; /* remote, local, and exclusive */

        r = l = e = 0;

        if (_lv_active(lv->vg->cmd, lv))
                l = 1;

        if (!vg_is_clustered(lv->vg)) {
                if (l)
                        e = 1; /* exclusive by definition */
                goto out;
        }

        /* Active locally, and the caller doesn't care about exclusive */
        if (l && !exclusive)
                goto out;

        if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
                goto out;

        /*
         * If lock query is not supported (due to interfacing with old
         * code), then we cannot evaluate exclusivity properly.
         *
         * Old users of this function will never be affected by this,
         * since they are only concerned about active vs. not active.
         * New users of this function who specifically ask for 'exclusive'
         * will be given an error message.
         */
        log_error("Unable to determine exclusivity of %s", lv->name);

        e = 0;

        /*
         * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
         * but it's unreliable.
         */

out:
        if (locally)
                *locally = l;
        if (exclusive)
                *exclusive = e;

        log_very_verbose("%s/%s is %sactive%s%s",
                         lv->vg->name, lv->name,
                         (r || l) ? "" : "not ",
                         (exclusive && e) ? " exclusive" : "",
                         e ? (l ? " locally" : " remotely") : "");

        return r || l;
}
int lv_is_active(const struct logical_volume *lv)
{
        return _lv_is_active(lv, NULL, NULL);
}

int lv_is_active_but_not_locally(const struct logical_volume *lv)
{
        int l;

        return _lv_is_active(lv, &l, NULL) && !l;
}

int lv_is_active_exclusive(const struct logical_volume *lv)
{
        int e;

        return _lv_is_active(lv, NULL, &e) && e;
}

int lv_is_active_exclusive_locally(const struct logical_volume *lv)
{
        int l, e;

        return _lv_is_active(lv, &l, &e) && l && e;
}

int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
{
        int l, e;

        return _lv_is_active(lv, &l, &e) && !l && e;
}
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
                                                         const int timeout, enum dm_event_mask mask)
{
        struct dm_event_handler *dmevh;

        if (!(dmevh = dm_event_handler_create()))
                return_NULL;

        if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
                goto_bad;

        if (dm_event_handler_set_dso(dmevh, dso))
                goto_bad;

        if (dm_event_handler_set_uuid(dmevh, dmuuid))
                goto_bad;

        dm_event_handler_set_timeout(dmevh, timeout);
        dm_event_handler_set_event_mask(dmevh, mask);

        return dmevh;

bad:
        dm_event_handler_destroy(dmevh);

        return NULL;
}
char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
        char *path;

        if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
                log_error("Failed to allocate dmeventd library path.");
                return NULL;
        }

        get_shared_library_path(cmd, libpath, path, PATH_MAX);

        return path;
}
static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
{
        const char *layer;

        if (lv_is_thin_pool(lv))
                layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
        else if (lv_is_origin(lv))
                layer = "real"; /* Monitor "real" for "snapshot-origin". */
        else
                layer = NULL;

        return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
}
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
                                    struct logical_volume *lv, int *pending)
{
        char *uuid;
        enum dm_event_mask evmask = 0;
        struct dm_event_handler *dmevh;

        *pending = 0;

        if (!dso)
                return_0;

        if (!(uuid = _build_target_uuid(cmd, lv)))
                return_0;

        if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
                return_0;

        if (dm_event_get_registered_device(dmevh, 0)) {
                dm_event_handler_destroy(dmevh);
                return 0;
        }

        evmask = dm_event_handler_get_event_mask(dmevh);
        if (evmask & DM_EVENT_REGISTRATION_PENDING) {
                *pending = 1;
                evmask &= ~DM_EVENT_REGISTRATION_PENDING;
        }

        dm_event_handler_destroy(dmevh);

        return evmask;
}
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
                           int evmask __attribute__((unused)), int set, int timeout)
{
        char *uuid;
        struct dm_event_handler *dmevh;
        int r;

        if (!dso)
                return_0;

        /* We always monitor the "real" device, never the "snapshot-origin" itself. */
        if (!(uuid = _build_target_uuid(cmd, lv)))
                return_0;

        if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
                                               DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
                return_0;

        r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

        dm_event_handler_destroy(dmevh);

        if (!r)
                return_0;

        log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

        return 1;
}
/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
                           const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
        int i, pending = 0, monitored;
        int r = 1;
        struct dm_list *tmp, *snh, *snht;
        struct lv_segment *seg;
        struct lv_segment *log_seg;
        int (*monitor_fn) (struct lv_segment *s, int e);
        uint32_t s;
        static const struct lv_activate_opts zlaopts = { 0 };
        static const struct lv_activate_opts thinopts = { .skip_in_use = 1 };
        struct lvinfo info;

        if (!laopts)
                laopts = &zlaopts;

        /* skip dmeventd code altogether */
        if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
                return 1;

        /*
         * Nothing to do if dmeventd is configured not to be used.
         */
        if (monitor && !dmeventd_monitor_mode())
                return 1;

        /*
         * Allow unmonitoring of a thin pool via an explicit pool unmonitor
         * or via an unmonitor before the last thin-pool user's deactivation.
         * Skip the unmonitor if it was invoked via unmonitoring of a thin
         * volume and there is another thin-pool user (open_count > 1).
         */
        if (laopts->skip_in_use && lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) &&
            (info.open_count != 1)) {
                log_debug("Skipping unmonitor of opened %s (open:%d)",
                          lv->name, info.open_count);
                return 1;
        }

        /*
         * In case of a snapshot device, we monitor lv->snapshot->lv,
         * not the actual LV itself.
         */
        if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
                return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

        /*
         * In case this LV is a snapshot origin, we instead monitor
         * each of its respective snapshots.  The origin itself may
         * also need to be monitored if it is a mirror, for example.
         */
        if (!laopts->origin_only && lv_is_origin(lv))
                dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
                        if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
                                    struct lv_segment, origin_list)->cow, NULL, monitor))
                                r = 0;

        /*
         * If the volume is mirrored and its log is also mirrored, monitor
         * the log volume as well.
         */
        if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
            (log_seg = first_seg(seg->log_lv)) != NULL &&
            seg_is_mirrored(log_seg))
                if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
                        r = 0;

        dm_list_iterate(tmp, &lv->segments) {
                seg = dm_list_item(tmp, struct lv_segment);

                /* Recurse for AREA_LV */
                for (s = 0; s < seg->area_count; s++) {
                        if (seg_type(seg, s) != AREA_LV)
                                continue;
                        if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
                                                    monitor)) {
                                log_error("Failed to %smonitor %s",
                                          monitor ? "" : "un",
                                          seg_lv(seg, s)->name);
                                r = 0;
                        }
                }

                /*
                 * If unmonitoring of a thin volume was requested, only test
                 * whether there is no other thin-pool user.
                 *
                 * FIXME: code here looks like _lv_postorder()
                 */
                if (seg->pool_lv &&
                    !monitor_dev_for_events(cmd, seg->pool_lv,
                                            (!monitor) ? &thinopts : NULL, monitor))
                        r = 0;

                if (seg->metadata_lv &&
                    !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor))
                        r = 0;

                if (!seg_monitored(seg) || (seg->status & PVMOVE))
                        continue;

                monitor_fn = NULL;

                /* Check monitoring status */
                if (seg->segtype->ops->target_monitored)
                        monitored = seg->segtype->ops->target_monitored(seg, &pending);
                else
                        continue; /* segtype doesn't support registration */

                /*
                 * FIXME: We should really try again if pending
                 */
                monitored = (pending) ? 0 : monitored;

                if (monitor) {
                        if (monitored)
                                log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
                        else if (seg->segtype->ops->target_monitor_events)
                                monitor_fn = seg->segtype->ops->target_monitor_events;
                } else {
                        if (!monitored)
                                log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
                        else if (seg->segtype->ops->target_unmonitor_events)
                                monitor_fn = seg->segtype->ops->target_unmonitor_events;
                }

                /* Do [un]monitor */
                if (!monitor_fn)
                        continue;

                log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
                            test_mode() ? " [Test mode: skipping this]" : "");

                /* FIXME Test mode should really continue a bit further. */
                if (test_mode())
                        continue;

                /* FIXME specify events */
                if (!monitor_fn(seg, 0)) {
                        log_error("%s/%s: %s segment monitoring function failed.",
                                  lv->vg->name, lv->name, seg->segtype->name);
                        return 0;
                }

                /* Check [un]monitor results */
                /* Try a couple times if pending, but not forever... */
                for (i = 0; i < 10; i++) {
                        monitored = seg->segtype->ops->target_monitored(seg, &pending);
                        if (pending ||
                            (!monitored && monitor) ||
                            (monitored && !monitor))
                                log_very_verbose("%s/%s %smonitoring still pending: waiting...",
                                                 lv->vg->name, lv->name, monitor ? "" : "un");
                        else
                                break;
                }

                if (r)
                        r = (monitored && monitor) || (!monitored && !monitor);
        }

        return r;
#else
        return 1;
#endif
}
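
/*
 * For reference: within this file the monitoring toggle is always paired
 * with an activation change - e.g. _lv_resume() calls
 * monitor_dev_for_events(cmd, lv, laopts, 1) after resuming, and
 * lv_deactivate() calls monitor_dev_for_events(cmd, lv, NULL, 0) before
 * tearing the device down.
 */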
struct detached_lv_data {
        struct logical_volume *lv_pre;
        struct lv_activate_opts *laopts;
        int *flush_required;
};

static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
{
        struct detached_lv_data *detached = data;
        struct lv_list *lvl_pre;

        if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
                if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
                    !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
                        return_0;
        }

        return 1;
}
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
                       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
        struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
        struct lv_list *lvl_pre;
        struct seg_list *sl;
        struct lv_segment *snap_seg;
        struct lvinfo info;
        int r = 0, lockfs = 0, flush_required = 0;
        struct detached_lv_data detached;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto_out;

        /* Use precommitted metadata if present */
        if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
                goto_out;

        /* Ignore origin_only unless LV is origin in both old and new metadata */
        if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
                laopts->origin_only = 0;

        if (test_mode()) {
                _skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
                r = 1;
                goto out;
        }

        if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
                goto_out;

        if (!info.exists || info.suspended) {
                if (!error_if_not_suspended) {
                        r = 1;
                        if (info.suspended)
                                critical_section_inc(cmd, "already suspended");
                }
                goto out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        /*
         * Preload devices for the LV.
         * If the PVMOVE LV is being removed, it's only present in the old
         * metadata and not the new, so we must explicitly add the new
         * tables for all the changed LVs here, as the relationships
         * are not found by walking the new metadata.
         */
        if (!(lv_pre->status & LOCKED) &&
            (lv->status & LOCKED) &&
            (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
                /* Preload all the LVs above the PVMOVE LV */
                dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
                        if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
                                log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
                                goto out;
                        }
                        if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
                                goto_out;
                }

                /* Now preload the PVMOVE LV itself */
                if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
                        log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
                        goto out;
                }
                if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
                        goto_out;
        } else {
                if (!_lv_preload(lv_pre, laopts, &flush_required))
                        /* FIXME Revert preloading */
                        goto_out;

                /*
                 * Search for existing LVs that have become detached and preload them.
                 */
                detached.lv_pre = lv_pre;
                detached.laopts = laopts;
                detached.flush_required = &flush_required;

                if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
                        goto_out;

                /*
                 * Preload any snapshots that are being removed.
                 */
                if (!laopts->origin_only && lv_is_origin(lv)) {
                        dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
                                if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
                                        log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
                                                  snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
                                        goto out;
                                }
                                if (!lv_is_cow(lvl_pre->lv) &&
                                    !_lv_preload(lvl_pre->lv, laopts, &flush_required))
                                        goto_out;
                        }
                }
        }

        if (!monitor_dev_for_events(cmd, lv, laopts, 0))
                /* FIXME Consider aborting here */
                stack;

        critical_section_inc(cmd, "suspending");
        if (pvmove_lv)
                critical_section_inc(cmd, "suspending pvmove LV");

        if (!laopts->origin_only &&
            (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
                lockfs = 1;

        if (laopts->origin_only && lv_is_thin_volume(lv) && lv_is_thin_volume(lv_pre))
                lockfs = 1;

        /*
         * Suspending an LV directly above a PVMOVE LV also
         * suspends other LVs using that same PVMOVE LV.
         * FIXME Remove this and delay the 'clear node' until
         * after the code knows whether there's a different
         * inactive table to load or not instead so lv_suspend
         * can be called separately for each LV safely.
         */
        if ((lv_pre->vg->status & PRECOMMITTED) &&
            (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
                if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
                        critical_section_dec(cmd, "failed precommitted suspend");
                        if (pvmove_lv)
                                critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
                        goto_out;
                }
        } else {
                /* Normal suspend */
                if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
                        critical_section_dec(cmd, "failed suspend");
                        if (pvmove_lv)
                                critical_section_dec(cmd, "failed suspend (pvmove)");
                        goto_out;
                }
        }

        r = 1;
out:
        if (lv_pre)
                release_vg(lv_pre->vg);
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}
/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any preloaded tables may then use non-clustered targets.
 *
 * Returns success if the device is not active.
 */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
        struct lv_activate_opts laopts = {
                .origin_only = origin_only,
                .exclusive = exclusive
        };

        return _lv_suspend(cmd, lvid_s, &laopts, 0);
}
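
/*
 * For reference (a hedged sketch, not from this file): metadata updates
 * drive the suspend/commit/resume cycle around this helper:
 *
 *      if (!lv_suspend_if_active(cmd, lvid_s, 0, 0))
 *              return_0;
 *      ...commit the VG metadata...
 *      if (!lv_resume_if_active(cmd, lvid_s, 0, 0, 0))
 *              return_0;
 */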
/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
        return _lv_suspend(cmd, lvid_s, 1);
}
***********/
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
                      struct lv_activate_opts *laopts, int error_if_not_active)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;
        int messages_only = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto_out;

        if (lv_is_thin_pool(lv) && laopts->origin_only)
                messages_only = 1;

        if (!lv_is_origin(lv) && !lv_is_thin_volume(lv))
                laopts->origin_only = 0;

        if (test_mode()) {
                _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
                      laopts->revert ? " (reverting)" : "");
                r = 1;
                goto out;
        }

        log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
                  error_if_not_active ? "" : " if active",
                  laopts->origin_only ? " without snapshots" : "",
                  laopts->revert ? " (reverting)" : "");

        if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
                goto_out;

        if (!info.exists || !(info.suspended || messages_only)) {
                if (error_if_not_active)
                        goto_out;
                r = 1;
                if (!info.suspended)
                        critical_section_dec(cmd, "already resumed");
                goto out;
        }

        laopts->read_only = _passes_readonly_filter(cmd, lv);

        if (!_lv_activate_lv(lv, laopts))
                goto_out;

        critical_section_dec(cmd, "resumed");

        if (!monitor_dev_for_events(cmd, lv, laopts, 1))
                stack;

        r = 1;
out:
        if (lv)
                release_vg(lv->vg);

        return r;
}
/*
 * In a cluster, set exclusive to indicate that only one node is using the
 * device.  Any tables loaded may then use non-clustered targets.
 *
 * @exclusive  This parameter only has an effect in cluster context.
 *             It forces a local target type to be used (instead of a
 *             cluster-aware type).
 *
 * Returns success if the device is not active.
 */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
                        unsigned origin_only, unsigned exclusive,
                        unsigned revert)
{
        struct lv_activate_opts laopts = {
                .origin_only = origin_only,
                .exclusive = exclusive,
                .revert = revert
        };

        return _lv_resume(cmd, lvid_s, &laopts, 0);
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
        struct lv_activate_opts laopts = { .origin_only = origin_only, };

        return _lv_resume(cmd, lvid_s, &laopts, 1);
}
static int _lv_has_open_snapshots(struct logical_volume *lv)
{
        struct lv_segment *snap_seg;
        struct lvinfo info;
        int r = 0;

        dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
                if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
                        r = 1;
                        continue;
                }

                if (info.exists && info.open_count) {
                        log_error("LV %s/%s has open snapshot %s: "
                                  "not deactivating", lv->vg->name, lv->name,
                                  snap_seg->cow->name);
                        r = 1;
                }
        }

        return r;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (test_mode()) {
                _skip("Deactivating '%s'.", lv->name);
                r = 1;
                goto out;
        }

        log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

        if (!lv_info(cmd, lv, 0, &info, 1, 0))
                goto_out;

        if (!info.exists) {
                r = 1;
                goto out;
        }

        if (lv_is_visible(lv)) {
                if (!lv_check_not_in_use(cmd, lv, &info))
                        goto_out;

                if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
                        goto_out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        if (!monitor_dev_for_events(cmd, lv, NULL, 0))
                stack;

        critical_section_inc(cmd, "deactivating");
        r = _lv_deactivate(lv);
        critical_section_dec(cmd, "deactivated");

        if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
                r = 0;
out:
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}
/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
                         int *activate_lv)
{
        struct logical_volume *lv;
        int r = 0;

        if (!activation()) {
                *activate_lv = 1;
                return 1;
        }

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (!_passes_activation_filter(cmd, lv)) {
                log_verbose("Not activating %s/%s since it does not pass "
                            "activation filter.", lv->vg->name, lv->name);
                *activate_lv = 0;
        } else
                *activate_lv = 1;
        r = 1;
out:
        if (lv)
                release_vg(lv->vg);

        return r;
}
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
                        struct lv_activate_opts *laopts, int filter)
{
        struct logical_volume *lv;
        struct lvinfo info;
        int r = 0;

        if (!activation())
                return 1;

        if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
                goto out;

        if (filter && !_passes_activation_filter(cmd, lv)) {
                log_error("Not activating %s/%s since it does not pass "
                          "activation filter.", lv->vg->name, lv->name);
                goto out;
        }

        if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
                log_error("Refusing activation of partial LV %s. Use --partial to override.",
                          lv->name);
                goto out;
        }

        if (lv_has_unknown_segments(lv)) {
                log_error("Refusing activation of LV %s containing "
                          "an unrecognised segment.", lv->name);
                goto out;
        }

        if (test_mode()) {
                _skip("Activating '%s'.", lv->name);
                r = 1;
                goto out;
        }

        laopts->read_only = _passes_readonly_filter(cmd, lv);

        log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
                  laopts->exclusive ? " exclusively" : "",
                  laopts->read_only ? " read-only" : "");

        if (!lv_info(cmd, lv, 0, &info, 0, 0))
                goto_out;

        /*
         * Nothing to do?
         */
        if (info.exists && !info.suspended && info.live_table &&
            (info.read_only == read_only_lv(lv, laopts))) {
                r = 1;
                goto out;
        }

        if (!lv_read_replicator_vgs(lv))
                goto_out;

        lv_calculate_readahead(lv, NULL);

        critical_section_inc(cmd, "activating");
        if (!(r = _lv_activate_lv(lv, laopts)))
                stack;
        critical_section_dec(cmd, "activated");

        if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
                stack;

out:
        if (lv) {
                lv_release_replicator_vgs(lv);
                release_vg(lv->vg);
        }

        return r;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        struct lv_activate_opts laopts = { .exclusive = exclusive };

        if (!_lv_activate(cmd, lvid_s, &laopts, 0))
                return_0;

        return 1;
}

/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
        struct lv_activate_opts laopts = { .exclusive = exclusive };

        if (!_lv_activate(cmd, lvid_s, &laopts, 1))
                return_0;

        return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
        int r;

        if (!lv) {
                r = dm_mknodes(NULL);
                fs_unlock();
                return r;
        }

        if (!activation())
                return 1;

        r = dev_manager_mknodes(lv);

        fs_unlock();

        return r;
}
/*
 * Does the PV use this VG somewhere in its construction?
 * Returns 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
               struct volume_group *vg)
{
        if (!activation() || !pv->dev)
                return 0;

        if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
                return 0;

        return dev_manager_device_uses_vg(pv->dev, vg);
}
void activation_release(void)
{
        dev_manager_release();
}

void activation_exit(void)
{
        dev_manager_exit();
}