/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "sharedlib.h"

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
39 int lvm1_present(struct cmd_context
*cmd
)
41 static char path
[PATH_MAX
];
43 if (dm_snprintf(path
, sizeof(path
), "%s/lvm/global", cmd
->proc_dir
)
45 log_error("LVM1 proc global snprintf failed");
49 if (path_exists(path
))
55 int list_segment_modules(struct dm_pool
*mem
, const struct lv_segment
*seg
,
56 struct dm_list
*modules
)
59 struct lv_segment
*seg2
, *snap_seg
;
62 if (seg
->segtype
->ops
->modules_needed
&&
63 !seg
->segtype
->ops
->modules_needed(mem
, seg
, modules
)) {
64 log_error("module string allocation failed");
68 if (lv_is_origin(seg
->lv
))
69 dm_list_iterate(snh
, &seg
->lv
->snapshot_segs
)
70 if (!list_lv_modules(mem
,
71 dm_list_struct_base(snh
,
77 if (lv_is_cow(seg
->lv
)) {
78 snap_seg
= find_cow(seg
->lv
);
79 if (snap_seg
->segtype
->ops
->modules_needed
&&
80 !snap_seg
->segtype
->ops
->modules_needed(mem
, snap_seg
,
82 log_error("snap_seg module string allocation failed");
87 for (s
= 0; s
< seg
->area_count
; s
++) {
88 switch (seg_type(seg
, s
)) {
90 seg2
= find_seg_by_le(seg_lv(seg
, s
), seg_le(seg
, s
));
91 if (seg2
&& !list_segment_modules(mem
, seg2
, modules
))
103 int list_lv_modules(struct dm_pool
*mem
, const struct logical_volume
*lv
,
104 struct dm_list
*modules
)
106 struct lv_segment
*seg
;
108 dm_list_iterate_items(seg
, &lv
->segments
)
109 if (!list_segment_modules(mem
, seg
, modules
))
115 #ifndef DEVMAPPER_SUPPORT
116 void set_activation(int act
)
118 static int warned
= 0;
123 log_error("Compiled without libdevmapper support. "
124 "Can't enable activation.");
132 int library_version(char *version
, size_t size
)
136 int driver_version(char *version
, size_t size
)
140 int target_version(const char *target_name
, uint32_t *maj
,
141 uint32_t *min
, uint32_t *patchlevel
)
145 int target_present(struct cmd_context
*cmd
, const char *target_name
,
150 int lvm_dm_prefix_check(int major
, int minor
, const char *prefix
)
154 int lv_info(struct cmd_context
*cmd
, const struct logical_volume
*lv
, int use_layer
,
155 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
159 int lv_info_by_lvid(struct cmd_context
*cmd
, const char *lvid_s
, int use_layer
,
160 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
164 int lv_check_not_in_use(struct cmd_context
*cmd
__attribute__((unused
)),
165 struct logical_volume
*lv
, struct lvinfo
*info
)
169 int lv_snapshot_percent(const struct logical_volume
*lv
, percent_t
*percent
)
173 int lv_mirror_percent(struct cmd_context
*cmd
, const struct logical_volume
*lv
,
174 int wait
, percent_t
*percent
, uint32_t *event_nr
)
178 int lv_raid_percent(const struct logical_volume
*lv
, percent_t
*percent
)
182 int lv_thin_pool_percent(const struct logical_volume
*lv
, int metadata
,
187 int lv_thin_percent(const struct logical_volume
*lv
, int mapped
,
192 int lv_thin_pool_transaction_id(const struct logical_volume
*lv
,
193 uint64_t *transaction_id
)
197 int lvs_in_vg_activated(const struct volume_group
*vg
)
201 int lvs_in_vg_opened(const struct volume_group
*vg
)
206 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
211 int lv_suspend_if_active(struct cmd_context
*cmd
, const char *lvid_s
, unsigned origin_only
, unsigned exclusive
)
215 int lv_resume(struct cmd_context
*cmd
, const char *lvid_s
, unsigned origin_only
)
219 int lv_resume_if_active(struct cmd_context
*cmd
, const char *lvid_s
,
220 unsigned origin_only
, unsigned exclusive
, unsigned revert
)
224 int lv_deactivate(struct cmd_context
*cmd
, const char *lvid_s
)
228 int lv_activation_filter(struct cmd_context
*cmd
, const char *lvid_s
,
233 int lv_activate(struct cmd_context
*cmd
, const char *lvid_s
, int exclusive
)
237 int lv_activate_with_filter(struct cmd_context
*cmd
, const char *lvid_s
, int exclusive
)
241 int lv_mknodes(struct cmd_context
*cmd
, const struct logical_volume
*lv
)
245 int pv_uses_vg(struct physical_volume
*pv
,
246 struct volume_group
*vg
)
250 void activation_release(void)
253 void activation_exit(void)
257 int lv_is_active(const struct logical_volume
*lv
)
261 int lv_is_active_but_not_locally(const struct logical_volume
*lv
)
265 int lv_is_active_exclusive(const struct logical_volume
*lv
)
269 int lv_is_active_exclusive_locally(const struct logical_volume
*lv
)
273 int lv_is_active_exclusive_remotely(const struct logical_volume
*lv
)
278 int lv_check_transient(struct logical_volume
*lv
)
282 int monitor_dev_for_events(struct cmd_context
*cmd
, struct logical_volume
*lv
,
283 const struct lv_activate_opts
*laopts
, int monitor
)
293 int add_areas_line(struct dev_manager
*dm
, struct lv_segment
*seg
,
294 struct dm_tree_node
*node
, uint32_t start_area
,
299 int device_is_usable(struct device
*dev
)
303 int lv_has_target_type(struct dm_pool
*mem
, struct logical_volume
*lv
,
304 const char *layer
, const char *target_type
)
308 #else /* DEVMAPPER_SUPPORT */
310 static int _activation
= 1;
312 void set_activation(int act
)
314 if (act
== _activation
)
319 log_verbose("Activation enabled. Device-mapper kernel "
320 "driver will be used.");
322 log_warn("WARNING: Activation disabled. No device-mapper "
323 "interaction will be attempted.");
331 static int _lv_passes_volumes_filter(struct cmd_context
*cmd
, struct logical_volume
*lv
,
332 const struct dm_config_node
*cn
, const char *config_path
)
334 const struct dm_config_value
*cv
;
336 static char path
[PATH_MAX
];
338 log_verbose("%s configuration setting defined: "
339 "Checking the list to match %s/%s",
340 config_path
, lv
->vg
->name
, lv
->name
);
342 for (cv
= cn
->v
; cv
; cv
= cv
->next
) {
343 if (cv
->type
!= DM_CFG_STRING
) {
344 log_error("Ignoring invalid string in config file %s",
350 log_error("Ignoring empty string in config file %s",
360 log_error("Ignoring empty tag in config file "
364 /* If any host tag matches any LV or VG tag, activate */
365 if (!strcmp(str
, "*")) {
366 if (str_list_match_list(&cmd
->tags
, &lv
->tags
, NULL
)
367 || str_list_match_list(&cmd
->tags
,
368 &lv
->vg
->tags
, NULL
))
373 /* If supplied tag matches LV or VG tag, activate */
374 if (str_list_match_item(&lv
->tags
, str
) ||
375 str_list_match_item(&lv
->vg
->tags
, str
))
380 if (!strchr(str
, '/')) {
381 /* vgname supplied */
382 if (!strcmp(str
, lv
->vg
->name
))
388 if (dm_snprintf(path
, sizeof(path
), "%s/%s", lv
->vg
->name
,
390 log_error("dm_snprintf error from %s/%s", lv
->vg
->name
,
394 if (!strcmp(path
, str
))
398 log_verbose("No item supplied in %s configuration setting "
399 "matches %s/%s", config_path
, lv
->vg
->name
, lv
->name
);
404 static int _passes_activation_filter(struct cmd_context
*cmd
,
405 struct logical_volume
*lv
)
407 const struct dm_config_node
*cn
;
409 if (!(cn
= find_config_tree_node(cmd
, "activation/volume_list"))) {
410 log_verbose("activation/volume_list configuration setting "
411 "not defined: Checking only host tags for %s/%s",
412 lv
->vg
->name
, lv
->name
);
414 /* If no host tags defined, activate */
415 if (dm_list_empty(&cmd
->tags
))
418 /* If any host tag matches any LV or VG tag, activate */
419 if (str_list_match_list(&cmd
->tags
, &lv
->tags
, NULL
) ||
420 str_list_match_list(&cmd
->tags
, &lv
->vg
->tags
, NULL
))
423 log_verbose("No host tag matches %s/%s",
424 lv
->vg
->name
, lv
->name
);
430 return _lv_passes_volumes_filter(cmd
, lv
, cn
, "activation/volume_list");
433 static int _passes_readonly_filter(struct cmd_context
*cmd
,
434 struct logical_volume
*lv
)
436 const struct dm_config_node
*cn
;
438 if (!(cn
= find_config_tree_node(cmd
, "activation/read_only_volume_list")))
441 return _lv_passes_volumes_filter(cmd
, lv
, cn
, "activation/read_only_volume_list");
445 int lv_passes_auto_activation_filter(struct cmd_context
*cmd
, struct logical_volume
*lv
)
447 const struct dm_config_node
*cn
;
449 if (!(cn
= find_config_tree_node(cmd
, "activation/auto_activation_volume_list"))) {
450 log_verbose("activation/auto_activation_volume_list configuration setting "
451 "not defined: All logical volumes will be auto-activated.");
455 return _lv_passes_volumes_filter(cmd
, lv
, cn
, "activation/auto_activation_volume_list");
458 int library_version(char *version
, size_t size
)
463 return dm_get_library_version(version
, size
);
466 int driver_version(char *version
, size_t size
)
471 log_very_verbose("Getting driver version");
473 return dm_driver_version(version
, size
);
476 int target_version(const char *target_name
, uint32_t *maj
,
477 uint32_t *min
, uint32_t *patchlevel
)
481 struct dm_versions
*target
, *last_target
;
483 log_very_verbose("Getting target version for %s", target_name
);
484 if (!(dmt
= dm_task_create(DM_DEVICE_LIST_VERSIONS
)))
487 if (activation_checks() && !dm_task_enable_checks(dmt
))
490 if (!dm_task_run(dmt
)) {
491 log_debug("Failed to get %s target version", target_name
);
492 /* Assume this was because LIST_VERSIONS isn't supported */
500 target
= dm_task_get_versions(dmt
);
503 last_target
= target
;
505 if (!strcmp(target_name
, target
->name
)) {
507 *maj
= target
->version
[0];
508 *min
= target
->version
[1];
509 *patchlevel
= target
->version
[2];
513 target
= (struct dm_versions
*)((char *) target
+ target
->next
);
514 } while (last_target
!= target
);
518 log_very_verbose("Found %s target "
519 "v%" PRIu32
".%" PRIu32
".%" PRIu32
".",
520 target_name
, *maj
, *min
, *patchlevel
);
522 dm_task_destroy(dmt
);
527 int lvm_dm_prefix_check(int major
, int minor
, const char *prefix
)
533 if (!(dmt
= dm_task_create(DM_DEVICE_STATUS
)))
536 if (!dm_task_set_minor(dmt
, minor
) ||
537 !dm_task_set_major(dmt
, major
) ||
539 !(uuid
= dm_task_get_uuid(dmt
))) {
540 dm_task_destroy(dmt
);
544 r
= strncasecmp(uuid
, prefix
, strlen(prefix
));
545 dm_task_destroy(dmt
);
550 int module_present(struct cmd_context
*cmd
, const char *target_name
)
557 if (dm_snprintf(module
, sizeof(module
), "dm-%s", target_name
) < 0) {
558 log_error("module_present module name too long: %s",
563 argv
[0] = MODPROBE_CMD
;
567 ret
= exec_cmd(cmd
, argv
, NULL
, 0);
572 int target_present(struct cmd_context
*cmd
, const char *target_name
,
575 uint32_t maj
, min
, patchlevel
;
582 if (target_version(target_name
, &maj
, &min
, &patchlevel
))
585 if (!module_present(cmd
, target_name
))
590 return target_version(target_name
, &maj
, &min
, &patchlevel
);
594 * Returns 1 if info structure populated, else 0 on failure.
596 int lv_info(struct cmd_context
*cmd
, const struct logical_volume
*lv
, int use_layer
,
597 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
599 struct dm_info dminfo
;
605 * If open_count info is requested and we have to be sure our own udev
606 * transactions are finished
607 * For non-clustered locking type we are only interested for non-delete operation
608 * in progress - as only those could lead to opened files
610 if (with_open_count
) {
611 if (locking_is_clustered())
612 sync_local_dev_names(cmd
); /* Wait to have udev in sync */
613 else if (fs_has_non_delete_ops())
614 fs_unlock(); /* For non clustered - wait if there are non-delete ops */
617 if (use_layer
&& lv_is_thin_pool(lv
))
619 else if (use_layer
&& lv_is_origin(lv
))
624 if (!dev_manager_info(lv
->vg
->cmd
->mem
, lv
, layer
, with_open_count
,
625 with_read_ahead
, &dminfo
, &info
->read_ahead
))
628 info
->exists
= dminfo
.exists
;
629 info
->suspended
= dminfo
.suspended
;
630 info
->open_count
= dminfo
.open_count
;
631 info
->major
= dminfo
.major
;
632 info
->minor
= dminfo
.minor
;
633 info
->read_only
= dminfo
.read_only
;
634 info
->live_table
= dminfo
.live_table
;
635 info
->inactive_table
= dminfo
.inactive_table
;
640 int lv_info_by_lvid(struct cmd_context
*cmd
, const char *lvid_s
, int use_layer
,
641 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
644 struct logical_volume
*lv
;
646 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
649 r
= lv_info(cmd
, lv
, use_layer
, info
, with_open_count
, with_read_ahead
);
655 int lv_check_not_in_use(struct cmd_context
*cmd
__attribute__((unused
)),
656 struct logical_volume
*lv
, struct lvinfo
*info
)
661 /* If sysfs is not used, use open_count information only. */
662 if (!*dm_sysfs_dir()) {
663 if (info
->open_count
) {
664 log_error("Logical volume %s/%s in use.",
665 lv
->vg
->name
, lv
->name
);
672 if (dm_device_has_holders(info
->major
, info
->minor
)) {
673 log_error("Logical volume %s/%s is used by another device.",
674 lv
->vg
->name
, lv
->name
);
678 if (dm_device_has_mounted_fs(info
->major
, info
->minor
)) {
679 log_error("Logical volume %s/%s contains a filesystem in use.",
680 lv
->vg
->name
, lv
->name
);
688 * Returns 1 if percent set, else 0 on failure.
690 int lv_check_transient(struct logical_volume
*lv
)
693 struct dev_manager
*dm
;
698 log_debug("Checking transient status for LV %s/%s", lv
->vg
->name
, lv
->name
);
700 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
703 if (!(r
= dev_manager_transient(dm
, lv
)))
706 dev_manager_destroy(dm
);
712 * Returns 1 if percent set, else 0 on failure.
714 int lv_snapshot_percent(const struct logical_volume
*lv
, percent_t
*percent
)
717 struct dev_manager
*dm
;
722 log_debug("Checking snapshot percent for LV %s/%s", lv
->vg
->name
, lv
->name
);
724 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
727 if (!(r
= dev_manager_snapshot_percent(dm
, lv
, percent
)))
730 dev_manager_destroy(dm
);
735 /* FIXME Merge with snapshot_percent */
736 int lv_mirror_percent(struct cmd_context
*cmd
, const struct logical_volume
*lv
,
737 int wait
, percent_t
*percent
, uint32_t *event_nr
)
740 struct dev_manager
*dm
;
743 /* If mirrored LV is temporarily shrinked to 1 area (= linear),
744 * it should be considered in-sync. */
745 if (dm_list_size(&lv
->segments
) == 1 && first_seg(lv
)->area_count
== 1) {
746 *percent
= PERCENT_100
;
753 log_debug("Checking mirror percent for LV %s/%s", lv
->vg
->name
, lv
->name
);
755 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0))
761 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
764 if (!(r
= dev_manager_mirror_percent(dm
, lv
, wait
, percent
, event_nr
)))
767 dev_manager_destroy(dm
);
772 int lv_raid_percent(const struct logical_volume
*lv
, percent_t
*percent
)
774 return lv_mirror_percent(lv
->vg
->cmd
, lv
, 0, percent
, NULL
);
778 * Returns data or metadata percent usage, depends on metadata 0/1.
779 * Returns 1 if percent set, else 0 on failure.
781 int lv_thin_pool_percent(const struct logical_volume
*lv
, int metadata
,
785 struct dev_manager
*dm
;
790 log_debug("Checking thin %sdata percent for LV %s/%s",
791 (metadata
) ? "meta" : "", lv
->vg
->name
, lv
->name
);
793 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
796 if (!(r
= dev_manager_thin_pool_percent(dm
, lv
, metadata
, percent
)))
799 dev_manager_destroy(dm
);
805 * Returns 1 if percent set, else 0 on failure.
807 int lv_thin_percent(const struct logical_volume
*lv
,
808 int mapped
, percent_t
*percent
)
811 struct dev_manager
*dm
;
816 log_debug("Checking thin percent for LV %s/%s",
817 lv
->vg
->name
, lv
->name
);
819 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
822 if (!(r
= dev_manager_thin_percent(dm
, lv
, mapped
, percent
)))
825 dev_manager_destroy(dm
);
831 * Returns 1 if transaction_id set, else 0 on failure.
833 int lv_thin_pool_transaction_id(const struct logical_volume
*lv
,
834 uint64_t *transaction_id
)
837 struct dev_manager
*dm
;
838 struct dm_status_thin_pool
*status
;
843 log_debug("Checking thin percent for LV %s/%s",
844 lv
->vg
->name
, lv
->name
);
846 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
849 if (!(r
= dev_manager_thin_pool_status(dm
, lv
, &status
)))
852 *transaction_id
= status
->transaction_id
;
854 dev_manager_destroy(dm
);
859 static int _lv_active(struct cmd_context
*cmd
, const struct logical_volume
*lv
)
863 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0)) {
871 static int _lv_open_count(struct cmd_context
*cmd
, struct logical_volume
*lv
)
875 if (!lv_info(cmd
, lv
, 0, &info
, 1, 0)) {
880 return info
.open_count
;
883 static int _lv_activate_lv(struct logical_volume
*lv
, struct lv_activate_opts
*laopts
)
886 struct dev_manager
*dm
;
888 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, (lv
->status
& PVMOVE
) ? 0 : 1)))
891 if (!(r
= dev_manager_activate(dm
, lv
, laopts
)))
894 dev_manager_destroy(dm
);
898 static int _lv_preload(struct logical_volume
*lv
, struct lv_activate_opts
*laopts
,
902 struct dev_manager
*dm
;
903 int old_readonly
= laopts
->read_only
;
905 laopts
->read_only
= _passes_readonly_filter(lv
->vg
->cmd
, lv
);
907 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, (lv
->status
& PVMOVE
) ? 0 : 1)))
910 if (!(r
= dev_manager_preload(dm
, lv
, laopts
, flush_required
)))
913 dev_manager_destroy(dm
);
915 laopts
->read_only
= old_readonly
;
920 static int _lv_deactivate(struct logical_volume
*lv
)
923 struct dev_manager
*dm
;
925 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, 1)))
928 if (!(r
= dev_manager_deactivate(dm
, lv
)))
931 dev_manager_destroy(dm
);
935 static int _lv_suspend_lv(struct logical_volume
*lv
, struct lv_activate_opts
*laopts
,
936 int lockfs
, int flush_required
)
939 struct dev_manager
*dm
;
941 laopts
->read_only
= _passes_readonly_filter(lv
->vg
->cmd
, lv
);
944 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
945 * device directly, we don't want to touch the devices that use it.
947 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
, (lv
->status
& PVMOVE
) ? 0 : 1)))
950 if (!(r
= dev_manager_suspend(dm
, lv
, laopts
, lockfs
, flush_required
)))
953 dev_manager_destroy(dm
);
958 * These two functions return the number of visible LVs in the state,
959 * or -1 on error. FIXME Check this.
961 int lvs_in_vg_activated(const struct volume_group
*vg
)
969 dm_list_iterate_items(lvl
, &vg
->lvs
)
970 if (lv_is_visible(lvl
->lv
))
971 count
+= (_lv_active(vg
->cmd
, lvl
->lv
) == 1);
973 log_debug("Counted %d active LVs in VG %s", count
, vg
->name
);
978 int lvs_in_vg_opened(const struct volume_group
*vg
)
980 const struct lv_list
*lvl
;
986 dm_list_iterate_items(lvl
, &vg
->lvs
)
987 if (lv_is_visible(lvl
->lv
))
988 count
+= (_lv_open_count(vg
->cmd
, lvl
->lv
) > 0);
990 log_debug("Counted %d open LVs in VG %s", count
, vg
->name
);
997 * @lv: logical volume being queried
998 * @locally: set if active locally (when provided)
999 * @exclusive: set if active exclusively (when provided)
1001 * Determine whether an LV is active locally or in a cluster.
1002 * In addition to the return code which indicates whether or
1003 * not the LV is active somewhere, two other values are set
1004 * to yield more information about the status of the activation:
1005 * return locally exclusively status
1006 * ====== ======= =========== ======
1008 * 1 0 0 active remotely
1009 * 1 0 1 exclusive remotely
1010 * 1 1 0 active locally and possibly remotely
1011 * 1 1 1 exclusive locally (or local && !cluster)
1012 * The VG lock must be held to call this function.
1016 static int _lv_is_active(const struct logical_volume
*lv
,
1017 int *locally
, int *exclusive
)
1019 int r
, l
, e
; /* remote, local, and exclusive */
1023 if (_lv_active(lv
->vg
->cmd
, lv
))
1026 if (!vg_is_clustered(lv
->vg
)) {
1028 e
= 1; /* exclusive by definition */
1032 /* Active locally, and the caller doesn't care about exclusive */
1033 if (l
&& !exclusive
)
1036 if ((r
= remote_lock_held(lv
->lvid
.s
, &e
)) >= 0)
1040 * If lock query is not supported (due to interfacing with old
1041 * code), then we cannot evaluate exclusivity properly.
1043 * Old users of this function will never be affected by this,
1044 * since they are only concerned about active vs. not active.
1045 * New users of this function who specifically ask for 'exclusive'
1046 * will be given an error message.
1048 log_error("Unable to determine exclusivity of %s", lv
->name
);
1053 * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
1054 * but it's unreliable.
1063 log_very_verbose("%s/%s is %sactive%s%s",
1064 lv
->vg
->name
, lv
->name
,
1065 (r
|| l
) ? "" : "not ",
1066 (exclusive
&& e
) ? " exclusive" : "",
1067 e
? (l
? " locally" : " remotely") : "");
1072 int lv_is_active(const struct logical_volume
*lv
)
1074 return _lv_is_active(lv
, NULL
, NULL
);
1077 int lv_is_active_but_not_locally(const struct logical_volume
*lv
)
1080 return _lv_is_active(lv
, &l
, NULL
) && !l
;
1083 int lv_is_active_exclusive(const struct logical_volume
*lv
)
1087 return _lv_is_active(lv
, NULL
, &e
) && e
;
1090 int lv_is_active_exclusive_locally(const struct logical_volume
*lv
)
1094 return _lv_is_active(lv
, &l
, &e
) && l
&& e
;
1097 int lv_is_active_exclusive_remotely(const struct logical_volume
*lv
)
1101 return _lv_is_active(lv
, &l
, &e
) && !l
&& e
;
1105 static struct dm_event_handler
*_create_dm_event_handler(struct cmd_context
*cmd
, const char *dmuuid
, const char *dso
,
1106 const int timeout
, enum dm_event_mask mask
)
1108 struct dm_event_handler
*dmevh
;
1110 if (!(dmevh
= dm_event_handler_create()))
1113 if (dm_event_handler_set_dmeventd_path(dmevh
, find_config_tree_str(cmd
, "dmeventd/executable", NULL
)))
1116 if (dm_event_handler_set_dso(dmevh
, dso
))
1119 if (dm_event_handler_set_uuid(dmevh
, dmuuid
))
1122 dm_event_handler_set_timeout(dmevh
, timeout
);
1123 dm_event_handler_set_event_mask(dmevh
, mask
);
1128 dm_event_handler_destroy(dmevh
);
1132 char *get_monitor_dso_path(struct cmd_context
*cmd
, const char *libpath
)
1136 if (!(path
= dm_pool_alloc(cmd
->mem
, PATH_MAX
))) {
1137 log_error("Failed to allocate dmeventd library path.");
1141 get_shared_library_path(cmd
, libpath
, path
, PATH_MAX
);
1146 static char *_build_target_uuid(struct cmd_context
*cmd
, struct logical_volume
*lv
)
1150 if (lv_is_thin_pool(lv
))
1151 layer
= "tpool"; /* Monitor "tpool" for the "thin pool". */
1152 else if (lv_is_origin(lv
))
1153 layer
= "real"; /* Monitor "real" for "snapshot-origin". */
1157 return build_dm_uuid(cmd
->mem
, lv
->lvid
.s
, layer
);
1160 int target_registered_with_dmeventd(struct cmd_context
*cmd
, const char *dso
,
1161 struct logical_volume
*lv
, int *pending
)
1164 enum dm_event_mask evmask
= 0;
1165 struct dm_event_handler
*dmevh
;
1171 if (!(uuid
= _build_target_uuid(cmd
, lv
)))
1174 if (!(dmevh
= _create_dm_event_handler(cmd
, uuid
, dso
, 0, DM_EVENT_ALL_ERRORS
)))
1177 if (dm_event_get_registered_device(dmevh
, 0)) {
1178 dm_event_handler_destroy(dmevh
);
1182 evmask
= dm_event_handler_get_event_mask(dmevh
);
1183 if (evmask
& DM_EVENT_REGISTRATION_PENDING
) {
1185 evmask
&= ~DM_EVENT_REGISTRATION_PENDING
;
1188 dm_event_handler_destroy(dmevh
);
1193 int target_register_events(struct cmd_context
*cmd
, const char *dso
, struct logical_volume
*lv
,
1194 int evmask
__attribute__((unused
)), int set
, int timeout
)
1197 struct dm_event_handler
*dmevh
;
1203 /* We always monitor the "real" device, never the "snapshot-origin" itself. */
1204 if (!(uuid
= _build_target_uuid(cmd
, lv
)))
1207 if (!(dmevh
= _create_dm_event_handler(cmd
, uuid
, dso
, timeout
,
1208 DM_EVENT_ALL_ERRORS
| (timeout
? DM_EVENT_TIMEOUT
: 0))))
1211 r
= set
? dm_event_register_handler(dmevh
) : dm_event_unregister_handler(dmevh
);
1213 dm_event_handler_destroy(dmevh
);
1218 log_info("%s %s for events", set
? "Monitored" : "Unmonitored", uuid
);
1226 * Returns 0 if an attempt to (un)monitor the device failed.
1227 * Returns 1 otherwise.
1229 int monitor_dev_for_events(struct cmd_context
*cmd
, struct logical_volume
*lv
,
1230 const struct lv_activate_opts
*laopts
, int monitor
)
1233 int i
, pending
= 0, monitored
;
1235 struct dm_list
*tmp
, *snh
, *snht
;
1236 struct lv_segment
*seg
;
1237 struct lv_segment
*log_seg
;
1238 int (*monitor_fn
) (struct lv_segment
*s
, int e
);
1240 static const struct lv_activate_opts zlaopts
= { 0 };
1241 static const struct lv_activate_opts thinopts
= { .skip_in_use
= 1 };
1247 /* skip dmeventd code altogether */
1248 if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE
)
1252 * Nothing to do if dmeventd configured not to be used.
1254 if (monitor
&& !dmeventd_monitor_mode())
1258 * Allow to unmonitor thin pool via explicit pool unmonitor
1259 * or unmonitor before the last thin pool user deactivation
1260 * Skip unmonitor, if invoked via unmonitor of thin volume
1261 * and there is another thin pool user (open_count > 1)
1263 if (laopts
->skip_in_use
&& lv_info(lv
->vg
->cmd
, lv
, 1, &info
, 1, 0) &&
1264 (info
.open_count
!= 1)) {
1265 log_debug("Skipping unmonitor of opened %s (open:%d)",
1266 lv
->name
, info
.open_count
);
1271 * In case of a snapshot device, we monitor lv->snapshot->lv,
1272 * not the actual LV itself.
1274 if (lv_is_cow(lv
) && (laopts
->no_merging
|| !lv_is_merging_cow(lv
)))
1275 return monitor_dev_for_events(cmd
, lv
->snapshot
->lv
, NULL
, monitor
);
1278 * In case this LV is a snapshot origin, we instead monitor
1279 * each of its respective snapshots. The origin itself may
1280 * also need to be monitored if it is a mirror, for example.
1282 if (!laopts
->origin_only
&& lv_is_origin(lv
))
1283 dm_list_iterate_safe(snh
, snht
, &lv
->snapshot_segs
)
1284 if (!monitor_dev_for_events(cmd
, dm_list_struct_base(snh
,
1285 struct lv_segment
, origin_list
)->cow
, NULL
, monitor
))
1289 * If the volume is mirrored and its log is also mirrored, monitor
1290 * the log volume as well.
1292 if ((seg
= first_seg(lv
)) != NULL
&& seg
->log_lv
!= NULL
&&
1293 (log_seg
= first_seg(seg
->log_lv
)) != NULL
&&
1294 seg_is_mirrored(log_seg
))
1295 if (!monitor_dev_for_events(cmd
, seg
->log_lv
, NULL
, monitor
))
1298 dm_list_iterate(tmp
, &lv
->segments
) {
1299 seg
= dm_list_item(tmp
, struct lv_segment
);
1301 /* Recurse for AREA_LV */
1302 for (s
= 0; s
< seg
->area_count
; s
++) {
1303 if (seg_type(seg
, s
) != AREA_LV
)
1305 if (!monitor_dev_for_events(cmd
, seg_lv(seg
, s
), NULL
,
1307 log_error("Failed to %smonitor %s",
1308 monitor
? "" : "un",
1309 seg_lv(seg
, s
)->name
);
1315 * If requested unmonitoring of thin volume, request test
1316 * if there is no other thin pool user
1318 * FIXME: code here looks like _lv_postorder()
1321 !monitor_dev_for_events(cmd
, seg
->pool_lv
,
1322 (!monitor
) ? &thinopts
: NULL
, monitor
))
1325 if (seg
->metadata_lv
&&
1326 !monitor_dev_for_events(cmd
, seg
->metadata_lv
, NULL
, monitor
))
1329 if (!seg_monitored(seg
) || (seg
->status
& PVMOVE
))
1334 /* Check monitoring status */
1335 if (seg
->segtype
->ops
->target_monitored
)
1336 monitored
= seg
->segtype
->ops
->target_monitored(seg
, &pending
);
1338 continue; /* segtype doesn't support registration */
1341 * FIXME: We should really try again if pending
1343 monitored
= (pending
) ? 0 : monitored
;
1347 log_verbose("%s/%s already monitored.", lv
->vg
->name
, lv
->name
);
1348 else if (seg
->segtype
->ops
->target_monitor_events
)
1349 monitor_fn
= seg
->segtype
->ops
->target_monitor_events
;
1352 log_verbose("%s/%s already not monitored.", lv
->vg
->name
, lv
->name
);
1353 else if (seg
->segtype
->ops
->target_unmonitor_events
)
1354 monitor_fn
= seg
->segtype
->ops
->target_unmonitor_events
;
1357 /* Do [un]monitor */
1361 log_verbose("%sonitoring %s/%s%s", monitor
? "M" : "Not m", lv
->vg
->name
, lv
->name
,
1362 test_mode() ? " [Test mode: skipping this]" : "");
1364 /* FIXME Test mode should really continue a bit further. */
1368 /* FIXME specify events */
1369 if (!monitor_fn(seg
, 0)) {
1370 log_error("%s/%s: %s segment monitoring function failed.",
1371 lv
->vg
->name
, lv
->name
, seg
->segtype
->name
);
1375 /* Check [un]monitor results */
1376 /* Try a couple times if pending, but not forever... */
1377 for (i
= 0; i
< 10; i
++) {
1379 monitored
= seg
->segtype
->ops
->target_monitored(seg
, &pending
);
1381 (!monitored
&& monitor
) ||
1382 (monitored
&& !monitor
))
1383 log_very_verbose("%s/%s %smonitoring still pending: waiting...",
1384 lv
->vg
->name
, lv
->name
, monitor
? "" : "un");
1391 r
= (monitored
&& monitor
) || (!monitored
&& !monitor
);
1400 struct detached_lv_data
{
1401 struct logical_volume
*lv_pre
;
1402 struct lv_activate_opts
*laopts
;
1403 int *flush_required
;
1406 static int _preload_detached_lv(struct cmd_context
*cmd
, struct logical_volume
*lv
, void *data
)
1408 struct detached_lv_data
*detached
= data
;
1409 struct lv_list
*lvl_pre
;
1411 if ((lvl_pre
= find_lv_in_vg(detached
->lv_pre
->vg
, lv
->name
))) {
1412 if (lv_is_visible(lvl_pre
->lv
) && lv_is_active(lv
) && (!lv_is_cow(lv
) || !lv_is_cow(lvl_pre
->lv
)) &&
1413 !_lv_preload(lvl_pre
->lv
, detached
->laopts
, detached
->flush_required
))
1420 static int _lv_suspend(struct cmd_context
*cmd
, const char *lvid_s
,
1421 struct lv_activate_opts
*laopts
, int error_if_not_suspended
)
1423 struct logical_volume
*lv
= NULL
, *lv_pre
= NULL
, *pvmove_lv
= NULL
;
1424 struct lv_list
*lvl_pre
;
1425 struct seg_list
*sl
;
1426 struct lv_segment
*snap_seg
;
1428 int r
= 0, lockfs
= 0, flush_required
= 0;
1429 struct detached_lv_data detached
;
1434 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
1437 /* Use precommitted metadata if present */
1438 if (!(lv_pre
= lv_from_lvid(cmd
, lvid_s
, 1)))
1441 /* Ignore origin_only unless LV is origin in both old and new metadata */
1442 if (!lv_is_thin_volume(lv
) && !(lv_is_origin(lv
) && lv_is_origin(lv_pre
)))
1443 laopts
->origin_only
= 0;
1446 _skip("Suspending %s%s.", lv
->name
, laopts
->origin_only
? " origin without snapshots" : "");
1451 if (!lv_info(cmd
, lv
, laopts
->origin_only
, &info
, 0, 0))
1454 if (!info
.exists
|| info
.suspended
) {
1455 if (!error_if_not_suspended
) {
1458 critical_section_inc(cmd
, "already suspended");
1463 if (!lv_read_replicator_vgs(lv
))
1466 lv_calculate_readahead(lv
, NULL
);
1469 * Preload devices for the LV.
1470 * If the PVMOVE LV is being removed, it's only present in the old
1471 * metadata and not the new, so we must explicitly add the new
1472 * tables for all the changed LVs here, as the relationships
1473 * are not found by walking the new metadata.
1475 if (!(lv_pre
->status
& LOCKED
) &&
1476 (lv
->status
& LOCKED
) &&
1477 (pvmove_lv
= find_pvmove_lv_in_lv(lv
))) {
1478 /* Preload all the LVs above the PVMOVE LV */
1479 dm_list_iterate_items(sl
, &pvmove_lv
->segs_using_this_lv
) {
1480 if (!(lvl_pre
= find_lv_in_vg(lv_pre
->vg
, sl
->seg
->lv
->name
))) {
1481 log_error(INTERNAL_ERROR
"LV %s missing from preload metadata", sl
->seg
->lv
->name
);
1484 if (!_lv_preload(lvl_pre
->lv
, laopts
, &flush_required
))
1487 /* Now preload the PVMOVE LV itself */
1488 if (!(lvl_pre
= find_lv_in_vg(lv_pre
->vg
, pvmove_lv
->name
))) {
1489 log_error(INTERNAL_ERROR
"LV %s missing from preload metadata", pvmove_lv
->name
);
1492 if (!_lv_preload(lvl_pre
->lv
, laopts
, &flush_required
))
1495 if (!_lv_preload(lv_pre
, laopts
, &flush_required
))
1496 /* FIXME Revert preloading */
1500 * Search for existing LVs that have become detached and preload them.
1502 detached
.lv_pre
= lv_pre
;
1503 detached
.laopts
= laopts
;
1504 detached
.flush_required
= &flush_required
;
1506 if (!for_each_sub_lv(cmd
, lv
, &_preload_detached_lv
, &detached
))
1510 * Preload any snapshots that are being removed.
1512 if (!laopts
->origin_only
&& lv_is_origin(lv
)) {
1513 dm_list_iterate_items_gen(snap_seg
, &lv
->snapshot_segs
, origin_list
) {
1514 if (!(lvl_pre
= find_lv_in_vg_by_lvid(lv_pre
->vg
, &snap_seg
->cow
->lvid
))) {
1515 log_error(INTERNAL_ERROR
"LV %s (%s) missing from preload metadata",
1516 snap_seg
->cow
->name
, snap_seg
->cow
->lvid
.id
[1].uuid
);
1519 if (!lv_is_cow(lvl_pre
->lv
) &&
1520 !_lv_preload(lvl_pre
->lv
, laopts
, &flush_required
))
1526 if (!monitor_dev_for_events(cmd
, lv
, laopts
, 0))
1527 /* FIXME Consider aborting here */
1530 critical_section_inc(cmd
, "suspending");
1532 critical_section_inc(cmd
, "suspending pvmove LV");
1534 if (!laopts
->origin_only
&&
1535 (lv_is_origin(lv_pre
) || lv_is_cow(lv_pre
)))
1538 if (laopts
->origin_only
&& lv_is_thin_volume(lv
) && lv_is_thin_volume(lv_pre
))
1542 * Suspending an LV directly above a PVMOVE LV also
1543 * suspends other LVs using that same PVMOVE LV.
1544 * FIXME Remove this and delay the 'clear node' until
1545 * after the code knows whether there's a different
1546 * inactive table to load or not instead so lv_suspend
1547 * can be called separately for each LV safely.
1549 if ((lv_pre
->vg
->status
& PRECOMMITTED
) &&
1550 (lv_pre
->status
& LOCKED
) && find_pvmove_lv_in_lv(lv_pre
)) {
1551 if (!_lv_suspend_lv(lv_pre
, laopts
, lockfs
, flush_required
)) {
1552 critical_section_dec(cmd
, "failed precommitted suspend");
1554 critical_section_dec(cmd
, "failed precommitted suspend (pvmove)");
1558 /* Normal suspend */
1559 if (!_lv_suspend_lv(lv
, laopts
, lockfs
, flush_required
)) {
1560 critical_section_dec(cmd
, "failed suspend");
1562 critical_section_dec(cmd
, "failed suspend (pvmove)");
1570 release_vg(lv_pre
->vg
);
1572 lv_release_replicator_vgs(lv
);
1580 * In a cluster, set exclusive to indicate that only one node is using the
1581 * device. Any preloaded tables may then use non-clustered targets.
1583 * Returns success if the device is not active
1585 int lv_suspend_if_active(struct cmd_context
*cmd
, const char *lvid_s
, unsigned origin_only
, unsigned exclusive
)
1587 struct lv_activate_opts laopts
= {
1588 .origin_only
= origin_only
,
1589 .exclusive
= exclusive
1592 return _lv_suspend(cmd
, lvid_s
, &laopts
, 0);
1595 /* No longer used */
// NOTE(review): stale wrapper kept for reference only -- its 3-argument call
// no longer matches the 4-argument _lv_suspend() used elsewhere in this file,
// so in the full file this span is presumably inside a commented-out block;
// confirm before reviving.
1597 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
1599 return _lv_suspend(cmd, lvid_s, 1);
/*
 * Resume the LV identified by lvid_s via _lv_activate_lv() and drop the
 * critical section taken when it was suspended.  error_if_not_active selects
 * whether finding the LV not suspended is an error or a silent success.
 * NOTE(review): the extraction has dropped some lines (braces, gotos,
 * returns) from this view; comments below describe only what is visible.
 */
1603 static int _lv_resume(struct cmd_context
*cmd
, const char *lvid_s
,
1604 struct lv_activate_opts
*laopts
, int error_if_not_active
)
1606 struct logical_volume
*lv
;
/* Set when only messages need processing rather than a full resume;
 * tested below together with info.suspended. */
1609 int messages_only
= 0;
/* Look the LV up from its lvid string. */
1614 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
/* Thin pool with origin_only: consequent not visible in this view --
 * presumably sets messages_only; confirm against the full file. */
1617 if (lv_is_thin_pool(lv
) && laopts
->origin_only
)
/* origin_only is only meaningful for an origin or a thin volume. */
1620 if (!lv_is_origin(lv
) && !lv_is_thin_volume(lv
))
1621 laopts
->origin_only
= 0;
/* Test mode: report what would be done and skip the real work. */
1624 _skip("Resuming %s%s%s.", lv
->name
, laopts
->origin_only
? " without snapshots" : "",
1625 laopts
->revert
? " (reverting)" : "");
1630 log_debug("Resuming LV %s/%s%s%s%s.", lv
->vg
->name
, lv
->name
,
1631 error_if_not_active
? "" : " if active",
1632 laopts
->origin_only
? " without snapshots" : "",
1633 laopts
->revert
? " (reverting)" : "");
/* Query device-mapper state for the LV (or its origin only). */
1635 if (!lv_info(cmd
, lv
, laopts
->origin_only
, &info
, 0, 0))
/* Nothing to resume unless the device exists and is suspended (or only
 * messages are pending). */
1638 if (!info
.exists
|| !(info
.suspended
|| messages_only
)) {
1639 if (error_if_not_active
)
/* Balance the critical_section_inc() performed at suspend time. */
1642 if (!info
.suspended
)
1643 critical_section_dec(cmd
, "already resumed");
/* Honour the configured read-only filter for the reloaded table. */
1647 laopts
->read_only
= _passes_readonly_filter(cmd
, lv
);
/* Perform the actual resume/activation in the kernel. */
1649 if (!_lv_activate_lv(lv
, laopts
))
1652 critical_section_dec(cmd
, "resumed");
/* Final arg 1 presumably re-enables event monitoring (the deactivate
 * path below passes 0) -- confirm. */
1654 if (!monitor_dev_for_events(cmd
, lv
, laopts
, 1))
1666 * In a cluster, set exclusive to indicate that only one node is using the
1667 * device. Any tables loaded may then use non-clustered targets.
1670 * @exclusive This parameter only has an affect in cluster-context.
1671 * It forces local target type to be used (instead of
1672 * cluster-aware type).
1673 * Returns success if the device is not active
1675 int lv_resume_if_active(struct cmd_context
*cmd
, const char *lvid_s
,
1676 unsigned origin_only
, unsigned exclusive
,
1679 struct lv_activate_opts laopts
= {
1680 .origin_only
= origin_only
,
1681 .exclusive
= exclusive
,
1685 return _lv_resume(cmd
, lvid_s
, &laopts
, 0);
1688 int lv_resume(struct cmd_context
*cmd
, const char *lvid_s
, unsigned origin_only
)
1690 struct lv_activate_opts laopts
= { .origin_only
= origin_only
, };
1692 return _lv_resume(cmd
, lvid_s
, &laopts
, 1);
/*
 * Does this origin LV have any snapshot (cow) device that is currently open?
 * Used by lv_deactivate() to refuse deactivating a busy origin.
 * NOTE(review): loop-closing braces and return paths are missing from this
 * extracted view; comments describe only the visible statements.
 */
1695 static int _lv_has_open_snapshots(struct logical_volume
*lv
)
1697 struct lv_segment
*snap_seg
;
/* Walk every snapshot segment attached to this origin. */
1701 dm_list_iterate_items_gen(snap_seg
, &lv
->snapshot_segs
, origin_list
) {
/* Query dm state for the snapshot's cow device, requesting open_count
 * (5th argument = 1). */
1702 if (!lv_info(lv
->vg
->cmd
, snap_seg
->cow
, 0, &info
, 1, 0)) {
/* An existing, open cow device blocks deactivation of the origin. */
1707 if (info
.exists
&& info
.open_count
) {
1708 log_error("LV %s/%s has open snapshot %s: "
1709 "not deactivating", lv
->vg
->name
, lv
->name
,
1710 snap_seg
->cow
->name
);
/*
 * Deactivate (tear down the dm device of) the LV identified by lvid_s.
 * Refuses when a visible LV is in use or an origin has open snapshots.
 * NOTE(review): goto/return/brace lines are missing from this extracted
 * view; comments describe only the visible statements.
 */
1718 int lv_deactivate(struct cmd_context
*cmd
, const char *lvid_s
)
1720 struct logical_volume
*lv
;
1727 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
/* Test mode: only report. */
1731 _skip("Deactivating '%s'.", lv
->name
);
1736 log_debug("Deactivating %s/%s.", lv
->vg
->name
, lv
->name
);
/* Current dm state, requesting open_count (5th argument = 1). */
1738 if (!lv_info(cmd
, lv
, 0, &info
, 1, 0))
/* Only user-visible LVs get the in-use safety checks. */
1746 if (lv_is_visible(lv
)) {
1747 if (!lv_check_not_in_use(cmd
, lv
, &info
))
/* An origin whose snapshots are open must not be deactivated. */
1750 if (lv_is_origin(lv
) && _lv_has_open_snapshots(lv
))
1754 if (!lv_read_replicator_vgs(lv
))
1757 lv_calculate_readahead(lv
, NULL
);
/* Final arg 0 presumably disables monitoring before teardown -- cf. the
 * 1 passed after resume/activate; confirm. */
1759 if (!monitor_dev_for_events(cmd
, lv
, NULL
, 0))
/* Do the removal inside a critical section. */
1762 critical_section_inc(cmd
, "deactivating");
1763 r
= _lv_deactivate(lv
);
1764 critical_section_dec(cmd
, "deactivated");
/* Re-check dm state: the device should be gone now (the consequent is
 * not visible in this view). */
1766 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0) || info
.exists
)
1770 lv_release_replicator_vgs(lv
);
1777 /* Test if LV passes filter */
/*
 * NOTE(review): the tail of the parameter list and several statements
 * (early returns, the passing branch) are missing from this extracted view;
 * presumably there is an output parameter reporting the filter result --
 * confirm against the full file.
 */
1778 int lv_activation_filter(struct cmd_context
*cmd
, const char *lvid_s
,
1781 struct logical_volume
*lv
;
/* Without an activation layer there is nothing to filter on. */
1784 if (!activation()) {
1789 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
/* Reject LVs that do not pass the activation filter (verbose only --
 * contrast the log_error in _lv_activate()). */
1792 if (!_passes_activation_filter(cmd
, lv
)) {
1793 log_verbose("Not activating %s/%s since it does not pass "
1794 "activation filter.", lv
->vg
->name
, lv
->name
);
/*
 * Activate the LV identified by lvid_s: look it up, apply safety checks
 * (activation filter, partial LVs, unknown segment types), then load and
 * resume its tables via _lv_activate_lv() inside a critical section.
 * filter: when set, consult the activation filter and refuse with an error.
 * NOTE(review): braces, gotos and returns are missing from this extracted
 * view; comments describe only the visible statements.
 */
1806 static int _lv_activate(struct cmd_context
*cmd
, const char *lvid_s
,
1807 struct lv_activate_opts
*laopts
, int filter
)
1809 struct logical_volume
*lv
;
1816 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
/* Optional activation-filter check (lv_activate_with_filter() path). */
1819 if (filter
&& !_passes_activation_filter(cmd
, lv
)) {
1820 log_error("Not activating %s/%s since it does not pass "
1821 "activation filter.", lv
->vg
->name
, lv
->name
);
/* Partial LVs (PARTIAL_LV set) need an explicit --partial override. */
1825 if ((!lv
->vg
->cmd
->partial_activation
) && (lv
->status
& PARTIAL_LV
)) {
1826 log_error("Refusing activation of partial LV %s. Use --partial to override.",
/* Never activate an LV containing unrecognised segment types. */
1831 if (lv_has_unknown_segments(lv
)) {
1832 log_error("Refusing activation of LV %s containing "
1833 "an unrecognised segment.", lv
->name
);
/* Test mode: only report. */
1838 _skip("Activating '%s'.", lv
->name
);
/* Decide read-only vs read-write from the configured filter. */
1844 laopts
->read_only
= _passes_readonly_filter(cmd
, lv
);
1846 log_debug("Activating %s/%s%s%s.", lv
->vg
->name
, lv
->name
,
1847 laopts
->exclusive
? " exclusively" : "",
1848 laopts
->read_only
? " read-only" : "");
1850 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0))
/* Already active with a live table in the wanted read-only mode --
 * the consequent (presumably an early success) is not visible here. */
1856 if (info
.exists
&& !info
.suspended
&& info
.live_table
&&
1857 (info
.read_only
== read_only_lv(lv
, laopts
))) {
1862 if (!lv_read_replicator_vgs(lv
))
1865 lv_calculate_readahead(lv
, NULL
);
/* Load and resume the device inside a critical section. */
1867 critical_section_inc(cmd
, "activating");
1868 if (!(r
= _lv_activate_lv(lv
, laopts
)))
1870 critical_section_dec(cmd
, "activated");
/* On success, (re)enable event monitoring (final arg 1). */
1872 if (r
&& !monitor_dev_for_events(cmd
, lv
, laopts
, 1))
1877 lv_release_replicator_vgs(lv
);
1885 int lv_activate(struct cmd_context
*cmd
, const char *lvid_s
, int exclusive
)
1887 struct lv_activate_opts laopts
= { .exclusive
= exclusive
};
1889 if (!_lv_activate(cmd
, lvid_s
, &laopts
, 0))
1895 /* Activate LV only if it passes filter */
1896 int lv_activate_with_filter(struct cmd_context
*cmd
, const char *lvid_s
, int exclusive
)
1898 struct lv_activate_opts laopts
= { .exclusive
= exclusive
};
1900 if (!_lv_activate(cmd
, lvid_s
, &laopts
, 1))
/*
 * (Re)create device nodes: for one specific LV via the dev_manager layer,
 * or via dm_mknodes(NULL) (presumably the lv == NULL branch -- confirm).
 * NOTE(review): the intervening conditionals and returns (orig lines
 * 1907-1918) are missing from this extracted view.
 */
1906 int lv_mknodes(struct cmd_context
*cmd
, const struct logical_volume
*lv
)
/* Ask libdevmapper to make nodes without naming a specific device. */
1911 r
= dm_mknodes(NULL
);
/* Specific LV: delegate to the dev_manager layer. */
1919 r
= dev_manager_mknodes(lv
);
1927 * Does PV use VG somewhere in its construction?
1928 * Returns 1 on failure.
1930 int pv_uses_vg(struct physical_volume
*pv
,
1931 struct volume_group
*vg
)
1933 if (!activation() || !pv
->dev
)
1936 if (!dm_is_dm_major(MAJOR(pv
->dev
->dev
)))
1939 return dev_manager_device_uses_vg(pv
->dev
, vg
);
/* Delegate release of activation-time state to the dev_manager layer. */
void activation_release(void)
{
	dev_manager_release();
}
1947 void activation_exit(void)