/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <limits.h>
#include <string.h>
#include <unistd.h>

#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "sharedlib.h"
/* Log a test-mode "skipping" message at very-verbose level. */
#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
39 int lvm1_present(struct cmd_context
*cmd
)
43 if (dm_snprintf(path
, sizeof(path
), "%s/lvm/global", cmd
->proc_dir
)
45 log_error("LVM1 proc global snprintf failed");
49 if (path_exists(path
))
55 int list_segment_modules(struct dm_pool
*mem
, const struct lv_segment
*seg
,
56 struct dm_list
*modules
)
59 struct lv_segment
*seg2
, *snap_seg
;
62 if (seg
->segtype
->ops
->modules_needed
&&
63 !seg
->segtype
->ops
->modules_needed(mem
, seg
, modules
)) {
64 log_error("module string allocation failed");
68 if (lv_is_origin(seg
->lv
))
69 dm_list_iterate(snh
, &seg
->lv
->snapshot_segs
)
70 if (!list_lv_modules(mem
,
71 dm_list_struct_base(snh
,
77 if (lv_is_cow(seg
->lv
)) {
78 snap_seg
= find_cow(seg
->lv
);
79 if (snap_seg
->segtype
->ops
->modules_needed
&&
80 !snap_seg
->segtype
->ops
->modules_needed(mem
, snap_seg
,
82 log_error("snap_seg module string allocation failed");
87 for (s
= 0; s
< seg
->area_count
; s
++) {
88 switch (seg_type(seg
, s
)) {
90 seg2
= find_seg_by_le(seg_lv(seg
, s
), seg_le(seg
, s
));
91 if (seg2
&& !list_segment_modules(mem
, seg2
, modules
))
103 int list_lv_modules(struct dm_pool
*mem
, const struct logical_volume
*lv
,
104 struct dm_list
*modules
)
106 struct lv_segment
*seg
;
108 dm_list_iterate_items(seg
, &lv
->segments
)
109 if (!list_segment_modules(mem
, seg
, modules
))
115 #ifndef DEVMAPPER_SUPPORT
116 void set_activation(int act
)
118 static int warned
= 0;
123 log_error("Compiled without libdevmapper support. "
124 "Can't enable activation.");
132 int library_version(char *version
, size_t size
)
136 int driver_version(char *version
, size_t size
)
140 int target_version(const char *target_name
, uint32_t *maj
,
141 uint32_t *min
, uint32_t *patchlevel
)
145 int target_present(struct cmd_context
*cmd
, const char *target_name
,
150 int lv_info(struct cmd_context
*cmd
, const struct logical_volume
*lv
, unsigned origin_only
,
151 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
155 int lv_info_by_lvid(struct cmd_context
*cmd
, const char *lvid_s
,
156 unsigned origin_only
,
157 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
161 int lv_snapshot_percent(const struct logical_volume
*lv
, percent_t
*percent
)
165 int lv_mirror_percent(struct cmd_context
*cmd
, struct logical_volume
*lv
,
166 int wait
, percent_t
*percent
, uint32_t *event_nr
)
170 int lvs_in_vg_activated(struct volume_group
*vg
)
174 int lvs_in_vg_opened(struct volume_group
*vg
)
179 int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
184 int lv_suspend_if_active(struct cmd_context
*cmd
, const char *lvid_s
)
188 int lv_resume(struct cmd_context
*cmd
, const char *lvid_s
)
192 int lv_resume_if_active(struct cmd_context
*cmd
, const char *lvid_s
)
196 int lv_deactivate(struct cmd_context
*cmd
, const char *lvid_s
)
200 int lv_activation_filter(struct cmd_context
*cmd
, const char *lvid_s
,
205 int lv_activate(struct cmd_context
*cmd
, const char *lvid_s
, int exclusive
)
209 int lv_activate_with_filter(struct cmd_context
*cmd
, const char *lvid_s
, int exclusive
)
214 int lv_mknodes(struct cmd_context
*cmd
, const struct logical_volume
*lv
)
219 int pv_uses_vg(struct physical_volume
*pv
,
220 struct volume_group
*vg
)
225 void activation_release(void)
230 void activation_exit(void)
235 #else /* DEVMAPPER_SUPPORT */
/* Global switch: when 0, no device-mapper interaction is attempted. */
static int _activation = 1;

/* Enable/disable activation; logs only when the value actually changes. */
void set_activation(int act)
{
	if (act == _activation)
		return;

	_activation = act;
	if (_activation)
		log_verbose("Activation enabled. Device-mapper kernel "
			    "driver will be used.");
	else
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
}
258 static int _passes_activation_filter(struct cmd_context
*cmd
,
259 struct logical_volume
*lv
)
261 const struct config_node
*cn
;
262 const struct config_value
*cv
;
266 if (!(cn
= find_config_tree_node(cmd
, "activation/volume_list"))) {
267 log_verbose("activation/volume_list configuration setting "
268 "not defined, checking only host tags for %s/%s",
269 lv
->vg
->name
, lv
->name
);
271 /* If no host tags defined, activate */
272 if (dm_list_empty(&cmd
->tags
))
275 /* If any host tag matches any LV or VG tag, activate */
276 if (str_list_match_list(&cmd
->tags
, &lv
->tags
, NULL
) ||
277 str_list_match_list(&cmd
->tags
, &lv
->vg
->tags
, NULL
))
280 log_verbose("No host tag matches %s/%s",
281 lv
->vg
->name
, lv
->name
);
287 log_verbose("activation/volume_list configuration setting "
288 "defined, checking the list to match %s/%s",
289 lv
->vg
->name
, lv
->name
);
291 for (cv
= cn
->v
; cv
; cv
= cv
->next
) {
292 if (cv
->type
!= CFG_STRING
) {
293 log_error("Ignoring invalid string in config file "
294 "activation/volume_list");
299 log_error("Ignoring empty string in config file "
300 "activation/volume_list");
309 log_error("Ignoring empty tag in config file "
310 "activation/volume_list");
313 /* If any host tag matches any LV or VG tag, activate */
314 if (!strcmp(str
, "*")) {
315 if (str_list_match_list(&cmd
->tags
, &lv
->tags
, NULL
)
316 || str_list_match_list(&cmd
->tags
,
317 &lv
->vg
->tags
, NULL
))
322 /* If supplied tag matches LV or VG tag, activate */
323 if (str_list_match_item(&lv
->tags
, str
) ||
324 str_list_match_item(&lv
->vg
->tags
, str
))
329 if (!strchr(str
, '/')) {
330 /* vgname supplied */
331 if (!strcmp(str
, lv
->vg
->name
))
337 if (dm_snprintf(path
, sizeof(path
), "%s/%s", lv
->vg
->name
,
339 log_error("dm_snprintf error from %s/%s", lv
->vg
->name
,
343 if (!strcmp(path
, str
))
347 log_verbose("No item supplied in activation/volume_list configuration "
348 "setting matches %s/%s", lv
->vg
->name
, lv
->name
);
/* Fetch the libdevmapper library version string; 0 if activation disabled. */
int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}
/* Fetch the device-mapper kernel driver version; 0 if activation disabled. */
int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}
371 int target_version(const char *target_name
, uint32_t *maj
,
372 uint32_t *min
, uint32_t *patchlevel
)
376 struct dm_versions
*target
, *last_target
;
378 log_very_verbose("Getting target version for %s", target_name
);
379 if (!(dmt
= dm_task_create(DM_DEVICE_LIST_VERSIONS
)))
382 if (!dm_task_run(dmt
)) {
383 log_debug("Failed to get %s target version", target_name
);
384 /* Assume this was because LIST_VERSIONS isn't supported */
388 target
= dm_task_get_versions(dmt
);
391 last_target
= target
;
393 if (!strcmp(target_name
, target
->name
)) {
395 *maj
= target
->version
[0];
396 *min
= target
->version
[1];
397 *patchlevel
= target
->version
[2];
401 target
= (struct dm_versions
*)((char *) target
+ target
->next
);
402 } while (last_target
!= target
);
405 dm_task_destroy(dmt
);
/*
 * Attempt to load kernel module "dm-<target_name>" via modprobe.
 * Returns the exec result, or 0 when modprobe is unavailable/fails.
 * NOTE(review): reconstructed from garbled source; verify against upstream.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
/*
 * Return 1 if the named target is available in the kernel, optionally
 * modprobing it first; 0 otherwise or if activation is disabled.
 */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Already present? */
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return 0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}
454 * Returns 1 if info structure populated, else 0 on failure.
456 int lv_info(struct cmd_context
*cmd
, const struct logical_volume
*lv
, unsigned origin_only
,
457 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
459 struct dm_info dminfo
;
464 * If open_count info is requested and we have to be sure our own udev
465 * transactions are finished
466 * For non-clustered locking type we are only interested for non-delete operation
467 * in progress - as only those could lead to opened files
469 if (with_open_count
) {
470 if (locking_is_clustered())
471 sync_local_dev_names(cmd
); /* Wait to have udev in sync */
472 else if (fs_has_non_delete_ops())
473 fs_unlock(); /* For non clustered - wait if there are non-delete ops */
476 if (!dev_manager_info(lv
->vg
->cmd
->mem
, lv
, origin_only
? "real" : NULL
, with_open_count
,
477 with_read_ahead
, &dminfo
, &info
->read_ahead
))
480 info
->exists
= dminfo
.exists
;
481 info
->suspended
= dminfo
.suspended
;
482 info
->open_count
= dminfo
.open_count
;
483 info
->major
= dminfo
.major
;
484 info
->minor
= dminfo
.minor
;
485 info
->read_only
= dminfo
.read_only
;
486 info
->live_table
= dminfo
.live_table
;
487 info
->inactive_table
= dminfo
.inactive_table
;
492 int lv_info_by_lvid(struct cmd_context
*cmd
, const char *lvid_s
,
493 unsigned origin_only
,
494 struct lvinfo
*info
, int with_open_count
, int with_read_ahead
)
497 struct logical_volume
*lv
;
499 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
502 if (!lv_is_origin(lv
))
505 r
= lv_info(cmd
, lv
, origin_only
, info
, with_open_count
, with_read_ahead
);
512 * Returns 1 if percent set, else 0 on failure.
514 int lv_check_transient(struct logical_volume
*lv
)
517 struct dev_manager
*dm
;
522 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
525 if (!(r
= dev_manager_transient(dm
, lv
)))
528 dev_manager_destroy(dm
);
534 * Returns 1 if percent set, else 0 on failure.
536 int lv_snapshot_percent(const struct logical_volume
*lv
, percent_t
*percent
)
539 struct dev_manager
*dm
;
544 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
547 if (!(r
= dev_manager_snapshot_percent(dm
, lv
, percent
)))
550 dev_manager_destroy(dm
);
555 /* FIXME Merge with snapshot_percent */
556 int lv_mirror_percent(struct cmd_context
*cmd
, struct logical_volume
*lv
,
557 int wait
, percent_t
*percent
, uint32_t *event_nr
)
560 struct dev_manager
*dm
;
563 /* If mirrored LV is temporarily shrinked to 1 area (= linear),
564 * it should be considered in-sync. */
565 if (dm_list_size(&lv
->segments
) == 1 && first_seg(lv
)->area_count
== 1) {
566 *percent
= PERCENT_100
;
573 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0))
579 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
582 if (!(r
= dev_manager_mirror_percent(dm
, lv
, wait
, percent
, event_nr
)))
585 dev_manager_destroy(dm
);
590 static int _lv_active(struct cmd_context
*cmd
, struct logical_volume
*lv
)
594 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0)) {
602 static int _lv_open_count(struct cmd_context
*cmd
, struct logical_volume
*lv
)
606 if (!lv_info(cmd
, lv
, 0, &info
, 1, 0)) {
611 return info
.open_count
;
614 static int _lv_activate_lv(struct logical_volume
*lv
, unsigned origin_only
)
617 struct dev_manager
*dm
;
619 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
622 if (!(r
= dev_manager_activate(dm
, lv
, origin_only
)))
625 dev_manager_destroy(dm
);
629 static int _lv_preload(struct logical_volume
*lv
, unsigned origin_only
, int *flush_required
)
632 struct dev_manager
*dm
;
634 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
637 if (!(r
= dev_manager_preload(dm
, lv
, origin_only
, flush_required
)))
640 dev_manager_destroy(dm
);
644 static int _lv_deactivate(struct logical_volume
*lv
)
647 struct dev_manager
*dm
;
649 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
652 if (!(r
= dev_manager_deactivate(dm
, lv
)))
655 dev_manager_destroy(dm
);
659 static int _lv_suspend_lv(struct logical_volume
*lv
, unsigned origin_only
, int lockfs
, int flush_required
)
662 struct dev_manager
*dm
;
664 if (!(dm
= dev_manager_create(lv
->vg
->cmd
, lv
->vg
->name
)))
667 if (!(r
= dev_manager_suspend(dm
, lv
, origin_only
, lockfs
, flush_required
)))
670 dev_manager_destroy(dm
);
675 * These two functions return the number of visible LVs in the state,
678 int lvs_in_vg_activated(struct volume_group
*vg
)
686 dm_list_iterate_items(lvl
, &vg
->lvs
) {
687 if (lv_is_visible(lvl
->lv
))
688 count
+= (_lv_active(vg
->cmd
, lvl
->lv
) == 1);
694 int lvs_in_vg_opened(const struct volume_group
*vg
)
696 const struct lv_list
*lvl
;
702 dm_list_iterate_items(lvl
, &vg
->lvs
) {
703 if (lv_is_visible(lvl
->lv
))
704 count
+= (_lv_open_count(vg
->cmd
, lvl
->lv
) > 0);
712 * @lv: logical volume being queried
713 * @locally: set if active locally (when provided)
714 * @exclusive: set if active exclusively (when provided)
716 * Determine whether an LV is active locally or in a cluster.
717 * In addition to the return code which indicates whether or
718 * not the LV is active somewhere, two other values are set
719 * to yield more information about the status of the activation:
720 * return locally exclusively status
721 * ====== ======= =========== ======
723 * 1 0 0 active remotely
724 * 1 0 1 exclusive remotely
725 * 1 1 0 active locally and possibly remotely
726 * 1 1 1 exclusive locally (or local && !cluster)
727 * The VG lock must be held to call this function.
731 static int _lv_is_active(struct logical_volume
*lv
,
732 int *locally
, int *exclusive
)
734 int r
, l
, e
; /* remote, local, and exclusive */
738 if (_lv_active(lv
->vg
->cmd
, lv
))
741 if (!vg_is_clustered(lv
->vg
)) {
742 e
= 1; /* exclusive by definition */
746 /* Active locally, and the caller doesn't care about exclusive */
750 if ((r
= remote_lock_held(lv
->lvid
.s
, &e
)) >= 0)
754 * If lock query is not supported (due to interfacing with old
755 * code), then we cannot evaluate exclusivity properly.
757 * Old users of this function will never be affected by this,
758 * since they are only concerned about active vs. not active.
759 * New users of this function who specifically ask for 'exclusive'
760 * will be given an error message.
764 log_error("Unable to determine exclusivity of %s",
769 if (activate_lv_excl(lv
->vg
->cmd
, lv
)) {
770 if (!deactivate_lv(lv
->vg
->cmd
, lv
))
781 log_very_verbose("%s/%s is %sactive%s%s",
782 lv
->vg
->name
, lv
->name
,
783 (r
|| l
) ? "" : "not ",
784 (exclusive
&& e
) ? " exclusive" : "",
785 e
? (l
? " locally" : " remotely") : "");
790 int lv_is_active(struct logical_volume
*lv
)
792 return _lv_is_active(lv
, NULL
, NULL
);
796 int lv_is_active_locally(struct logical_volume *lv)
799 return _lv_is_active(lv, &l, NULL) && l;
/* Return 1 if the LV is active exclusively on this node. */
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}
/* Return 1 if the LV is active exclusively on some other node. */
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}
816 static struct dm_event_handler
*_create_dm_event_handler(struct cmd_context
*cmd
, const char *dmuuid
, const char *dso
,
817 const int timeout
, enum dm_event_mask mask
)
819 struct dm_event_handler
*dmevh
;
821 if (!(dmevh
= dm_event_handler_create()))
824 if (dm_event_handler_set_dmeventd_path(dmevh
, find_config_tree_str(cmd
, "dmeventd/executable", NULL
)))
827 if (dm_event_handler_set_dso(dmevh
, dso
))
830 if (dm_event_handler_set_uuid(dmevh
, dmuuid
))
833 dm_event_handler_set_timeout(dmevh
, timeout
);
834 dm_event_handler_set_event_mask(dmevh
, mask
);
839 dm_event_handler_destroy(dmevh
);
843 char *get_monitor_dso_path(struct cmd_context
*cmd
, const char *libpath
)
847 if (!(path
= dm_pool_alloc(cmd
->mem
, PATH_MAX
))) {
848 log_error("Failed to allocate dmeventd library path.");
852 get_shared_library_path(cmd
, libpath
, path
, PATH_MAX
);
857 int target_registered_with_dmeventd(struct cmd_context
*cmd
, const char *dso
,
858 struct logical_volume
*lv
, int *pending
)
861 enum dm_event_mask evmask
= 0;
862 struct dm_event_handler
*dmevh
;
869 /* We always monitor the "real" device, never the "snapshot-origin" itself. */
870 if (!(uuid
= build_dm_uuid(cmd
->mem
, lv
->lvid
.s
, lv_is_origin(lv
) ? "real" : NULL
)))
873 if (!(dmevh
= _create_dm_event_handler(cmd
, uuid
, dso
, 0, DM_EVENT_ALL_ERRORS
)))
876 if (dm_event_get_registered_device(dmevh
, 0)) {
877 dm_event_handler_destroy(dmevh
);
881 evmask
= dm_event_handler_get_event_mask(dmevh
);
882 if (evmask
& DM_EVENT_REGISTRATION_PENDING
) {
884 evmask
&= ~DM_EVENT_REGISTRATION_PENDING
;
887 dm_event_handler_destroy(dmevh
);
892 int target_register_events(struct cmd_context
*cmd
, const char *dso
, struct logical_volume
*lv
,
893 int evmask
__attribute__((unused
)), int set
, int timeout
)
896 struct dm_event_handler
*dmevh
;
902 /* We always monitor the "real" device, never the "snapshot-origin" itself. */
903 if (!(uuid
= build_dm_uuid(cmd
->mem
, lv
->lvid
.s
, lv_is_origin(lv
) ? "real" : NULL
)))
906 if (!(dmevh
= _create_dm_event_handler(cmd
, uuid
, dso
, timeout
,
907 DM_EVENT_ALL_ERRORS
| (timeout
? DM_EVENT_TIMEOUT
: 0))))
910 r
= set
? dm_event_register_handler(dmevh
) : dm_event_unregister_handler(dmevh
);
912 dm_event_handler_destroy(dmevh
);
917 log_info("%s %s for events", set
? "Monitored" : "Unmonitored", uuid
);
/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 * NOTE(review): large parts of this function were missing from the
 * garbled source; reconstructed — verify carefully against upstream.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   unsigned origin_only, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && !lv_is_merging_cow(lv))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, 0, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd,
						    dm_list_struct_base(snh,
							struct lv_segment,
							origin_list)->cow,
						    0, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, 0, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), 0,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue; /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			pending = 0;
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
#else
	return 1;
#endif
}
1065 static int _lv_suspend(struct cmd_context
*cmd
, const char *lvid_s
,
1066 unsigned origin_only
, int error_if_not_suspended
)
1068 struct logical_volume
*lv
= NULL
, *lv_pre
= NULL
;
1070 int r
= 0, lockfs
= 0, flush_required
= 0;
1075 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
1078 /* Use precommitted metadata if present */
1079 if (!(lv_pre
= lv_from_lvid(cmd
, lvid_s
, 1)))
1082 /* Ignore origin_only unless LV is origin in both old and new metadata */
1083 if (!lv_is_origin(lv
) || !lv_is_origin(lv_pre
))
1087 _skip("Suspending %s%s.", lv
->name
, origin_only
? " origin without snapshots" : "");
1092 if (!lv_info(cmd
, lv
, origin_only
, &info
, 0, 0))
1095 if (!info
.exists
|| info
.suspended
) {
1096 if (!error_if_not_suspended
) {
1104 if (!lv_read_replicator_vgs(lv
))
1107 lv_calculate_readahead(lv
, NULL
);
1109 /* If VG was precommitted, preload devices for the LV */
1110 if ((lv_pre
->vg
->status
& PRECOMMITTED
)) {
1111 if (!_lv_preload(lv_pre
, origin_only
, &flush_required
)) {
1112 /* FIXME Revert preloading */
1117 if (!monitor_dev_for_events(cmd
, lv
, origin_only
, 0))
1118 /* FIXME Consider aborting here */
1124 (lv_is_origin(lv_pre
) || lv_is_cow(lv_pre
)))
1127 if (!_lv_suspend_lv(lv
, origin_only
, lockfs
, flush_required
)) {
1136 free_vg(lv_pre
->vg
);
1138 lv_release_replicator_vgs(lv
);
/* Returns success if the device is not active */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return _lv_suspend(cmd, lvid_s, origin_only, 0);
}
/* No longer used */
/*
 * NOTE(review): the garbled source called _lv_suspend() with only three
 * arguments although it takes four; fixed to pass origin_only = 0 and
 * error_if_not_suspended = 1 (the historical lv_suspend semantics).
 */
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return _lv_suspend(cmd, lvid_s, 0, 1);
}
1164 * @exclusive: This parameter only has an affect in cluster-context.
1165 * It forces local target type to be used (instead of
1166 * cluster-aware type).
1167 * @error_if_not_active
1169 static int _lv_resume(struct cmd_context
*cmd
, const char *lvid_s
,
1170 unsigned origin_only
, unsigned exclusive
,
1171 int error_if_not_active
)
1173 struct logical_volume
*lv
;
1180 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
1183 if (!lv_is_origin(lv
))
1187 _skip("Resuming %s%s.", lv
->name
, origin_only
? " without snapshots" : "");
1192 if (!lv_info(cmd
, lv
, origin_only
, &info
, 0, 0))
1195 if (!info
.exists
|| !info
.suspended
) {
1196 if (error_if_not_active
)
1203 * When targets are activated exclusively in a cluster, the
1204 * non-clustered target should be used. This only happens
1205 * if ACTIVATE_EXCL is set in lv->status.
1208 lv
->status
|= ACTIVATE_EXCL
;
1210 if (!_lv_activate_lv(lv
, origin_only
))
1215 if (!monitor_dev_for_events(cmd
, lv
, origin_only
, 1))
/* Returns success if the device is not active */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	return _lv_resume(cmd, lvid_s, origin_only, exclusive, 0);
}
/* Resume; it is an error if the device is not currently suspended. */
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return _lv_resume(cmd, lvid_s, origin_only, 0, 1);
}
1238 static int _lv_has_open_snapshots(struct logical_volume
*lv
)
1240 struct lv_segment
*snap_seg
;
1244 dm_list_iterate_items_gen(snap_seg
, &lv
->snapshot_segs
, origin_list
) {
1245 if (!lv_info(lv
->vg
->cmd
, snap_seg
->cow
, 0, &info
, 1, 0)) {
1250 if (info
.exists
&& info
.open_count
) {
1251 log_error("LV %s/%s has open snapshot %s: "
1252 "not deactivating", lv
->vg
->name
, lv
->name
,
1253 snap_seg
->cow
->name
);
1261 int lv_deactivate(struct cmd_context
*cmd
, const char *lvid_s
)
1263 struct logical_volume
*lv
;
1270 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
1274 _skip("Deactivating '%s'.", lv
->name
);
1279 if (!lv_info(cmd
, lv
, 0, &info
, 1, 0))
1287 if (lv_is_visible(lv
)) {
1288 if (info
.open_count
) {
1289 log_error("LV %s/%s in use: not deactivating",
1290 lv
->vg
->name
, lv
->name
);
1293 if (lv_is_origin(lv
) && _lv_has_open_snapshots(lv
))
1297 if (!lv_read_replicator_vgs(lv
))
1300 lv_calculate_readahead(lv
, NULL
);
1302 if (!monitor_dev_for_events(cmd
, lv
, 0, 0))
1306 r
= _lv_deactivate(lv
);
1309 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0) || info
.exists
)
1313 lv_release_replicator_vgs(lv
);
1320 /* Test if LV passes filter */
1321 int lv_activation_filter(struct cmd_context
*cmd
, const char *lvid_s
,
1324 struct logical_volume
*lv
;
1327 if (!activation()) {
1332 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
1335 if (!_passes_activation_filter(cmd
, lv
)) {
1336 log_verbose("Not activating %s/%s since it does not pass "
1337 "activation filter.", lv
->vg
->name
, lv
->name
);
1349 static int _lv_activate(struct cmd_context
*cmd
, const char *lvid_s
,
1350 int exclusive
, int filter
)
1352 struct logical_volume
*lv
;
1359 if (!(lv
= lv_from_lvid(cmd
, lvid_s
, 0)))
1362 if (filter
&& !_passes_activation_filter(cmd
, lv
)) {
1363 log_error("Not activating %s/%s since it does not pass "
1364 "activation filter.", lv
->vg
->name
, lv
->name
);
1368 if ((!lv
->vg
->cmd
->partial_activation
) && (lv
->status
& PARTIAL_LV
)) {
1369 log_error("Refusing activation of partial LV %s. Use --partial to override.",
1374 if (lv_has_unknown_segments(lv
)) {
1375 log_error("Refusing activation of LV %s containing "
1376 "an unrecognised segment.", lv
->name
);
1381 _skip("Activating '%s'.", lv
->name
);
1386 if (!lv_info(cmd
, lv
, 0, &info
, 0, 0))
1389 if (info
.exists
&& !info
.suspended
&& info
.live_table
) {
1394 if (!lv_read_replicator_vgs(lv
))
1397 lv_calculate_readahead(lv
, NULL
);
1400 lv
->status
|= ACTIVATE_EXCL
;
1403 if (!(r
= _lv_activate_lv(lv
, 0)))
1407 if (r
&& !monitor_dev_for_events(cmd
, lv
, 0, 1))
1412 lv_release_replicator_vgs(lv
);
/* Activate LV unconditionally (no activation filter applied). */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	if (!_lv_activate(cmd, lvid_s, exclusive, 0))
		return 0;

	return 1;
}
/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	if (!_lv_activate(cmd, lvid_s, exclusive, 1))
		return 0;

	return 1;
}
1437 int lv_mknodes(struct cmd_context
*cmd
, const struct logical_volume
*lv
)
1442 r
= dm_mknodes(NULL
);
1450 r
= dev_manager_mknodes(lv
);
1458 * Does PV use VG somewhere in its construction?
1459 * Returns 1 on failure.
1461 int pv_uses_vg(struct physical_volume
*pv
,
1462 struct volume_group
*vg
)
1467 if (!dm_is_dm_major(MAJOR(pv
->dev
->dev
)))
1470 return dev_manager_device_uses_vg(pv
->dev
, vg
);
/* Release dev_manager resources held by the activation layer. */
void activation_release(void)
{
	dev_manager_release();
}
1478 void activation_exit(void)