/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "lvm-string.h"
#include "toolcontext.h"
#include "dev_manager.h"
#include "sharedlib.h"

#define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
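/*
 * Check for the LVM1 kernel driver by testing for the proc file
 * <proc_dir>/lvm/global (typically /proc/lvm/global).
 */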
int lvm1_present(struct cmd_context *cmd)
{
	static char path[PATH_MAX];

	if (dm_snprintf(path, sizeof(path), "%s/lvm/global",
			cmd->proc_dir) < 0) {
		log_error("LVM1 proc global snprintf failed");
		return 0;
	}

	if (path_exists(path))
		return 1;

	return 0;
}
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	if (lv_is_cow(seg->lv)) {
		snap_seg = find_cow(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;
		}
	}

	return 1;
}
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}
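/*
 * Usage sketch (illustrative, not part of the original file): collecting
 * the kernel module names an LV needs into a string list allocated from
 * "mem":
 *
 *	struct dm_list modules;
 *
 *	dm_list_init(&modules);
 *	if (!list_lv_modules(mem, lv, &modules))
 *		stack;
 */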
#ifndef DEVMAPPER_SUPPORT

void set_activation(int act)
{
	static int warned = 0;

	if (warned || !act)
		return;

	log_error("Compiled without libdevmapper support. "
		  "Can't enable activation.");

	warned = 1;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lvs_in_vg_activated(struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	return 1;
}
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}
int lv_is_active(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	return 0;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	return 0;
}
int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
#else	/* DEVMAPPER_SUPPORT */

static int _activation = 1;

void set_activation(int act)
{
	if (act == _activation)
		return;

	_activation = act;
	if (_activation)
		log_verbose("Activation enabled. Device-mapper kernel "
			    "driver will be used.");
	else
		log_warn("WARNING: Activation disabled. No device-mapper "
			 "interaction will be attempted.");
}
static int _passes_activation_filter(struct cmd_context *cmd,
				     struct logical_volume *lv)
{
	const struct config_node *cn;
	const struct config_value *cv;
	const char *str;
	static char path[PATH_MAX];

	if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined, checking only host tags for %s/%s",
			    lv->vg->name, lv->name);

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s/%s",
			    lv->vg->name, lv->name);

		/* Don't activate */
		return 0;
	}

	log_verbose("activation/volume_list configuration setting "
		    "defined, checking the list to match %s/%s",
		    lv->vg->name, lv->name);

	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type != CFG_STRING) {
			log_error("Ignoring invalid string in config file "
				  "activation/volume_list");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file "
				  "activation/volume_list");
			continue;
		}

		/* Tag? */
		if (*str == '@') {
			str++;
			if (!*str) {
				log_error("Ignoring empty tag in config file "
					  "activation/volume_list");
				continue;
			}
			/* If any host tag matches any LV or VG tag, activate */
			if (!strcmp(str, "*")) {
				if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
				    || str_list_match_list(&cmd->tags,
							   &lv->vg->tags, NULL))
					return 1;

				continue;
			}
			/* If supplied tag matches LV or VG tag, activate */
			if (str_list_match_item(&lv->tags, str) ||
			    str_list_match_item(&lv->vg->tags, str))
				return 1;

			continue;
		}

		if (!strchr(str, '/')) {
			/* vgname supplied */
			if (!strcmp(str, lv->vg->name))
				return 1;

			continue;
		}

		/* vgname/lvname */
		if (dm_snprintf(path, sizeof(path), "%s/%s", lv->vg->name,
				lv->name) < 0) {
			log_error("dm_snprintf error from %s/%s", lv->vg->name,
				  lv->name);
			continue;
		}
		if (!strcmp(path, str))
			return 1;
	}

	log_verbose("No item supplied in activation/volume_list configuration "
		    "setting matches %s/%s", lv->vg->name, lv->name);

	return 0;
}
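/*
 * For illustration, volume_list entries may name a VG, a "vg/lv" pair,
 * a tag as "@tag", or "@*" to match any host tag:
 *
 *	activation {
 *	    volume_list = [ "vg00", "vg01/lvol1", "@mytag", "@*" ]
 *	}
 */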
int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}
int driver_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	return dm_driver_version(version, size);
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_versions *target, *last_target;

	log_very_verbose("Getting target version for %s", target_name);
	if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
		return_0;

	if (!dm_task_run(dmt)) {
		log_debug("Failed to get %s target version", target_name);
		/* Assume this was because LIST_VERSIONS isn't supported */
		r = 1;
		goto out;
	}

	target = dm_task_get_versions(dmt);

	do {
		last_target = target;

		if (!strcmp(target_name, target->name)) {
			r = 1;
			*maj = target->version[0];
			*min = target->version[1];
			*patchlevel = target->version[2];
			goto out;
		}

		target = (struct dm_versions *)((char *) target + target->next);
	} while (last_target != target);

out:
	dm_task_destroy(dmt);

	return r;
}
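/*
 * Usage sketch (illustrative): querying the version of a loaded target:
 *
 *	uint32_t maj, min, patchlevel;
 *
 *	if (target_version("snapshot", &maj, &min, &patchlevel))
 *		log_very_verbose("snapshot target %u.%u.%u",
 *				 maj, min, patchlevel);
 */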
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[3];

	if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	argv[0] = MODPROBE_CMD;
	argv[1] = module;
	argv[2] = NULL;

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	if (!activation())
		return 0;

#ifdef MODPROBE_CMD
	if (use_modprobe) {
		if (target_version(target_name, &maj, &min, &patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif

	return target_version(target_name, &maj, &min, &patchlevel);
}
/*
 * Returns 1 if info structure populated, else 0 on failure.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, unsigned origin_only,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	struct dm_info dminfo;

	if (!activation())
		return 0;

	/*
	 * If open_count info is requested, we have to be sure that our own
	 * udev transactions are finished.
	 * For non-clustered locking types we only care about a non-delete
	 * operation in progress, as only those could lead to opened files.
	 */
	if (with_open_count) {
		if (locking_is_clustered())
			sync_local_dev_names(cmd); /* Wait to have udev in sync */
		else if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	if (!dev_manager_info(lv->vg->cmd->mem, lv, origin_only ? "real" : NULL, with_open_count,
			      with_read_ahead, &dminfo, &info->read_ahead))
		return_0;

	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
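/*
 * Usage sketch (illustrative): testing whether an LV has a live mapping
 * in the kernel:
 *
 *	struct lvinfo info;
 *
 *	if (lv_info(cmd, lv, 0, &info, 0, 0) && info.exists)
 *		... the device exists in the device-mapper table ...
 */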
int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
		    unsigned origin_only,
		    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	int r;
	struct logical_volume *lv;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		return 0;

	if (!lv_is_origin(lv))
		origin_only = 0;

	r = lv_info(cmd, lv, origin_only, info, with_open_count, with_read_ahead);
	free_vg(lv->vg);

	return r;
}
/*
 * Returns 1 if transient status was refreshed, else 0 on failure.
 */
int lv_check_transient(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_transient(dm, lv)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
/*
 * Returns 1 if percent set, else 0 on failure.
 */
int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
	int r;
	struct dev_manager *dm;

	if (!activation())
		return 0;

	log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
/* FIXME Merge with snapshot_percent */
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, percent_t *percent, uint32_t *event_nr)
{
	int r;
	struct dev_manager *dm;
	struct lvinfo info;

	/* If a mirrored LV is temporarily shrunk to one area (= linear),
	 * it should be considered in-sync. */
	if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
		*percent = PERCENT_100;
		return 1;
	}

	if (!activation())
		return 0;

	log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		return_0;

	if (!info.exists)
		return 0;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
		stack;

	dev_manager_destroy(dm);

	return r;
}
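/*
 * Usage sketch (illustrative): polling mirror sync progress without
 * waiting for an event:
 *
 *	percent_t percent;
 *	uint32_t event_nr;
 *
 *	if (lv_mirror_percent(cmd, lv, 0, &percent, &event_nr) &&
 *	    percent == PERCENT_100)
 *		... mirror is in sync ...
 */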
static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
		stack;
		return -1;
	}

	return info.exists;
}
static int _lv_open_count(struct cmd_context *cmd, struct logical_volume *lv)
{
	struct lvinfo info;

	if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
		stack;
		return -1;
	}

	return info.open_count;
}
static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_activate(dm, lv, laopts)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
		       int *flush_required)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
static int _lv_deactivate(struct logical_volume *lv)
{
	int r;
	struct dev_manager *dm;

	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
		return_0;

	if (!(r = dev_manager_deactivate(dm, lv)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
			  int lockfs, int flush_required)
{
	int r;
	struct dev_manager *dm;

	/*
	 * When we are asked to manipulate (normally suspend/resume) the PVMOVE
	 * device directly, we don't want to touch the devices that use it.
	 */
	if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
		return_0;

	if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
		stack;

	dev_manager_destroy(dm);
	return r;
}
/*
 * These two functions return the number of visible LVs in the state,
 * or -1 on error.  FIXME Check this.
 */
int lvs_in_vg_activated(struct volume_group *vg)
{
	struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_active(vg->cmd, lvl->lv) == 1);

	log_debug("Counted %d active LVs in VG %s", count, vg->name);

	return count;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	const struct lv_list *lvl;
	int count = 0;

	if (!activation())
		return 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_visible(lvl->lv))
			count += (_lv_open_count(vg->cmd, lvl->lv) > 0);

	log_debug("Counted %d open LVs in VG %s", count, vg->name);

	return count;
}
/*
 * @lv:        logical volume being queried
 * @locally:   set if active locally (when provided)
 * @exclusive: set if active exclusively (when provided)
 *
 * Determine whether an LV is active locally or in a cluster.
 * In addition to the return code which indicates whether or
 * not the LV is active somewhere, two other values are set
 * to yield more information about the status of the activation:
 *
 *	return	locally	exclusively	status
 *	======	=======	===========	======
 *	   0	   0	    0		not active
 *	   1	   0	    0		active remotely
 *	   1	   0	    1		exclusive remotely
 *	   1	   1	    0		active locally and possibly remotely
 *	   1	   1	    1		exclusive locally (or local && !cluster)
 *
 * The VG lock must be held to call this function.
 */
static int _lv_is_active(struct logical_volume *lv,
			 int *locally, int *exclusive)
{
	int r, l, e; /* remote, local, and exclusive */

	r = l = e = 0;

	if (_lv_active(lv->vg->cmd, lv))
		l = 1;

	if (!vg_is_clustered(lv->vg)) {
		e = 1;	/* exclusive by definition */
		goto out;
	}

	/* Active locally, and the caller doesn't care about exclusive */
	if (l && !exclusive)
		goto out;

	if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
		goto out;

	/*
	 * If lock query is not supported (due to interfacing with old
	 * code), then we cannot evaluate exclusivity properly.
	 *
	 * Old users of this function will never be affected by this,
	 * since they are only concerned about active vs. not active.
	 * New users of this function who specifically ask for 'exclusive'
	 * will be given an error message.
	 */
	if (exclusive)
		log_error("Unable to determine exclusivity of %s",
			  lv->name);

	/* FIXME: Is this fallback alright? */
	if (activate_lv_excl(lv->vg->cmd, lv)) {
		if (!deactivate_lv(lv->vg->cmd, lv))
			stack;
		/* FIXME: locally & exclusive are undefined. */
		return 0;
	}
	/* FIXME: Check exclusive value here. */
out:
	if (locally)
		*locally = l;
	if (exclusive)
		*exclusive = e;

	log_very_verbose("%s/%s is %sactive%s%s",
			 lv->vg->name, lv->name,
			 (r || l) ? "" : "not ",
			 (exclusive && e) ? " exclusive" : "",
			 e ? (l ? " locally" : " remotely") : "");

	return r || l;
}
int lv_is_active(struct logical_volume *lv)
{
	return _lv_is_active(lv, NULL, NULL);
}
int lv_is_active_locally(struct logical_volume *lv)
{
	int l;

	return _lv_is_active(lv, &l, NULL) && l;
}
int lv_is_active_exclusive_locally(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && l && e;
}
int lv_is_active_exclusive_remotely(struct logical_volume *lv)
{
	int l, e;

	return _lv_is_active(lv, &l, &e) && !l && e;
}
static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
							 const int timeout, enum dm_event_mask mask)
{
	struct dm_event_handler *dmevh;

	if (!(dmevh = dm_event_handler_create()))
		return_NULL;

	if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
		goto_bad;

	if (dm_event_handler_set_dso(dmevh, dso))
		goto_bad;

	if (dm_event_handler_set_uuid(dmevh, dmuuid))
		goto_bad;

	dm_event_handler_set_timeout(dmevh, timeout);
	dm_event_handler_set_event_mask(dmevh, mask);

	return dmevh;

bad:
	dm_event_handler_destroy(dmevh);

	return NULL;
}
char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
{
	char *path;

	if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
		log_error("Failed to allocate dmeventd library path.");
		return NULL;
	}

	get_shared_library_path(cmd, libpath, path, PATH_MAX);

	return path;
}
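/*
 * Usage sketch (illustrative; the DSO name shown is the one LVM2 ships
 * for mirror monitoring):
 *
 *	char *dso = get_monitor_dso_path(cmd,
 *					 "libdevmapper-event-lvm2mirror.so");
 */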
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
				    struct logical_volume *lv, int *pending)
{
	char *uuid;
	enum dm_event_mask evmask = 0;
	struct dm_event_handler *dmevh;

	*pending = 0;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
		return_0;

	if (dm_event_get_registered_device(dmevh, 0)) {
		dm_event_handler_destroy(dmevh);
		return 0;
	}

	evmask = dm_event_handler_get_event_mask(dmevh);
	if (evmask & DM_EVENT_REGISTRATION_PENDING) {
		*pending = 1;
		evmask &= ~DM_EVENT_REGISTRATION_PENDING;
	}

	dm_event_handler_destroy(dmevh);

	return evmask;
}
int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
			   int evmask __attribute__((unused)), int set, int timeout)
{
	char *uuid;
	struct dm_event_handler *dmevh;
	int r;

	if (!dso)
		return_0;

	/* We always monitor the "real" device, never the "snapshot-origin" itself. */
	if (!(uuid = build_dm_uuid(cmd->mem, lv->lvid.s, lv_is_origin(lv) ? "real" : NULL)))
		return_0;

	if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
					       DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
		return_0;

	r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);

	dm_event_handler_destroy(dmevh);

	if (!r)
		return_0;

	log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);

	return 1;
}
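/*
 * Usage sketch (illustrative): a segment type registering its dmeventd
 * DSO for all error events plus a 60-second timeout poll:
 *
 *	if (!target_register_events(cmd, dso, lv, 0, 1, 60))
 *		stack;
 */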
/*
 * Returns 0 if an attempt to (un)monitor the device failed.
 * Returns 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	int i, pending = 0, monitored;
	int r = 1;
	struct dm_list *tmp, *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };

	if (!laopts)
		laopts = &zlaopts;

	/* skip dmeventd code altogether */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/*
	 * Nothing to do if dmeventd configured not to be used.
	 */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
		return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example.
	 */
	if (!laopts->origin_only && lv_is_origin(lv))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
				    struct lv_segment, origin_list)->cow, NULL, monitor))
				r = 0;

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
			r = 0;

	dm_list_iterate(tmp, &lv->segments) {
		seg = dm_list_item(tmp, struct lv_segment);

		/* Recurse for AREA_LV */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				log_error("Failed to %smonitor %s",
					  monitor ? "" : "un",
					  seg_lv(seg, s)->name);
				r = 0;
			}
		}

		if (!seg_monitored(seg) || (seg->status & PVMOVE))
			continue;

		monitor_fn = NULL;

		/* Check monitoring status */
		if (seg->segtype->ops->target_monitored)
			monitored = seg->segtype->ops->target_monitored(seg, &pending);
		else
			continue; /* segtype doesn't support registration */

		/*
		 * FIXME: We should really try again if pending
		 */
		monitored = (pending) ? 0 : monitored;

		if (monitor) {
			if (monitored)
				log_verbose("%s/%s already monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_monitor_events)
				monitor_fn = seg->segtype->ops->target_monitor_events;
		} else {
			if (!monitored)
				log_verbose("%s/%s already not monitored.", lv->vg->name, lv->name);
			else if (seg->segtype->ops->target_unmonitor_events)
				monitor_fn = seg->segtype->ops->target_unmonitor_events;
		}

		/* Do [un]monitor */
		if (!monitor_fn)
			continue;

		log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
			    test_mode() ? " [Test mode: skipping this]" : "");

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		/* FIXME specify events */
		if (!monitor_fn(seg, 0)) {
			log_error("%s/%s: %s segment monitoring function failed.",
				  lv->vg->name, lv->name, seg->segtype->name);
			return 0;
		}

		/* Check [un]monitor results */
		/* Try a couple times if pending, but not forever... */
		for (i = 0; i < 10; i++) {
			if (seg->segtype->ops->target_monitored)
				monitored = seg->segtype->ops->target_monitored(seg, &pending);
			if (pending ||
			    (!monitored && monitor) ||
			    (monitored && !monitor))
				log_very_verbose("%s/%s %smonitoring still pending: waiting...",
						 lv->vg->name, lv->name, monitor ? "" : "un");
			else
				break;
			sleep(1);
		}

		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	return r;
}
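/*
 * Usage sketch (illustrative): enabling monitoring for an LV after it
 * has been activated:
 *
 *	if (!monitor_dev_for_events(cmd, lv, NULL, 1))
 *		stack;
 */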
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
		       struct lv_activate_opts *laopts, int error_if_not_suspended)
{
	struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
	struct lv_list *lvl_pre;
	struct seg_list *sl;
	struct lvinfo info;
	int r = 0, lockfs = 0, flush_required = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	/* Use precommitted metadata if present */
	if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
		goto_out;

	/* Ignore origin_only unless LV is origin in both old and new metadata */
	if (!lv_is_origin(lv) || !lv_is_origin(lv_pre))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
		r = 1;
		goto out;
	}

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || info.suspended) {
		if (!error_if_not_suspended) {
			r = 1;
			if (info.suspended)
				critical_section_inc(cmd, "already suspended");
		}
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	/*
	 * If VG was precommitted, preload devices for the LV.
	 * If the PVMOVE LV is being removed, it's only present in the old
	 * metadata and not the new, so we must explicitly add the new
	 * tables for all the changed LVs here, as the relationships
	 * are not found by walking the new metadata.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED)) {
		if (!(lv_pre->status & LOCKED) &&
		    (lv->status & LOCKED) &&
		    (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
			/* Preload all the LVs above the PVMOVE LV */
			dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
				if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
					/* FIXME Internal error? */
					log_error("LV %s missing from preload metadata", sl->seg->lv->name);
					goto out;
				}
				if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
					goto_out;
			}

			/* Now preload the PVMOVE LV itself */
			if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
				/* FIXME Internal error? */
				log_error("LV %s missing from preload metadata", pvmove_lv->name);
				goto out;
			}
			if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
				goto_out;
		} else if (!_lv_preload(lv_pre, laopts, &flush_required))
			/* FIXME Revert preloading */
			goto_out;
	}

	if (!monitor_dev_for_events(cmd, lv, laopts, 0))
		/* FIXME Consider aborting here */
		stack;

	critical_section_inc(cmd, "suspending");
	if (pvmove_lv)
		critical_section_inc(cmd, "suspending pvmove LV");

	if (!laopts->origin_only &&
	    (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
		lockfs = 1;

	/*
	 * Suspending an LV directly above a PVMOVE LV also
	 * suspends other LVs using that same PVMOVE LV.
	 * FIXME Remove this and delay the 'clear node' until
	 * after the code knows whether there's a different
	 * inactive table to load or not instead so lv_suspend
	 * can be called separately for each LV safely.
	 */
	if ((lv_pre->vg->status & PRECOMMITTED) &&
	    (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
		if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed precommitted suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
			goto_out;
		}
	} else {
		/* Normal suspend */
		if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
			critical_section_dec(cmd, "failed suspend");
			if (pvmove_lv)
				critical_section_dec(cmd, "failed suspend (pvmove)");
			goto_out;
		}
	}

	r = 1;
out:
	if (lv_pre)
		free_vg(lv_pre->vg);
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
/* Returns success if the device is not active */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only };

	return _lv_suspend(cmd, lvid_s, &laopts, 0);
}
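/*
 * Usage sketch (illustrative): callers normally pair suspend and resume
 * around a metadata commit:
 *
 *	if (!lv_suspend_if_active(cmd, lvid_s, 0))
 *		stack;
 *	... commit the precommitted metadata ...
 *	if (!lv_resume_if_active(cmd, lvid_s, 0, 0))
 *		stack;
 */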
/* No longer used */
/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
	return _lv_suspend(cmd, lvid_s, 1);
}
***********/
/*
 * @exclusive: This parameter only has an effect in cluster context.
 *             It forces the local target type to be used (instead of
 *             the cluster-aware type).
 * @error_if_not_active
 */
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
		      struct lv_activate_opts *laopts, int error_if_not_active)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto_out;

	if (!lv_is_origin(lv))
		laopts->origin_only = 0;

	if (test_mode()) {
		_skip("Resuming %s%s.", lv->name, laopts->origin_only ? " without snapshots" : "");
		r = 1;
		goto out;
	}

	log_debug("Resuming LV %s/%s%s%s.", lv->vg->name, lv->name,
		  error_if_not_active ? "" : " if active",
		  laopts->origin_only ? " without snapshots" : "");

	if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
		goto_out;

	if (!info.exists || !info.suspended) {
		if (error_if_not_active)
			goto_out;
		r = 1;
		if (!info.suspended)
			critical_section_dec(cmd, "already resumed");
		goto out;
	}

	if (!_lv_activate_lv(lv, laopts))
		goto_out;

	critical_section_dec(cmd, "resumed");

	if (!monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

	r = 1;
out:
	if (lv)
		free_vg(lv->vg);

	return r;
}
/* Returns success if the device is not active */
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
			unsigned origin_only, unsigned exclusive)
{
	struct lv_activate_opts laopts = {
		.origin_only = origin_only,
		/*
		 * When targets are activated exclusively in a cluster, the
		 * non-clustered target should be used.  This only happens
		 * if exclusive is set.
		 */
		.exclusive = exclusive
	};

	return _lv_resume(cmd, lvid_s, &laopts, 0);
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
	struct lv_activate_opts laopts = { .origin_only = origin_only, };

	return _lv_resume(cmd, lvid_s, &laopts, 1);
}
static int _lv_has_open_snapshots(struct logical_volume *lv)
{
	struct lv_segment *snap_seg;
	struct lvinfo info;
	int r = 0;

	dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
		if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
			r = 1;
			continue;
		}

		if (info.exists && info.open_count) {
			log_error("LV %s/%s has open snapshot %s: "
				  "not deactivating", lv->vg->name, lv->name,
				  snap_seg->cow->name);
			r = 1;
		}
	}

	return r;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (test_mode()) {
		_skip("Deactivating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);

	if (!lv_info(cmd, lv, 0, &info, 1, 0))
		goto_out;

	if (!info.exists) {
		r = 1;
		goto out;
	}

	if (lv_is_visible(lv)) {
		if (info.open_count) {
			log_error("LV %s/%s in use: not deactivating",
				  lv->vg->name, lv->name);
			goto out;
		}
		if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
			goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	if (!monitor_dev_for_events(cmd, lv, NULL, 0))
		stack;

	critical_section_inc(cmd, "deactivating");
	r = _lv_deactivate(lv);
	critical_section_dec(cmd, "deactivated");

	if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
		r = 0;
out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv)
{
	struct logical_volume *lv;
	int r = 0;

	if (!activation()) {
		*activate_lv = 1;
		return 1;
	}

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (!_passes_activation_filter(cmd, lv)) {
		log_verbose("Not activating %s/%s since it does not pass "
			    "activation filter.", lv->vg->name, lv->name);
		*activate_lv = 0;
	} else
		*activate_lv = 1;

	r = 1;
out:
	if (lv)
		free_vg(lv->vg);

	return r;
}
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
			struct lv_activate_opts *laopts, int filter)
{
	struct logical_volume *lv;
	struct lvinfo info;
	int r = 0;

	if (!activation())
		return 1;

	if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
		goto out;

	if (filter && !_passes_activation_filter(cmd, lv)) {
		log_error("Not activating %s/%s since it does not pass "
			  "activation filter.", lv->vg->name, lv->name);
		goto out;
	}

	if ((!lv->vg->cmd->partial_activation) && (lv->status & PARTIAL_LV)) {
		log_error("Refusing activation of partial LV %s. Use --partial to override.",
			  lv->name);
		goto out;
	}

	if (lv_has_unknown_segments(lv)) {
		log_error("Refusing activation of LV %s containing "
			  "an unrecognised segment.", lv->name);
		goto out;
	}

	if (test_mode()) {
		_skip("Activating '%s'.", lv->name);
		r = 1;
		goto out;
	}

	log_debug("Activating %s/%s%s.", lv->vg->name, lv->name,
		  laopts->exclusive ? " exclusively" : "");

	if (!lv_info(cmd, lv, 0, &info, 0, 0))
		goto_out;

	if (info.exists && !info.suspended && info.live_table) {
		r = 1;
		goto out;
	}

	if (!lv_read_replicator_vgs(lv))
		goto_out;

	lv_calculate_readahead(lv, NULL);

	critical_section_inc(cmd, "activating");
	if (!(r = _lv_activate_lv(lv, laopts)))
		stack;
	critical_section_dec(cmd, "activated");

	if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
		stack;

out:
	if (lv) {
		lv_release_replicator_vgs(lv);
		free_vg(lv->vg);
	}

	return r;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 0))
		return_0;

	return 1;
}
/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
	struct lv_activate_opts laopts = { .exclusive = exclusive };

	if (!_lv_activate(cmd, lvid_s, &laopts, 1))
		return_0;

	return 1;
}
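/*
 * Usage sketch (illustrative): activating an LV while honouring the
 * activation/volume_list filter:
 *
 *	if (!lv_activate_with_filter(cmd, lvid_s, 0))
 *		stack;
 */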
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r;

	if (!lv) {
		r = dm_mknodes(NULL);
		fs_unlock();
		return r;
	}

	if (!activation())
		return 1;

	r = dev_manager_mknodes(lv);

	fs_unlock();

	return r;
}
/*
 * Does PV use VG somewhere in its construction?
 * Returns 1 on failure.
 */
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	if (!activation())
		return 0;

	if (!dm_is_dm_major(MAJOR(pv->dev->dev)))
		return 0;

	return dev_manager_device_uses_vg(pv->dev, vg);
}
void activation_release(void)
{
	dev_manager_release();
}

void activation_exit(void)
{
	dev_manager_exit();
}

#endif