/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
#include "config.h"
#include "filter.h"
#include "segtype.h"
+#include "sharedlib.h"
#include <limits.h>
#include <fcntl.h>
int lvm1_present(struct cmd_context *cmd)
{
- char path[PATH_MAX];
+ static char path[PATH_MAX];
if (dm_snprintf(path, sizeof(path), "%s/lvm/global", cmd->proc_dir)
< 0) {
return 0;
}
-int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, struct lvinfo *info,
- int with_open_count, int with_read_ahead)
+int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
return 0;
}
-int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
+int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
+ struct lvinfo *info, int with_open_count, int with_read_ahead)
+{
+ return 0;
+}
+int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
struct lvinfo *info, int with_open_count, int with_read_ahead)
{
return 0;
}
-int lv_snapshot_percent(const struct logical_volume *lv, float *percent,
- percent_range_t *percent_range)
+int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
+ struct logical_volume *lv, struct lvinfo *info)
+{
+ return 0;
+}
+int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
+{
+ return 0;
+}
+int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
+ int wait, percent_t *percent, uint32_t *event_nr)
+{
+ return 0;
+}
+int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
+{
+ return 0;
+}
+int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
+ percent_t *percent)
{
return 0;
}
-int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
- int wait, float *percent, percent_range_t *percent_range,
- uint32_t *event_nr)
+int lv_thin_percent(const struct logical_volume *lv, int mapped,
+ percent_t *percent)
{
return 0;
}
-int lvs_in_vg_activated(struct volume_group *vg)
+int lv_thin_pool_transaction_id(const struct logical_volume *lv,
+ uint64_t *transaction_id)
{
return 0;
}
-int lvs_in_vg_opened(struct volume_group *vg)
+int lvs_in_vg_activated(const struct volume_group *vg)
{
return 0;
}
+int lvs_in_vg_opened(const struct volume_group *vg)
+{
+ return 0;
+}
+/******
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
return 1;
}
-int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
+*******/
+int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
return 1;
}
-int lv_resume(struct cmd_context *cmd, const char *lvid_s)
+int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
return 1;
}
-int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
+int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
+ unsigned origin_only, unsigned exclusive, unsigned revert)
{
return 1;
}
{
return 1;
}
-
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
return 1;
}
-
int pv_uses_vg(struct physical_volume *pv,
struct volume_group *vg)
{
return 0;
}
-
void activation_release(void)
{
- return;
}
-
void activation_exit(void)
{
- return;
}
+int lv_is_active(const struct logical_volume *lv)
+{
+ return 0;
+}
+int lv_is_active_but_not_locally(const struct logical_volume *lv)
+{
+ return 0;
+}
+int lv_is_active_exclusive(const struct logical_volume *lv)
+{
+ return 0;
+}
+int lv_is_active_exclusive_locally(const struct logical_volume *lv)
+{
+ return 0;
+}
+int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
+{
+ return 0;
+}
+
+int lv_check_transient(struct logical_volume *lv)
+{
+ return 1;
+}
+int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
+ const struct lv_activate_opts *laopts, int monitor)
+{
+ return 1;
+}
+/* fs.c */
+void fs_unlock(void)
+{
+}
+/* dev_manager.c */
+#include "targets.h"
+int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
+ struct dm_tree_node *node, uint32_t start_area,
+ uint32_t areas)
+{
+ return 0;
+}
+int device_is_usable(struct device *dev)
+{
+ return 0;
+}
+int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
+ const char *layer, const char *target_type)
+{
+ return 0;
+}
#else /* DEVMAPPER_SUPPORT */
static int _activation = 1;
return _activation;
}
-static int _passes_activation_filter(struct cmd_context *cmd,
- struct logical_volume *lv)
+static int _lv_passes_volumes_filter(struct cmd_context *cmd, struct logical_volume *lv,
+ const struct dm_config_node *cn, const char *config_path)
{
- const struct config_node *cn;
- struct config_value *cv;
- char *str;
- char path[PATH_MAX];
+ const struct dm_config_value *cv;
+ const char *str;
+ static char path[PATH_MAX];
- if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
- /* If no host tags defined, activate */
- if (dm_list_empty(&cmd->tags))
- return 1;
-
- /* If any host tag matches any LV or VG tag, activate */
- if (str_list_match_list(&cmd->tags, &lv->tags) ||
- str_list_match_list(&cmd->tags, &lv->vg->tags))
- return 1;
-
- /* Don't activate */
- return 0;
- }
+ log_verbose("%s configuration setting defined: "
+ "Checking the list to match %s/%s",
+ config_path, lv->vg->name, lv->name);
for (cv = cn->v; cv; cv = cv->next) {
- if (cv->type != CFG_STRING) {
- log_error("Ignoring invalid string in config file "
- "activation/volume_list");
+ if (cv->type != DM_CFG_STRING) {
+ log_error("Ignoring invalid string in config file %s",
+ config_path);
continue;
}
str = cv->v.str;
if (!*str) {
- log_error("Ignoring empty string in config file "
- "activation/volume_list");
+ log_error("Ignoring empty string in config file %s",
+ config_path);
continue;
}
+
/* Tag? */
if (*str == '@') {
str++;
if (!*str) {
log_error("Ignoring empty tag in config file "
- "activation/volume_list");
+ "%s", config_path);
continue;
}
/* If any host tag matches any LV or VG tag, activate */
if (!strcmp(str, "*")) {
- if (str_list_match_list(&cmd->tags, &lv->tags)
+ if (str_list_match_list(&cmd->tags, &lv->tags, NULL)
|| str_list_match_list(&cmd->tags,
- &lv->vg->tags))
+ &lv->vg->tags, NULL))
return 1;
else
continue;
return 1;
}
+ log_verbose("No item supplied in %s configuration setting "
+ "matches %s/%s", config_path, lv->vg->name, lv->name);
+
return 0;
}
+static int _passes_activation_filter(struct cmd_context *cmd,
+ struct logical_volume *lv)
+{
+ const struct dm_config_node *cn;
+
+ if (!(cn = find_config_tree_node(cmd, "activation/volume_list"))) {
+ log_verbose("activation/volume_list configuration setting "
+ "not defined: Checking only host tags for %s/%s",
+ lv->vg->name, lv->name);
+
+ /* If no host tags defined, activate */
+ if (dm_list_empty(&cmd->tags))
+ return 1;
+
+ /* If any host tag matches any LV or VG tag, activate */
+ if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
+ str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
+ return 1;
+
+ log_verbose("No host tag matches %s/%s",
+ lv->vg->name, lv->name);
+
+ /* Don't activate */
+ return 0;
+ }
+
+ return _lv_passes_volumes_filter(cmd, lv, cn, "activation/volume_list");
+}
+
+static int _passes_readonly_filter(struct cmd_context *cmd,
+ struct logical_volume *lv)
+{
+ const struct dm_config_node *cn;
+
+ if (!(cn = find_config_tree_node(cmd, "activation/read_only_volume_list")))
+ return 0;
+
+ return _lv_passes_volumes_filter(cmd, lv, cn, "activation/read_only_volume_list");
+}
+
+int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv)
+{
+ const struct dm_config_node *cn;
+
+ if (!(cn = find_config_tree_node(cmd, "activation/auto_activation_volume_list"))) {
+ log_verbose("activation/auto_activation_volume_list configuration setting "
+ "not defined: All logical volumes will be auto-activated.");
+ return 1;
+ }
+
+ return _lv_passes_volumes_filter(cmd, lv, cn, "activation/auto_activation_volume_list");
+}
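+/*
+ * For reference, entries in these volume lists may name a whole VG, a
+ * single LV as vg/lv, a tag as @tag, or "@*" to match any host tag.
+ * A hypothetical lvm.conf fragment:
+ *
+ *     activation {
+ *         volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
+ *     }
+ *
+ * The same syntax applies to read_only_volume_list and
+ * auto_activation_volume_list handled above.
+ */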
+
int library_version(char *version, size_t size)
{
if (!activation())
if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
return_0;
+ if (activation_checks() && !dm_task_enable_checks(dmt))
+ goto_out;
+
if (!dm_task_run(dmt)) {
log_debug("Failed to get %s target version", target_name);
/* Assume this was because LIST_VERSIONS isn't supported */
- return 1;
+ *maj = 0;
+ *min = 0;
+ *patchlevel = 0;
+ r = 1;
+ goto out;
}
target = dm_task_get_versions(dmt);
goto out;
}
- target = (void *) target + target->next;
+ target = (struct dm_versions *)((char *) target + target->next);
} while (last_target != target);
out:
+ if (r)
+ log_very_verbose("Found %s target "
+ "v%" PRIu32 ".%" PRIu32 ".%" PRIu32 ".",
+ target_name, *maj, *min, *patchlevel);
+
dm_task_destroy(dmt);
return r;
}
+int lvm_dm_prefix_check(int major, int minor, const char *prefix)
+{
+ struct dm_task *dmt;
+ const char *uuid;
+ int r;
+
+ if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
+ return_0;
+
+ if (!dm_task_set_minor(dmt, minor) ||
+ !dm_task_set_major(dmt, major) ||
+ !dm_task_run(dmt) ||
+ !(uuid = dm_task_get_uuid(dmt))) {
+ dm_task_destroy(dmt);
+ return 0;
+ }
+
+ r = strncasecmp(uuid, prefix, strlen(prefix));
+ dm_task_destroy(dmt);
+
+ return r ? 0 : 1;
+}
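+/*
+ * A minimal usage sketch (hypothetical caller): test whether the dm
+ * device at major:minor carries an LVM-owned uuid, i.e. one starting
+ * with the "LVM-" prefix:
+ *
+ *     if (lvm_dm_prefix_check(major, minor, "LVM-"))
+ *         log_debug("%d:%d is an LVM device.", major, minor);
+ */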
+
int module_present(struct cmd_context *cmd, const char *target_name)
{
int ret = 0;
argv[1] = module;
argv[2] = NULL;
- ret = exec_cmd(cmd, argv);
+ ret = exec_cmd(cmd, argv, NULL, 0);
#endif
return ret;
}
/*
* Returns 1 if info structure populated, else 0 on failure.
*/
-int lv_info(struct cmd_context *cmd, const struct logical_volume *lv,
+int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
struct lvinfo *info, int with_open_count, int with_read_ahead)
{
struct dm_info dminfo;
+ const char *layer;
if (!activation())
return 0;
+ /*
+	 * If open_count info is requested, we have to be sure our own udev
+	 * transactions are finished.
+	 * For the non-clustered locking type we are only interested in
+	 * non-delete operations in progress, as only those could lead to
+	 * open files.
+ */
+ if (with_open_count) {
+ if (locking_is_clustered())
+			sync_local_dev_names(cmd); /* Wait for udev to be in sync */
+		else if (fs_has_non_delete_ops())
+			fs_unlock(); /* For non-clustered - wait if there are non-delete ops */
+ }
- if (!dev_manager_info(lv->vg->cmd->mem, lv, with_open_count,
+ if (use_layer && lv_is_thin_pool(lv))
+ layer = "tpool";
+ else if (use_layer && lv_is_origin(lv))
+ layer = "real";
+ else
+ layer = NULL;
+
+ if (!dev_manager_info(lv->vg->cmd->mem, lv, layer, with_open_count,
with_read_ahead, &dminfo, &info->read_ahead))
return_0;
return 1;
}
-int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s,
+int lv_info_by_lvid(struct cmd_context *cmd, const char *lvid_s, int use_layer,
struct lvinfo *info, int with_open_count, int with_read_ahead)
{
int r;
if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
return 0;
- r = lv_info(cmd, lv, info, with_open_count, with_read_ahead);
- vg_release(lv->vg);
+ r = lv_info(cmd, lv, use_layer, info, with_open_count, with_read_ahead);
+ release_vg(lv->vg);
return r;
}
+int lv_check_not_in_use(struct cmd_context *cmd __attribute__((unused)),
+ struct logical_volume *lv, struct lvinfo *info)
+{
+ if (!info->exists)
+ return 1;
+
+ /* If sysfs is not used, use open_count information only. */
+ if (!*dm_sysfs_dir()) {
+ if (info->open_count) {
+ log_error("Logical volume %s/%s in use.",
+ lv->vg->name, lv->name);
+ return 0;
+ }
+
+ return 1;
+ }
+
+ if (dm_device_has_holders(info->major, info->minor)) {
+ log_error("Logical volume %s/%s is used by another device.",
+ lv->vg->name, lv->name);
+ return 0;
+ }
+
+ if (dm_device_has_mounted_fs(info->major, info->minor)) {
+ log_error("Logical volume %s/%s contains a filesystem in use.",
+ lv->vg->name, lv->name);
+ return 0;
+ }
+
+ return 1;
+}
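+/*
+ * A sketch of the intended calling sequence (hypothetical caller):
+ * populate the info structure with open_count first, then refuse to
+ * proceed while the device has openers, holders or a mounted fs:
+ *
+ *     struct lvinfo info;
+ *
+ *     if (!lv_info(cmd, lv, 0, &info, 1, 0) ||
+ *         !lv_check_not_in_use(cmd, lv, &info))
+ *             return 0;
+ */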
+
/*
* Returns 1 if percent set, else 0 on failure.
*/
if (!activation())
return 0;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
+ log_debug("Checking transient status for LV %s/%s", lv->vg->name, lv->name);
+
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;
if (!(r = dev_manager_transient(dm, lv)))
/*
* Returns 1 if percent set, else 0 on failure.
*/
-int lv_snapshot_percent(const struct logical_volume *lv, float *percent,
- percent_range_t *percent_range)
+int lv_snapshot_percent(const struct logical_volume *lv, percent_t *percent)
{
int r;
struct dev_manager *dm;
if (!activation())
return 0;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
+ log_debug("Checking snapshot percent for LV %s/%s", lv->vg->name, lv->name);
+
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;
- if (!(r = dev_manager_snapshot_percent(dm, lv, percent, percent_range)))
+ if (!(r = dev_manager_snapshot_percent(dm, lv, percent)))
stack;
dev_manager_destroy(dm);
}
/* FIXME Merge with snapshot_percent */
-int lv_mirror_percent(struct cmd_context *cmd, struct logical_volume *lv,
- int wait, float *percent, percent_range_t *percent_range,
- uint32_t *event_nr)
+int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
+ int wait, percent_t *percent, uint32_t *event_nr)
{
int r;
struct dev_manager *dm;
	/* If a mirrored LV is temporarily shrunk to one area (= linear),
	 * it should be considered in-sync. */
if (dm_list_size(&lv->segments) == 1 && first_seg(lv)->area_count == 1) {
- *percent = 100.0;
+ *percent = PERCENT_100;
return 1;
}
if (!activation())
return 0;
- if (!lv_info(cmd, lv, &info, 0, 0))
+ log_debug("Checking mirror percent for LV %s/%s", lv->vg->name, lv->name);
+
+ if (!lv_info(cmd, lv, 0, &info, 0, 0))
return_0;
if (!info.exists)
return 0;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
+ return_0;
+
+ if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent, event_nr)))
+ stack;
+
+ dev_manager_destroy(dm);
+
+ return r;
+}
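+/*
+ * A hedged example of polling mirror sync progress: percent_t is a
+ * fixed-point percentage, so callers compare against PERCENT_100
+ * (as assigned in the linear shortcut above) rather than a float:
+ *
+ *     percent_t sync;
+ *
+ *     if (lv_mirror_percent(cmd, lv, 0, &sync, NULL) &&
+ *         sync == PERCENT_100)
+ *         log_verbose("Mirror %s is in sync.", lv->name);
+ */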
+
+int lv_raid_percent(const struct logical_volume *lv, percent_t *percent)
+{
+ return lv_mirror_percent(lv->vg->cmd, lv, 0, percent, NULL);
+}
+
+/*
+ * Returns the data or metadata percent usage, depending on whether
+ * metadata is 0 or 1.
+ * Returns 1 if percent set, else 0 on failure.
+ */
+int lv_thin_pool_percent(const struct logical_volume *lv, int metadata,
+ percent_t *percent)
+{
+ int r;
+ struct dev_manager *dm;
+
+ if (!activation())
+ return 0;
+
+ log_debug("Checking thin %sdata percent for LV %s/%s",
+ (metadata) ? "meta" : "", lv->vg->name, lv->name);
+
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
+ return_0;
+
+ if (!(r = dev_manager_thin_pool_percent(dm, lv, metadata, percent)))
+ stack;
+
+ dev_manager_destroy(dm);
+
+ return r;
+}
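+/*
+ * A hedged usage sketch combining the two modes: metadata=0 reads the
+ * data usage, metadata=1 the metadata usage; percent_to_float() is
+ * assumed here for display:
+ *
+ *     percent_t data, meta;
+ *
+ *     if (lv_thin_pool_percent(pool_lv, 0, &data) &&
+ *         lv_thin_pool_percent(pool_lv, 1, &meta))
+ *         log_verbose("Pool usage: data %.2f%%, metadata %.2f%%.",
+ *                     percent_to_float(data), percent_to_float(meta));
+ */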
+
+/*
+ * Returns 1 if percent set, else 0 on failure.
+ */
+int lv_thin_percent(const struct logical_volume *lv,
+ int mapped, percent_t *percent)
+{
+ int r;
+ struct dev_manager *dm;
+
+ if (!activation())
+ return 0;
+
+ log_debug("Checking thin percent for LV %s/%s",
+ lv->vg->name, lv->name);
+
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
+ return_0;
+
+ if (!(r = dev_manager_thin_percent(dm, lv, mapped, percent)))
+ stack;
+
+ dev_manager_destroy(dm);
+
+ return r;
+}
+
+/*
+ * Returns 1 if transaction_id set, else 0 on failure.
+ */
+int lv_thin_pool_transaction_id(const struct logical_volume *lv,
+ uint64_t *transaction_id)
+{
+ int r;
+ struct dev_manager *dm;
+ struct dm_status_thin_pool *status;
+
+ if (!activation())
+ return 0;
+
+	log_debug("Checking thin pool transaction id for LV %s/%s",
+ lv->vg->name, lv->name);
+
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;
- if (!(r = dev_manager_mirror_percent(dm, lv, wait, percent,
- percent_range, event_nr)))
+ if (!(r = dev_manager_thin_pool_status(dm, lv, &status)))
stack;
+ else
+ *transaction_id = status->transaction_id;
dev_manager_destroy(dm);
return r;
}
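+/*
+ * A hedged sketch: the transaction id read from the kernel is normally
+ * compared against the one recorded in the LVM metadata (assumed here
+ * to live in first_seg(lv)->transaction_id) to detect a mismatch:
+ *
+ *     uint64_t txid;
+ *
+ *     if (lv_thin_pool_transaction_id(pool_lv, &txid) &&
+ *         txid != first_seg(pool_lv)->transaction_id)
+ *         log_error("Thin pool transaction id mismatch.");
+ */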
-static int _lv_active(struct cmd_context *cmd, struct logical_volume *lv)
+static int _lv_active(struct cmd_context *cmd, const struct logical_volume *lv)
{
struct lvinfo info;
- if (!lv_info(cmd, lv, &info, 0, 0)) {
+ if (!lv_info(cmd, lv, 0, &info, 0, 0)) {
stack;
return -1;
}
{
struct lvinfo info;
- if (!lv_info(cmd, lv, &info, 1, 0)) {
+ if (!lv_info(cmd, lv, 0, &info, 1, 0)) {
stack;
return -1;
}
return info.open_count;
}
-static int _lv_activate_lv(struct logical_volume *lv)
+static int _lv_activate_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
int r;
struct dev_manager *dm;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
return_0;
- if (!(r = dev_manager_activate(dm, lv)))
+ if (!(r = dev_manager_activate(dm, lv, laopts)))
stack;
dev_manager_destroy(dm);
return r;
}
-static int _lv_preload(struct logical_volume *lv, int *flush_required)
+static int _lv_preload(struct logical_volume *lv, struct lv_activate_opts *laopts,
+ int *flush_required)
{
- int r;
+ int r = 0;
struct dev_manager *dm;
+ int old_readonly = laopts->read_only;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
- return_0;
+ laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);
- if (!(r = dev_manager_preload(dm, lv, flush_required)))
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
+ goto_out;
+
+ if (!(r = dev_manager_preload(dm, lv, laopts, flush_required)))
stack;
dev_manager_destroy(dm);
+
+ laopts->read_only = old_readonly;
+out:
return r;
}
int r;
struct dev_manager *dm;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, 1)))
return_0;
if (!(r = dev_manager_deactivate(dm, lv)))
return r;
}
-static int _lv_suspend_lv(struct logical_volume *lv, int lockfs, int flush_required)
+static int _lv_suspend_lv(struct logical_volume *lv, struct lv_activate_opts *laopts,
+ int lockfs, int flush_required)
{
int r;
struct dev_manager *dm;
- if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name)))
+ laopts->read_only = _passes_readonly_filter(lv->vg->cmd, lv);
+
+ /*
+ * When we are asked to manipulate (normally suspend/resume) the PVMOVE
+ * device directly, we don't want to touch the devices that use it.
+ */
+ if (!(dm = dev_manager_create(lv->vg->cmd, lv->vg->name, (lv->status & PVMOVE) ? 0 : 1)))
return_0;
- if (!(r = dev_manager_suspend(dm, lv, lockfs, flush_required)))
+ if (!(r = dev_manager_suspend(dm, lv, laopts, lockfs, flush_required)))
stack;
dev_manager_destroy(dm);
/*
 * These two functions return the number of visible LVs in the
 * requested state (active or open),
- * or -1 on error.
+ * or -1 on error. FIXME Check this.
*/
-int lvs_in_vg_activated(struct volume_group *vg)
+int lvs_in_vg_activated(const struct volume_group *vg)
{
struct lv_list *lvl;
int count = 0;
if (!activation())
return 0;
- dm_list_iterate_items(lvl, &vg->lvs) {
+ dm_list_iterate_items(lvl, &vg->lvs)
if (lv_is_visible(lvl->lv))
count += (_lv_active(vg->cmd, lvl->lv) == 1);
- }
+
+ log_debug("Counted %d active LVs in VG %s", count, vg->name);
return count;
}
if (!activation())
return 0;
- dm_list_iterate_items(lvl, &vg->lvs) {
+ dm_list_iterate_items(lvl, &vg->lvs)
if (lv_is_visible(lvl->lv))
count += (_lv_open_count(vg->cmd, lvl->lv) > 0);
- }
+
+ log_debug("Counted %d open LVs in VG %s", count, vg->name);
return count;
}
/*
+ * _lv_is_active
+ * @lv: logical volume being queried
+ * @locally: set if active locally (when provided)
+ * @exclusive: set if active exclusively (when provided)
+ *
* Determine whether an LV is active locally or in a cluster.
- * Assumes vg lock held.
- * Returns:
- * 0 - not active locally or on any node in cluster
- * 1 - active either locally or some node in the cluster
+ * In addition to the return code which indicates whether or
+ * not the LV is active somewhere, two other values are set
+ * to yield more information about the status of the activation:
+ * return locally exclusively status
+ * ====== ======= =========== ======
+ * 0 0 0 not active
+ * 1 0 0 active remotely
+ * 1 0 1 exclusive remotely
+ * 1 1 0 active locally and possibly remotely
+ * 1 1 1 exclusive locally (or local && !cluster)
+ * The VG lock must be held to call this function.
+ *
+ * Returns: 0 or 1
*/
-int lv_is_active(struct logical_volume *lv)
+static int _lv_is_active(const struct logical_volume *lv,
+ int *locally, int *exclusive)
{
- int ret;
+ int r, l, e; /* remote, local, and exclusive */
+
+ r = l = e = 0;
if (_lv_active(lv->vg->cmd, lv))
- return 1;
+ l = 1;
- if (!vg_is_clustered(lv->vg))
- return 0;
+ if (!vg_is_clustered(lv->vg)) {
+ if (l)
+ e = 1; /* exclusive by definition */
+ goto out;
+ }
- if ((ret = remote_lock_held(lv->lvid.s)) >= 0)
- return ret;
+ /* Active locally, and the caller doesn't care about exclusive */
+ if (l && !exclusive)
+ goto out;
+
+ if ((r = remote_lock_held(lv->lvid.s, &e)) >= 0)
+ goto out;
/*
- * Old compatibility code if locking doesn't support lock query
- * FIXME: check status to not deactivate already activate device
+ * If lock query is not supported (due to interfacing with old
+ * code), then we cannot evaluate exclusivity properly.
+ *
+ * Old users of this function will never be affected by this,
+ * since they are only concerned about active vs. not active.
+ * New users of this function who specifically ask for 'exclusive'
+ * will be given an error message.
*/
- if (activate_lv_excl(lv->vg->cmd, lv)) {
- if (!deactivate_lv(lv->vg->cmd, lv))
- stack;
- return 0;
- }
+ log_error("Unable to determine exclusivity of %s", lv->name);
+
+ e = 0;
/*
- * Exclusive local activation failed so assume it is active elsewhere.
+ * We used to attempt activate_lv_excl_local(lv->vg->cmd, lv) here,
+ * but it's unreliable.
*/
+
+out:
+ if (locally)
+ *locally = l;
+ if (exclusive)
+ *exclusive = e;
+
+ log_very_verbose("%s/%s is %sactive%s%s",
+ lv->vg->name, lv->name,
+ (r || l) ? "" : "not ",
+ (exclusive && e) ? " exclusive" : "",
+ e ? (l ? " locally" : " remotely") : "");
+
+ return r || l;
+}
+
+int lv_is_active(const struct logical_volume *lv)
+{
+ return _lv_is_active(lv, NULL, NULL);
+}
+
+int lv_is_active_but_not_locally(const struct logical_volume *lv)
+{
+ int l;
+ return _lv_is_active(lv, &l, NULL) && !l;
+}
+
+int lv_is_active_exclusive(const struct logical_volume *lv)
+{
+ int e;
+
+ return _lv_is_active(lv, NULL, &e) && e;
+}
+
+int lv_is_active_exclusive_locally(const struct logical_volume *lv)
+{
+ int l, e;
+
+ return _lv_is_active(lv, &l, &e) && l && e;
+}
+
+int lv_is_active_exclusive_remotely(const struct logical_volume *lv)
+{
+ int l, e;
+
+ return _lv_is_active(lv, &l, &e) && !l && e;
+}
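+/*
+ * Illustrative checks built on the wrappers above, following the truth
+ * table documented at _lv_is_active() (hypothetical caller):
+ *
+ *     if (lv_is_active_but_not_locally(lv))
+ *         log_error("%s is active on a remote node.", lv->name);
+ *     else if (lv_is_active_exclusive_locally(lv))
+ *         log_verbose("%s is held exclusively by this node.", lv->name);
+ */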
+
+#ifdef DMEVENTD
+static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd, const char *dmuuid, const char *dso,
+ const int timeout, enum dm_event_mask mask)
+{
+ struct dm_event_handler *dmevh;
+
+ if (!(dmevh = dm_event_handler_create()))
+ return_NULL;
+
+ if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, "dmeventd/executable", NULL)))
+ goto_bad;
+
+ if (dm_event_handler_set_dso(dmevh, dso))
+ goto_bad;
+
+ if (dm_event_handler_set_uuid(dmevh, dmuuid))
+ goto_bad;
+
+ dm_event_handler_set_timeout(dmevh, timeout);
+ dm_event_handler_set_event_mask(dmevh, mask);
+
+ return dmevh;
+
+bad:
+ dm_event_handler_destroy(dmevh);
+ return NULL;
+}
+
+char *get_monitor_dso_path(struct cmd_context *cmd, const char *libpath)
+{
+ char *path;
+
+ if (!(path = dm_pool_alloc(cmd->mem, PATH_MAX))) {
+ log_error("Failed to allocate dmeventd library path.");
+ return NULL;
+ }
+
+ get_shared_library_path(cmd, libpath, path, PATH_MAX);
+
+ return path;
+}
+
+static char *_build_target_uuid(struct cmd_context *cmd, struct logical_volume *lv)
+{
+ const char *layer;
+
+ if (lv_is_thin_pool(lv))
+ layer = "tpool"; /* Monitor "tpool" for the "thin pool". */
+ else if (lv_is_origin(lv))
+ layer = "real"; /* Monitor "real" for "snapshot-origin". */
+ else
+ layer = NULL;
+
+ return build_dm_uuid(cmd->mem, lv->lvid.s, layer);
+}
+
+int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
+ struct logical_volume *lv, int *pending)
+{
+ char *uuid;
+ enum dm_event_mask evmask = 0;
+ struct dm_event_handler *dmevh;
+ *pending = 0;
+
+ if (!dso)
+ return_0;
+
+ if (!(uuid = _build_target_uuid(cmd, lv)))
+ return_0;
+
+ if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, 0, DM_EVENT_ALL_ERRORS)))
+ return_0;
+
+ if (dm_event_get_registered_device(dmevh, 0)) {
+ dm_event_handler_destroy(dmevh);
+ return 0;
+ }
+
+ evmask = dm_event_handler_get_event_mask(dmevh);
+ if (evmask & DM_EVENT_REGISTRATION_PENDING) {
+ *pending = 1;
+ evmask &= ~DM_EVENT_REGISTRATION_PENDING;
+ }
+
+ dm_event_handler_destroy(dmevh);
+
+ return evmask;
+}
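+/*
+ * A hedged sketch of a caller consulting the registration state; the
+ * dso path would typically come from get_monitor_dso_path() above:
+ *
+ *     int pending = 0;
+ *     int evmask = target_registered_with_dmeventd(cmd, dso, lv, &pending);
+ *
+ *     if (pending)
+ *         log_verbose("dmeventd registration pending for %s.", lv->name);
+ *     else if (evmask)
+ *         log_verbose("%s is monitored for events.", lv->name);
+ */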
+
+int target_register_events(struct cmd_context *cmd, const char *dso, struct logical_volume *lv,
+ int evmask __attribute__((unused)), int set, int timeout)
+{
+ char *uuid;
+ struct dm_event_handler *dmevh;
+ int r;
+
+ if (!dso)
+ return_0;
+
+ /* We always monitor the "real" device, never the "snapshot-origin" itself. */
+ if (!(uuid = _build_target_uuid(cmd, lv)))
+ return_0;
+
+ if (!(dmevh = _create_dm_event_handler(cmd, uuid, dso, timeout,
+ DM_EVENT_ALL_ERRORS | (timeout ? DM_EVENT_TIMEOUT : 0))))
+ return_0;
+
+ r = set ? dm_event_register_handler(dmevh) : dm_event_unregister_handler(dmevh);
+
+ dm_event_handler_destroy(dmevh);
+
+ if (!r)
+ return_0;
+
+ log_info("%s %s for events", set ? "Monitored" : "Unmonitored", uuid);
+
return 1;
}
+#endif
+
/*
* Returns 0 if an attempt to (un)monitor the device failed.
* Returns 1 otherwise.
*/
-int monitor_dev_for_events(struct cmd_context *cmd,
- struct logical_volume *lv, int monitor)
+int monitor_dev_for_events(struct cmd_context *cmd, struct logical_volume *lv,
+ const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
int i, pending = 0, monitored;
struct lv_segment *log_seg;
int (*monitor_fn) (struct lv_segment *s, int e);
uint32_t s;
+ static const struct lv_activate_opts zlaopts = { 0 };
+ static const struct lv_activate_opts thinopts = { .skip_in_use = 1 };
+ struct lvinfo info;
+
+ if (!laopts)
+ laopts = &zlaopts;
/* skip dmeventd code altogether */
if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
if (monitor && !dmeventd_monitor_mode())
return 1;
+ /*
+	 * Allow unmonitoring of a thin pool via an explicit pool unmonitor,
+	 * or when unmonitoring before the last thin pool user is deactivated.
+	 * Skip unmonitoring if invoked via unmonitor of a thin volume while
+	 * another thin pool user remains (open_count > 1).
+ */
+ if (laopts->skip_in_use && lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) &&
+ (info.open_count != 1)) {
+ log_debug("Skipping unmonitor of opened %s (open:%d)",
+ lv->name, info.open_count);
+ return 1;
+ }
+
/*
* In case of a snapshot device, we monitor lv->snapshot->lv,
* not the actual LV itself.
*/
- if (lv_is_cow(lv) && !lv_is_merging_cow(lv))
- return monitor_dev_for_events(cmd, lv->snapshot->lv, monitor);
+ if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv)))
+ return monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor);
/*
* In case this LV is a snapshot origin, we instead monitor
- * each of its respective snapshots (the origin itself does
- * not need to be monitored).
- *
- * TODO: This may change when snapshots of mirrors are allowed.
+ * each of its respective snapshots. The origin itself may
+ * also need to be monitored if it is a mirror, for example.
*/
- if (lv_is_origin(lv)) {
+ if (!laopts->origin_only && lv_is_origin(lv))
dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
- struct lv_segment, origin_list)->cow, monitor))
+ struct lv_segment, origin_list)->cow, NULL, monitor))
r = 0;
- return r;
- }
/*
* If the volume is mirrored and its log is also mirrored, monitor
if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
(log_seg = first_seg(seg->log_lv)) != NULL &&
seg_is_mirrored(log_seg))
- if (!monitor_dev_for_events(cmd, seg->log_lv, monitor))
+ if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor))
r = 0;
dm_list_iterate(tmp, &lv->segments) {
for (s = 0; s < seg->area_count; s++) {
if (seg_type(seg, s) != AREA_LV)
continue;
- if (!monitor_dev_for_events(cmd, seg_lv(seg, s),
+ if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
monitor)) {
log_error("Failed to %smonitor %s",
monitor ? "" : "un",
}
}
+ /*
+	 * If unmonitoring of a thin volume was requested, check first
+	 * whether any other thin pool user remains.
+ *
+ * FIXME: code here looks like _lv_postorder()
+ */
+ if (seg->pool_lv &&
+ !monitor_dev_for_events(cmd, seg->pool_lv,
+ (!monitor) ? &thinopts : NULL, monitor))
+ r = 0;
+
+ if (seg->metadata_lv &&
+ !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor))
+ r = 0;
+
if (!seg_monitored(seg) || (seg->status & PVMOVE))
continue;
if (!monitor_fn)
continue;
- log_verbose("%sonitoring %s/%s", monitor ? "M" : "Not m", lv->vg->name, lv->name);
+ log_verbose("%sonitoring %s/%s%s", monitor ? "M" : "Not m", lv->vg->name, lv->name,
+ test_mode() ? " [Test mode: skipping this]" : "");
+
+ /* FIXME Test mode should really continue a bit further. */
+ if (test_mode())
+ continue;
/* FIXME specify events */
if (!monitor_fn(seg, 0)) {
sleep(1);
}
- r = (monitored && monitor) || (!monitored && !monitor);
+ if (r)
+ r = (monitored && monitor) || (!monitored && !monitor);
}
return r;
#endif
}
+struct detached_lv_data {
+ struct logical_volume *lv_pre;
+ struct lv_activate_opts *laopts;
+ int *flush_required;
+};
+
+static int _preload_detached_lv(struct cmd_context *cmd, struct logical_volume *lv, void *data)
+{
+ struct detached_lv_data *detached = data;
+ struct lv_list *lvl_pre;
+
+ if ((lvl_pre = find_lv_in_vg(detached->lv_pre->vg, lv->name))) {
+ if (lv_is_visible(lvl_pre->lv) && lv_is_active(lv) && (!lv_is_cow(lv) || !lv_is_cow(lvl_pre->lv)) &&
+ !_lv_preload(lvl_pre->lv, detached->laopts, detached->flush_required))
+ return_0;
+ }
+
+ return 1;
+}
+
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
- int error_if_not_suspended)
+ struct lv_activate_opts *laopts, int error_if_not_suspended)
{
- struct logical_volume *lv = NULL, *lv_pre = NULL;
+ struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
+ struct lv_list *lvl_pre;
+ struct seg_list *sl;
+ struct lv_segment *snap_seg;
struct lvinfo info;
int r = 0, lockfs = 0, flush_required = 0;
+ struct detached_lv_data detached;
if (!activation())
return 1;
if (!(lv_pre = lv_from_lvid(cmd, lvid_s, 1)))
goto_out;
+ /* Ignore origin_only unless LV is origin in both old and new metadata */
+ if (!lv_is_thin_volume(lv) && !(lv_is_origin(lv) && lv_is_origin(lv_pre)))
+ laopts->origin_only = 0;
+
if (test_mode()) {
- _skip("Suspending '%s'.", lv->name);
+ _skip("Suspending %s%s.", lv->name, laopts->origin_only ? " origin without snapshots" : "");
r = 1;
goto out;
}
- if (!lv_info(cmd, lv, &info, 0, 0))
+ if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
goto_out;
if (!info.exists || info.suspended) {
if (!error_if_not_suspended) {
r = 1;
if (info.suspended)
- memlock_inc(cmd);
+ critical_section_inc(cmd, "already suspended");
}
goto out;
}
lv_calculate_readahead(lv, NULL);
- /* If VG was precommitted, preload devices for the LV */
- if ((lv_pre->vg->status & PRECOMMITTED)) {
- if (!_lv_preload(lv_pre, &flush_required)) {
+ /*
+ * Preload devices for the LV.
+ * If the PVMOVE LV is being removed, it's only present in the old
+ * metadata and not the new, so we must explicitly add the new
+ * tables for all the changed LVs here, as the relationships
+ * are not found by walking the new metadata.
+ */
+ if (!(lv_pre->status & LOCKED) &&
+ (lv->status & LOCKED) &&
+ (pvmove_lv = find_pvmove_lv_in_lv(lv))) {
+ /* Preload all the LVs above the PVMOVE LV */
+ dm_list_iterate_items(sl, &pvmove_lv->segs_using_this_lv) {
+ if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, sl->seg->lv->name))) {
+ log_error(INTERNAL_ERROR "LV %s missing from preload metadata", sl->seg->lv->name);
+ goto out;
+ }
+ if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
+ goto_out;
+ }
+ /* Now preload the PVMOVE LV itself */
+ if (!(lvl_pre = find_lv_in_vg(lv_pre->vg, pvmove_lv->name))) {
+ log_error(INTERNAL_ERROR "LV %s missing from preload metadata", pvmove_lv->name);
+ goto out;
+ }
+ if (!_lv_preload(lvl_pre->lv, laopts, &flush_required))
+ goto_out;
+ } else {
+ if (!_lv_preload(lv_pre, laopts, &flush_required))
/* FIXME Revert preloading */
goto_out;
+
+ /*
+ * Search for existing LVs that have become detached and preload them.
+ */
+ detached.lv_pre = lv_pre;
+ detached.laopts = laopts;
+ detached.flush_required = &flush_required;
+
+ if (!for_each_sub_lv(cmd, lv, &_preload_detached_lv, &detached))
+ goto_out;
+
+ /*
+ * Preload any snapshots that are being removed.
+ */
+ if (!laopts->origin_only && lv_is_origin(lv)) {
+ dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
+ if (!(lvl_pre = find_lv_in_vg_by_lvid(lv_pre->vg, &snap_seg->cow->lvid))) {
+ log_error(INTERNAL_ERROR "LV %s (%s) missing from preload metadata",
+ snap_seg->cow->name, snap_seg->cow->lvid.id[1].uuid);
+ goto out;
+ }
+ if (!lv_is_cow(lvl_pre->lv) &&
+ !_lv_preload(lvl_pre->lv, laopts, &flush_required))
+ goto_out;
+ }
}
}
- if (!monitor_dev_for_events(cmd, lv, 0))
+ if (!monitor_dev_for_events(cmd, lv, laopts, 0))
/* FIXME Consider aborting here */
stack;
- memlock_inc(cmd);
+ critical_section_inc(cmd, "suspending");
+ if (pvmove_lv)
+ critical_section_inc(cmd, "suspending pvmove LV");
- if (lv_is_origin(lv_pre) || lv_is_cow(lv_pre))
+ if (!laopts->origin_only &&
+ (lv_is_origin(lv_pre) || lv_is_cow(lv_pre)))
lockfs = 1;
- if (!_lv_suspend_lv(lv, lockfs, flush_required)) {
- memlock_dec(cmd);
- fs_unlock();
- goto out;
+ if (laopts->origin_only && lv_is_thin_volume(lv) && lv_is_thin_volume(lv_pre))
+ lockfs = 1;
+
+ /*
+ * Suspending an LV directly above a PVMOVE LV also
+ * suspends other LVs using that same PVMOVE LV.
+ * FIXME Remove this and delay the 'clear node' until
+ * after the code knows whether there's a different
+ * inactive table to load or not instead so lv_suspend
+ * can be called separately for each LV safely.
+ */
+ if ((lv_pre->vg->status & PRECOMMITTED) &&
+ (lv_pre->status & LOCKED) && find_pvmove_lv_in_lv(lv_pre)) {
+ if (!_lv_suspend_lv(lv_pre, laopts, lockfs, flush_required)) {
+ critical_section_dec(cmd, "failed precommitted suspend");
+ if (pvmove_lv)
+ critical_section_dec(cmd, "failed precommitted suspend (pvmove)");
+ goto_out;
+ }
+ } else {
+ /* Normal suspend */
+ if (!_lv_suspend_lv(lv, laopts, lockfs, flush_required)) {
+ critical_section_dec(cmd, "failed suspend");
+ if (pvmove_lv)
+ critical_section_dec(cmd, "failed suspend (pvmove)");
+ goto_out;
+ }
}
r = 1;
out:
if (lv_pre)
- vg_release(lv_pre->vg);
+ release_vg(lv_pre->vg);
if (lv) {
lv_release_replicator_vgs(lv);
- vg_release(lv->vg);
+ release_vg(lv->vg);
}
return r;
}
-/* Returns success if the device is not active */
-int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s)
+/*
+ * In a cluster, set exclusive to indicate that only one node is using the
+ * device. Any preloaded tables may then use non-clustered targets.
+ *
+ * Returns success if the device is not active
+ */
+int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
{
- return _lv_suspend(cmd, lvid_s, 0);
+ struct lv_activate_opts laopts = {
+ .origin_only = origin_only,
+ .exclusive = exclusive
+ };
+
+ return _lv_suspend(cmd, lvid_s, &laopts, 0);
}
+/* No longer used */
+/***********
int lv_suspend(struct cmd_context *cmd, const char *lvid_s)
{
return _lv_suspend(cmd, lvid_s, 1);
}
+***********/
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
- int error_if_not_active)
+ struct lv_activate_opts *laopts, int error_if_not_active)
{
struct logical_volume *lv;
struct lvinfo info;
int r = 0;
+ int messages_only = 0;
if (!activation())
return 1;
if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
goto_out;
+ if (lv_is_thin_pool(lv) && laopts->origin_only)
+ messages_only = 1;
+
+ if (!lv_is_origin(lv) && !lv_is_thin_volume(lv))
+ laopts->origin_only = 0;
+
if (test_mode()) {
- _skip("Resuming '%s'.", lv->name);
+ _skip("Resuming %s%s%s.", lv->name, laopts->origin_only ? " without snapshots" : "",
+ laopts->revert ? " (reverting)" : "");
r = 1;
goto out;
}
- if (!lv_info(cmd, lv, &info, 0, 0))
- goto_out;
+ log_debug("Resuming LV %s/%s%s%s%s.", lv->vg->name, lv->name,
+ error_if_not_active ? "" : " if active",
+ laopts->origin_only ? " without snapshots" : "",
+ laopts->revert ? " (reverting)" : "");
- if (!info.exists || !info.suspended) {
- r = error_if_not_active ? 0 : 1;
+ if (!lv_info(cmd, lv, laopts->origin_only, &info, 0, 0))
goto_out;
+
+ if (!info.exists || !(info.suspended || messages_only)) {
+ if (error_if_not_active)
+ goto_out;
+ r = 1;
+ if (!info.suspended)
+ critical_section_dec(cmd, "already resumed");
+ goto out;
}
- if (!_lv_activate_lv(lv))
+ laopts->read_only = _passes_readonly_filter(cmd, lv);
+
+ if (!_lv_activate_lv(lv, laopts))
goto_out;
- memlock_dec(cmd);
- fs_unlock();
+ critical_section_dec(cmd, "resumed");
- if (!monitor_dev_for_events(cmd, lv, 1))
+ if (!monitor_dev_for_events(cmd, lv, laopts, 1))
stack;
r = 1;
out:
if (lv)
- vg_release(lv->vg);
+ release_vg(lv->vg);
return r;
}
-/* Returns success if the device is not active */
-int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s)
+/*
+ * In a cluster, set exclusive to indicate that only one node is using the
+ * device. Any tables loaded may then use non-clustered targets.
+ *
+ * @origin_only
+ * @exclusive This parameter only has an effect in a cluster context.
+ *            It forces the local target type to be used (instead of
+ *            the cluster-aware type).
+ * Returns success if the device is not active
+ */
+int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
+ unsigned origin_only, unsigned exclusive,
+ unsigned revert)
{
- return _lv_resume(cmd, lvid_s, 0);
+ struct lv_activate_opts laopts = {
+ .origin_only = origin_only,
+ .exclusive = exclusive,
+ .revert = revert
+ };
+
+ return _lv_resume(cmd, lvid_s, &laopts, 0);
}
-int lv_resume(struct cmd_context *cmd, const char *lvid_s)
+int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
{
- return _lv_resume(cmd, lvid_s, 1);
+ struct lv_activate_opts laopts = { .origin_only = origin_only, };
+
+ return _lv_resume(cmd, lvid_s, &laopts, 1);
}
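+/*
+ * For context, a typical metadata update pairs these calls (simplified
+ * sketch; locking and error recovery omitted):
+ *
+ *     if (!lv_suspend_if_active(cmd, lvid_s, origin_only, exclusive))
+ *             return_0;
+ *     (commit the new on-disk metadata here)
+ *     if (!lv_resume_if_active(cmd, lvid_s, origin_only, exclusive, 0))
+ *             return_0;
+ */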
static int _lv_has_open_snapshots(struct logical_volume *lv)
int r = 0;
dm_list_iterate_items_gen(snap_seg, &lv->snapshot_segs, origin_list) {
- if (!lv_info(lv->vg->cmd, snap_seg->cow, &info, 1, 0)) {
+ if (!lv_info(lv->vg->cmd, snap_seg->cow, 0, &info, 1, 0)) {
r = 1;
continue;
}
goto out;
}
- if (!lv_info(cmd, lv, &info, 1, 0))
+ log_debug("Deactivating %s/%s.", lv->vg->name, lv->name);
+
+ if (!lv_info(cmd, lv, 0, &info, 1, 0))
goto_out;
if (!info.exists) {
}
if (lv_is_visible(lv)) {
- if (info.open_count) {
- log_error("LV %s/%s in use: not deactivating",
- lv->vg->name, lv->name);
- goto out;
- }
+ if (!lv_check_not_in_use(cmd, lv, &info))
+ goto_out;
+
if (lv_is_origin(lv) && _lv_has_open_snapshots(lv))
goto_out;
}
lv_calculate_readahead(lv, NULL);
- if (!monitor_dev_for_events(cmd, lv, 0))
+ if (!monitor_dev_for_events(cmd, lv, NULL, 0))
stack;
- memlock_inc(cmd);
+ critical_section_inc(cmd, "deactivating");
r = _lv_deactivate(lv);
- memlock_dec(cmd);
- fs_unlock();
+ critical_section_dec(cmd, "deactivated");
- if (!lv_info(cmd, lv, &info, 1, 0) || info.exists)
+ if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
r = 0;
out:
if (lv) {
lv_release_replicator_vgs(lv);
- vg_release(lv->vg);
+ release_vg(lv->vg);
}
return r;
goto out;
if (!_passes_activation_filter(cmd, lv)) {
- log_verbose("Not activating %s/%s due to config file settings",
- lv->vg->name, lv->name);
+ log_verbose("Not activating %s/%s since it does not pass "
+ "activation filter.", lv->vg->name, lv->name);
*activate_lv = 0;
} else
*activate_lv = 1;
r = 1;
out:
if (lv)
- vg_release(lv->vg);
+ release_vg(lv->vg);
return r;
}
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
- int exclusive, int filter)
+ struct lv_activate_opts *laopts, int filter)
{
struct logical_volume *lv;
struct lvinfo info;
goto out;
if (filter && !_passes_activation_filter(cmd, lv)) {
- log_verbose("Not activating %s/%s due to config file settings",
- lv->vg->name, lv->name);
+ log_error("Not activating %s/%s since it does not pass "
+ "activation filter.", lv->vg->name, lv->name);
goto out;
}
goto out;
}
- if (!lv_info(cmd, lv, &info, 0, 0))
+ if (filter)
+ laopts->read_only = _passes_readonly_filter(cmd, lv);
+
+ log_debug("Activating %s/%s%s%s.", lv->vg->name, lv->name,
+ laopts->exclusive ? " exclusively" : "",
+ laopts->read_only ? " read-only" : "");
+
+ if (!lv_info(cmd, lv, 0, &info, 0, 0))
goto_out;
- if (info.exists && !info.suspended && info.live_table) {
+ /*
+ * Nothing to do?
+ */
+ if (info.exists && !info.suspended && info.live_table &&
+ (info.read_only == read_only_lv(lv, laopts))) {
r = 1;
goto out;
}
lv_calculate_readahead(lv, NULL);
- if (exclusive)
- lv->status |= ACTIVATE_EXCL;
-
- memlock_inc(cmd);
- if (!(r = _lv_activate_lv(lv)))
+ critical_section_inc(cmd, "activating");
+ if (!(r = _lv_activate_lv(lv, laopts)))
stack;
- memlock_dec(cmd);
- fs_unlock();
+ critical_section_dec(cmd, "activated");
- if (r && !monitor_dev_for_events(cmd, lv, 1))
+ if (r && !monitor_dev_for_events(cmd, lv, laopts, 1))
stack;
out:
if (lv) {
lv_release_replicator_vgs(lv);
- vg_release(lv->vg);
+ release_vg(lv->vg);
}
return r;
/* Activate LV */
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
- if (!_lv_activate(cmd, lvid_s, exclusive, 0))
+ struct lv_activate_opts laopts = { .exclusive = exclusive };
+
+ if (!_lv_activate(cmd, lvid_s, &laopts, 0))
return_0;
return 1;
/* Activate LV only if it passes filter */
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
{
- if (!_lv_activate(cmd, lvid_s, exclusive, 1))
+ struct lv_activate_opts laopts = { .exclusive = exclusive };
+
+ if (!_lv_activate(cmd, lvid_s, &laopts, 1))
return_0;
return 1;
int pv_uses_vg(struct physical_volume *pv,
struct volume_group *vg)
{
- if (!activation())
+ if (!activation() || !pv->dev)
return 0;
if (!dm_is_dm_major(MAJOR(pv->dev->dev)))