}
/* Does the config file want us to activate this LV ? */
- if (!lv_activation_filter(cmd, resource, &activate_lv))
+ if (!lv_activation_filter(cmd, resource, &activate_lv, NULL))
return EIO;
if (!activate_lv)
if (lvi.suspended) {
critical_section_inc(cmd, "resuming");
- if (!lv_resume(cmd, resource, 0)) {
+ if (!lv_resume(cmd, resource, 0, NULL)) {
critical_section_dec(cmd, "resumed");
goto error;
}
}
/* Now activate it */
- if (!lv_activate(cmd, resource, exclusive))
+ if (!lv_activate(cmd, resource, exclusive, NULL))
goto error;
return 0;
exclusive = (oldmode == LCK_EXCL) ? 1 : 0;
revert = (lock_flags & LCK_REVERT_MODE) ? 1 : 0;
- if (!lv_resume_if_active(cmd, resource, origin_only, exclusive, revert))
+ if (!lv_resume_if_active(cmd, resource, origin_only, exclusive, revert, NULL))
return EIO;
return 0;
exclusive = (oldmode == LCK_EXCL) ? 1 : 0;
/* Always call lv_suspend to read commited and precommited data */
- if (!lv_suspend_if_active(cmd, resource, origin_only, exclusive))
+ if (!lv_suspend_if_active(cmd, resource, origin_only, exclusive, NULL))
return EIO;
return 0;
return 0; /* We don't need to do anything */
}
- if (!lv_deactivate(cmd, resource))
+ if (!lv_deactivate(cmd, resource, NULL))
return EIO;
if (command & LCK_CLUSTER_VG) {
}
static int _lv_suspend(struct cmd_context *cmd, const char *lvid_s,
- struct lv_activate_opts *laopts, int error_if_not_suspended)
+ struct lv_activate_opts *laopts, int error_if_not_suspended,
+ struct logical_volume *lv)
{
- struct logical_volume *lv = NULL, *lv_pre = NULL, *pvmove_lv = NULL;
+ struct logical_volume *lv_pre = NULL, *pvmove_lv = NULL, *lv_to_free = NULL;
struct lv_list *lvl_pre;
struct seg_list *sl;
struct lv_segment *snap_seg;
if (!activation())
return 1;
- if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
+ if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto_out;
/* Use precommitted metadata if present */
out:
if (lv_pre)
release_vg(lv_pre->vg);
- if (lv) {
- lv_release_replicator_vgs(lv);
- release_vg(lv->vg);
+ if (lv_to_free) {
+ lv_release_replicator_vgs(lv_to_free);
+ release_vg(lv_to_free->vg);
}
return r;
*
* Returns success if the device is not active
*/
-int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive)
+int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive, struct logical_volume *lv)
{
struct lv_activate_opts laopts = {
.origin_only = origin_only,
.exclusive = exclusive
};
- return _lv_suspend(cmd, lvid_s, &laopts, 0);
+ return _lv_suspend(cmd, lvid_s, &laopts, 0, lv);
}
/* No longer used */
***********/
static int _lv_resume(struct cmd_context *cmd, const char *lvid_s,
- struct lv_activate_opts *laopts, int error_if_not_active)
+ struct lv_activate_opts *laopts, int error_if_not_active,
+ struct logical_volume *lv)
{
- struct logical_volume *lv;
+ struct logical_volume *lv_to_free = NULL;
struct lvinfo info;
int r = 0;
int messages_only = 0;
if (!activation())
return 1;
- if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
+ if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto_out;
if (lv_is_thin_pool(lv) && laopts->origin_only)
r = 1;
out:
- if (lv)
- release_vg(lv->vg);
+ if (lv_to_free)
+ release_vg(lv_to_free->vg);
return r;
}
*/
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
unsigned origin_only, unsigned exclusive,
- unsigned revert)
+ unsigned revert, struct logical_volume *lv)
{
struct lv_activate_opts laopts = {
.origin_only = origin_only,
.revert = revert
};
- return _lv_resume(cmd, lvid_s, &laopts, 0);
+ return _lv_resume(cmd, lvid_s, &laopts, 0, lv);
}
-int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only)
+int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, struct logical_volume *lv)
{
struct lv_activate_opts laopts = { .origin_only = origin_only, };
- return _lv_resume(cmd, lvid_s, &laopts, 1);
+ return _lv_resume(cmd, lvid_s, &laopts, 1, lv);
}
static int _lv_has_open_snapshots(struct logical_volume *lv)
return r;
}
-int lv_deactivate(struct cmd_context *cmd, const char *lvid_s)
+int lv_deactivate(struct cmd_context *cmd, const char *lvid_s, struct logical_volume *lv)
{
- struct logical_volume *lv;
+ struct logical_volume *lv_to_free = NULL;
struct lvinfo info;
int r = 0;
if (!activation())
return 1;
- if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
+ if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto out;
if (test_mode()) {
if (!lv_info(cmd, lv, 0, &info, 0, 0) || info.exists)
r = 0;
out:
- if (lv) {
- lv_release_replicator_vgs(lv);
- release_vg(lv->vg);
+ if (lv_to_free) {
+ lv_release_replicator_vgs(lv_to_free);
+ release_vg(lv_to_free->vg);
}
return r;
/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
- int *activate_lv)
+ int *activate_lv, struct logical_volume *lv)
{
- struct logical_volume *lv;
+ struct logical_volume *lv_to_free = NULL;
int r = 0;
if (!activation()) {
return 1;
}
- if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
+ if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto out;
if (!_passes_activation_filter(cmd, lv)) {
*activate_lv = 1;
r = 1;
out:
- if (lv)
- release_vg(lv->vg);
+ if (lv_to_free)
+ release_vg(lv_to_free->vg);
return r;
}
static int _lv_activate(struct cmd_context *cmd, const char *lvid_s,
- struct lv_activate_opts *laopts, int filter)
+ struct lv_activate_opts *laopts, int filter,
+ struct logical_volume *lv)
{
- struct logical_volume *lv;
+ struct logical_volume *lv_to_free = NULL;
struct lvinfo info;
int r = 0;
if (!activation())
return 1;
- if (!(lv = lv_from_lvid(cmd, lvid_s, 0)))
+ if (!lv && !(lv_to_free = lv = lv_from_lvid(cmd, lvid_s, 0)))
goto out;
if (filter && !_passes_activation_filter(cmd, lv)) {
stack;
out:
- if (lv) {
- lv_release_replicator_vgs(lv);
- release_vg(lv->vg);
+ if (lv_to_free) {
+ lv_release_replicator_vgs(lv_to_free);
+ release_vg(lv_to_free->vg);
}
return r;
}
/* Activate LV */
-int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive)
+int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive, struct logical_volume *lv)
{
struct lv_activate_opts laopts = { .exclusive = exclusive };
- if (!_lv_activate(cmd, lvid_s, &laopts, 0))
+ if (!_lv_activate(cmd, lvid_s, &laopts, 0, lv))
return_0;
return 1;
}
/* Activate LV only if it passes filter */
-int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive)
+int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive, struct logical_volume *lv)
{
struct lv_activate_opts laopts = { .exclusive = exclusive };
- if (!_lv_activate(cmd, lvid_s, &laopts, 1))
+ if (!_lv_activate(cmd, lvid_s, &laopts, 1, lv))
return_0;
return 1;
void activation_exit(void);
/* int lv_suspend(struct cmd_context *cmd, const char *lvid_s); */
-int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive);
-int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only);
+int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive, struct logical_volume *lv);
+int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, struct logical_volume *lv);
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s,
- unsigned origin_only, unsigned exclusive, unsigned revert);
-int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive);
+ unsigned origin_only, unsigned exclusive, unsigned revert, struct logical_volume *lv);
+int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive, struct logical_volume *lv);
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s,
- int exclusive);
-int lv_deactivate(struct cmd_context *cmd, const char *lvid_s);
+ int exclusive, struct logical_volume *lv);
+int lv_deactivate(struct cmd_context *cmd, const char *lvid_s, struct logical_volume *lv);
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv);
* Returns 1 if activate_lv has been set: 1 = activate; 0 = don't.
*/
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
- int *activate_lv);
+ int *activate_lv, struct logical_volume *lv);
/*
* Checks against the auto_activation_volume_list and
* returns 1 if the LV should be activated, 0 otherwise.
/* API entry point for LVM */
#ifdef CLUSTER_LOCKING_INTERNAL
static int _lock_resource(struct cmd_context *cmd, const char *resource,
- uint32_t flags)
+ uint32_t flags, struct logical_volume *lv __attribute__((unused)))
#else
-int lock_resource(struct cmd_context *cmd, const char *resource, uint32_t flags)
+int lock_resource(struct cmd_context *cmd, const char *resource, uint32_t flags, struct logical_volume *lv __attribute__((unused)))
#endif
{
char lockname[PATH_MAX];
static int (*_lock_query_fn) (const char *resource, int *mode) = NULL;
static int _lock_resource(struct cmd_context *cmd, const char *resource,
- uint32_t flags)
+ uint32_t flags, struct logical_volume *lv __attribute__((unused)))
{
if (!_lock_fn)
return 0;
}
static int _file_lock_resource(struct cmd_context *cmd, const char *resource,
- uint32_t flags)
+ uint32_t flags, struct logical_volume *lv)
{
char lockfile[PATH_MAX];
unsigned origin_only = (flags & LCK_ORIGIN_ONLY) ? 1 : 0;
switch (flags & LCK_TYPE_MASK) {
case LCK_UNLOCK:
log_very_verbose("Unlocking LV %s%s%s", resource, origin_only ? " without snapshots" : "", revert ? " (reverting)" : "");
- if (!lv_resume_if_active(cmd, resource, origin_only, 0, revert))
+ if (!lv_resume_if_active(cmd, resource, origin_only, 0, revert, NULL))
return 0;
break;
case LCK_NULL:
log_very_verbose("Locking LV %s (NL)", resource);
- if (!lv_deactivate(cmd, resource))
+ if (!lv_deactivate(cmd, resource, NULL))
return 0;
break;
case LCK_READ:
log_very_verbose("Locking LV %s (R)", resource);
- if (!lv_activate_with_filter(cmd, resource, 0))
+ if (!lv_activate_with_filter(cmd, resource, 0, NULL))
return 0;
break;
case LCK_PREAD:
break;
case LCK_WRITE:
log_very_verbose("Locking LV %s (W)%s", resource, origin_only ? " without snapshots" : "");
- if (!lv_suspend_if_active(cmd, resource, origin_only, 0))
+ if (!lv_suspend_if_active(cmd, resource, origin_only, 0, NULL))
return 0;
break;
case LCK_EXCL:
log_very_verbose("Locking LV %s (EX)", resource);
- if (!lv_activate_with_filter(cmd, resource, 1))
+ if (!lv_activate_with_filter(cmd, resource, 1, NULL))
return 0;
break;
default:
* FIXME This should become VG uuid.
*/
static int _lock_vol(struct cmd_context *cmd, const char *resource,
- uint32_t flags, lv_operation_t lv_op)
+ uint32_t flags, lv_operation_t lv_op, struct logical_volume *lv)
{
uint32_t lck_type = flags & LCK_TYPE_MASK;
uint32_t lck_scope = flags & LCK_SCOPE_MASK;
return 0;
}
- if ((ret = _locking.lock_resource(cmd, resource, flags))) {
+ if ((ret = _locking.lock_resource(cmd, resource, flags, lv))) {
if (lck_scope == LCK_VG && !(flags & LCK_CACHE)) {
if (lck_type != LCK_UNLOCK)
lvmcache_lock_vgname(resource, lck_type == LCK_READ);
return ret;
}
-int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags)
+int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, struct logical_volume *lv)
{
char resource[258] __attribute__((aligned(8)));
lv_operation_t lv_op;
strncpy(resource, vol, sizeof(resource) - 1);
resource[sizeof(resource) - 1] = '\0';
- if (!_lock_vol(cmd, resource, flags, lv_op))
+ if (!_lock_vol(cmd, resource, flags, lv_op, lv))
return_0;
/*
(flags & (LCK_CACHE | LCK_HOLD)))
return 1;
- if (!_lock_vol(cmd, resource, (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op))
+ if (!_lock_vol(cmd, resource, (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op, lv))
return_0;
return 1;
{
memlock_unlock(cmd);
- return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC_LOCAL);
+ return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC_LOCAL, NULL);
}
int sync_dev_names(struct cmd_context* cmd)
{
memlock_unlock(cmd);
- return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC);
+ return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC, NULL);
}
#include "uuid.h"
#include "config.h"
+struct logical_volume;
+
int init_locking(int type, struct cmd_context *cmd, int suppress_messages);
void fin_locking(void);
void reset_locking(void);
* Lock/unlock an individual logical volume
* char *vol holds lvid
*/
-int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags);
+int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, struct logical_volume *lv);
/*
* Internal locking representation.
#define lock_lv_vol(cmd, lv, flags) \
(find_replicator_vgs((lv)) ? \
- lock_vol(cmd, (lv)->lvid.s, flags | LCK_LV_CLUSTERED(lv)) : \
+ lock_vol(cmd, (lv)->lvid.s, flags | LCK_LV_CLUSTERED(lv), lv) : \
0)
#define unlock_vg(cmd, vol) \
do { \
if (is_real_vg(vol)) \
sync_dev_names(cmd); \
- (void) lock_vol(cmd, vol, LCK_VG_UNLOCK); \
+ (void) lock_vol(cmd, vol, LCK_VG_UNLOCK, NULL); \
} while (0)
#define unlock_and_release_vg(cmd, vg, vol) \
do { \
#define deactivate_lv_local(cmd, lv) \
lock_lv_vol(cmd, lv, LCK_LV_DEACTIVATE | LCK_LOCAL)
#define drop_cached_metadata(vg) \
- lock_vol((vg)->cmd, (vg)->name, LCK_VG_DROP_CACHE)
+ lock_vol((vg)->cmd, (vg)->name, LCK_VG_DROP_CACHE, NULL)
#define remote_commit_cached_metadata(vg) \
- lock_vol((vg)->cmd, (vg)->name, LCK_VG_COMMIT)
+ lock_vol((vg)->cmd, (vg)->name, LCK_VG_COMMIT, NULL)
#define remote_revert_cached_metadata(vg) \
- lock_vol((vg)->cmd, (vg)->name, LCK_VG_REVERT)
+ lock_vol((vg)->cmd, (vg)->name, LCK_VG_REVERT, NULL)
#define remote_backup_metadata(vg) \
- lock_vol((vg)->cmd, (vg)->name, LCK_VG_BACKUP)
+ lock_vol((vg)->cmd, (vg)->name, LCK_VG_BACKUP, NULL)
int sync_local_dev_names(struct cmd_context* cmd);
int sync_dev_names(struct cmd_context* cmd);
#include "config.h"
typedef int (*lock_resource_fn) (struct cmd_context * cmd, const char *resource,
- uint32_t flags);
+ uint32_t flags, struct logical_volume *lv);
typedef int (*query_resource_fn) (const char *resource, int *mode);
typedef void (*fin_lock_fn) (void);
}
static int _no_lock_resource(struct cmd_context *cmd, const char *resource,
- uint32_t flags)
+ uint32_t flags, struct logical_volume *lv)
{
switch (flags & LCK_SCOPE_MASK) {
case LCK_VG:
case LCK_LV:
switch (flags & LCK_TYPE_MASK) {
case LCK_NULL:
- return lv_deactivate(cmd, resource);
+ return lv_deactivate(cmd, resource, lv);
case LCK_UNLOCK:
- return lv_resume_if_active(cmd, resource, (flags & LCK_ORIGIN_ONLY) ? 1: 0, 0, (flags & LCK_REVERT) ? 1 : 0);
+ return lv_resume_if_active(cmd, resource, (flags & LCK_ORIGIN_ONLY) ? 1: 0, 0, (flags & LCK_REVERT) ? 1 : 0, NULL);
case LCK_READ:
- return lv_activate_with_filter(cmd, resource, 0);
+ return lv_activate_with_filter(cmd, resource, 0, NULL);
case LCK_WRITE:
- return lv_suspend_if_active(cmd, resource, (flags & LCK_ORIGIN_ONLY) ? 1 : 0, 0);
+ return lv_suspend_if_active(cmd, resource, (flags & LCK_ORIGIN_ONLY) ? 1 : 0, 0, lv);
case LCK_EXCL:
- return lv_activate_with_filter(cmd, resource, 1);
+ return lv_activate_with_filter(cmd, resource, 1, NULL);
default:
break;
}
static int _readonly_lock_resource(struct cmd_context *cmd,
const char *resource,
- uint32_t flags)
+ uint32_t flags, struct logical_volume *lv)
{
if ((flags & LCK_TYPE_MASK) == LCK_WRITE &&
(flags & LCK_SCOPE_MASK) == LCK_VG &&
return 0;
}
- return _no_lock_resource(cmd, resource, flags);
+ return _no_lock_resource(cmd, resource, flags, lv);
}
int init_no_locking(struct locking_type *locking, struct cmd_context *cmd __attribute__((unused)),
struct pv_list *pvl;
int ret = 1;
- if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return 0;
}
dev_close_all();
- if (!lock_vol(cmd, vg_name, LCK_VG_WRITE))
+ if (!lock_vol(cmd, vg_name, LCK_VG_WRITE, NULL))
return_NULL;
if (!(vg = vg_read_internal(cmd, vg_name, vgid, 1, &consistent)))
already_locked = lvmcache_vgname_is_locked(vg_name);
if (!already_locked && !(misc_flags & READ_WITHOUT_LOCK) &&
- !lock_vol(cmd, vg_name, lock_flags)) {
+ !lock_vol(cmd, vg_name, lock_flags, NULL)) {
log_error("Can't get lock for %s", vg_name);
return _vg_make_handle(cmd, vg, FAILED_LOCKING);
}
*/
uint32_t vg_lock_newname(struct cmd_context *cmd, const char *vgname)
{
- if (!lock_vol(cmd, vgname, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, vgname, LCK_VG_WRITE, NULL)) {
return FAILED_LOCKING;
}
if (!vg_check_write_mode(vg))
return -1;
- if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return -1;
}
}
if (! dm_list_empty(&vg->removed_pvs)) {
- if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(vg->cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return 0;
}
lp->wait_completion);
/* use LCK_VG_WRITE to match lvconvert()'s READ_FOR_UPDATE */
- if (!lock_vol(cmd, vg_name, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, vg_name, LCK_VG_WRITE, NULL)) {
log_error("ABORTING: Can't relock VG for %s "
"after polling finished", vg_name);
ret = ECMD_FAILED;
* take the lock here, pvs with 0 mdas in a non-orphan VG will
* be processed twice.
*/
- if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE, NULL)) {
log_error("Unable to obtain global lock.");
return ECMD_FAILED;
}
}
for (i = 0; i < argc; i++) {
- if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return ECMD_FAILED;
}
struct device *dev;
int ret = ECMD_FAILED;
- if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return ECMD_FAILED;
}
int vg_needs_pv_write = 0;
if (is_orphan_vg(vg_name)) {
- if (!lock_vol(cmd, vg_name, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, vg_name, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphans");
return 0;
}
return EINVALID_CMD_LINE;
}
- if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_READ)) {
+ if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_READ, NULL)) {
log_error("Unable to obtain global lock.");
return ECMD_FAILED;
}
arg_count(cmd, exported_ARG) ?
"of exported volume group(s)" : "in no volume group");
- if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE, NULL)) {
log_error("Unable to obtain global lock.");
return ECMD_FAILED;
}
dm_list_init(&tags);
- if (lock_global && !lock_vol(cmd, VG_GLOBAL, LCK_VG_READ)) {
+ if (lock_global && !lock_vol(cmd, VG_GLOBAL, LCK_VG_READ, NULL)) {
log_error("Unable to obtain global lock.");
return ECMD_FAILED;
}
lvmcache_seed_infos_from_lvmetad(cmd);
- if (!lock_vol(cmd, vg_name, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, vg_name, LCK_VG_WRITE, NULL)) {
log_error("Unable to lock volume group %s", vg_name);
return ECMD_FAILED;
}
- if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Unable to lock orphans");
unlock_vg(cmd, vg_name);
return ECMD_FAILED;
!vg_set_mda_copies(vg, vp_new.vgmetadatacopies))
goto bad_orphan;
- if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
goto bad_orphan;
}
goto bad;
}
} else { /* no --restore, normal vgextend */
- if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
unlock_and_release_vg(cmd, vg, vg_name);
return ECMD_FAILED;
return ECMD_FAILED;
}
- if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_ORPHANS, LCK_VG_WRITE, NULL)) {
log_error("Can't get lock for orphan PVs");
return ECMD_FAILED;
}
return EINVALID_CMD_LINE;
}
- if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE)) {
+ if (!lock_vol(cmd, VG_GLOBAL, LCK_VG_WRITE, NULL)) {
log_error("Unable to obtain global lock.");
return ECMD_FAILED;
}