Introduce sync_local_dev_names and CLVMD_CMD_SYNC_NAMES to issue fs_unlock.
Version 2.02.81 -
===================================
+ Replace fs_unlock by sync_local_dev_names to notify local clvmd. (2.02.80)
+ Introduce sync_local_dev_names and CLVMD_CMD_SYNC_NAMES to issue fs_unlock.
Accept fusion fio in device type filter.
Add disk to mirrored log type conversion.
#define CLVMD_CMD_SET_DEBUG 42
#define CLVMD_CMD_VG_BACKUP 43
#define CLVMD_CMD_RESTART 44
+#define CLVMD_CMD_SYNC_NAMES 45
#endif
do_refresh_cache();
break;
+ case CLVMD_CMD_SYNC_NAMES:
+ lvm_do_fs_unlock();
+ break;
+
case CLVMD_CMD_SET_DEBUG:
debug = args[0];
break;
case CLVMD_CMD_GET_CLUSTERNAME:
case CLVMD_CMD_SET_DEBUG:
case CLVMD_CMD_VG_BACKUP:
+ case CLVMD_CMD_SYNC_NAMES:
case CLVMD_CMD_LOCK_QUERY:
case CLVMD_CMD_RESTART:
break;
case CLVMD_CMD_LOCK_VG:
case CLVMD_CMD_VG_BACKUP:
+ case CLVMD_CMD_SYNC_NAMES:
case CLVMD_CMD_LOCK_QUERY:
/* Nothing to do here */
break;
void lvm_do_fs_unlock(void)
{
pthread_mutex_lock(&lvm_lock);
+ DEBUGLOG("Syncing device names\n");
fs_unlock();
pthread_mutex_unlock(&lvm_lock);
}
void fs_unlock(void)
{
if (!memlock()) {
+ log_debug("Syncing device names");
/* Wait for all processed udev devices */
if (!dm_udev_wait(_fs_cookie))
stack;
* locks are cluster-wide.
* Also, if the lock is exclusive it makes no sense to try to
* acquire it on all nodes, so just do that on the local node too.
- * One exception, is that P_ locks /do/ get distributed across
- * the cluster because they might have side-effects.
+ * One exception is that P_ locks (except VG_SYNC_NAMES) /do/ get
+ * distributed across the cluster because they might have side-effects.
*/
- if (strncmp(name, "P_", 2) &&
- (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
- (flags & LCK_TYPE_MASK) == LCK_EXCL ||
- (flags & LCK_LOCAL) ||
- !(flags & LCK_CLUSTER_VG)))
+ if ((strncmp(name, "P_", 2) &&
+ (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
+ (flags & LCK_TYPE_MASK) == LCK_EXCL ||
+ (flags & LCK_LOCAL) ||
+ !(flags & LCK_CLUSTER_VG))) ||
+ (clvmd_cmd == CLVMD_CMD_SYNC_NAMES && (flags & LCK_LOCAL)))
node = ".";
status = _cluster_request(clvmd_cmd, node, args, len,
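For readability, the node-selection rule above can be restated as a standalone predicate. This is a sketch only: want_local_node() is a hypothetical helper that does not exist in the tree, and the CLVMD_* / LCK_* macros are the ones from the existing headers and this patch.

    /* Hypothetical helper, not part of the patch: returns 1 when the
     * request should be sent to the local node (".") only. */
    static int want_local_node(const char *name, int clvmd_cmd, unsigned flags)
    {
        /* Non-P_ requests stay local when they are VG locks, exclusive,
         * explicitly local, or the VG is not clustered at all. */
        if (strncmp(name, "P_", 2) &&
            (clvmd_cmd == CLVMD_CMD_LOCK_VG ||
             (flags & LCK_TYPE_MASK) == LCK_EXCL ||
             (flags & LCK_LOCAL) ||
             !(flags & LCK_CLUSTER_VG)))
            return 1;

        /* New in this patch: sync-names requests marked LCK_LOCAL also
         * stay local instead of being distributed across the cluster. */
        if (clvmd_cmd == CLVMD_CMD_SYNC_NAMES && (flags & LCK_LOCAL))
            return 1;

        return 0;
    }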
switch (flags & LCK_SCOPE_MASK) {
case LCK_VG:
+ if (!strcmp(resource, VG_SYNC_NAMES)) {
+ log_very_verbose("Requesting sync names.");
+ return _lock_for_cluster(cmd, CLVMD_CMD_SYNC_NAMES,
+ flags & ~LCK_HOLD, resource);
+ }
if (flags == LCK_VG_BACKUP) {
log_very_verbose("Requesting backup of VG metadata for %s",
resource);
if (strcmp(resource, VG_GLOBAL))
lvmcache_drop_metadata(resource, 0);
+ if (!strcmp(resource, VG_SYNC_NAMES))
+ fs_unlock();
+
/* LCK_CACHE does not require a real lock */
if (flags & LCK_CACHE)
break;
char path[PATH_MAX];
/* We'll allow operations on orphans */
- if (is_orphan_vg(vgname) || is_global_vg(vgname))
+ if (!is_real_vg(vgname))
return 1;
/* LVM1 is only present in 2.4 kernels. */
*/
#define VG_ORPHANS "#orphans"
#define VG_GLOBAL "#global"
+#define VG_SYNC_NAMES "#sync_names"
/*
* Common combinations
lock_vol((vg)->cmd, (vg)->name, LCK_VG_REVERT)
#define remote_backup_metadata(vg) \
lock_vol((vg)->cmd, (vg)->name, LCK_VG_BACKUP)
+#define sync_local_dev_names(cmd) \
+ lock_vol(cmd, VG_SYNC_NAMES, LCK_NONE | LCK_CACHE | LCK_LOCAL)
/* Process list of LVs */
int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs);
{
switch (flags & LCK_SCOPE_MASK) {
case LCK_VG:
+ if (!strcmp(resource, VG_SYNC_NAMES))
+ fs_unlock();
break;
case LCK_LV:
switch (flags & LCK_TYPE_MASK) {
return 0;
}
- fs_unlock(); /* Wait until devices are available */
+ sync_local_dev_names(cmd); /* Wait until devices are available */
log_verbose("Clearing start of logical volume \"%s\"", lv->name);
const char *lv_name);
int is_global_vg(const char *vg_name);
int is_orphan_vg(const char *vg_name);
+int is_real_vg(const char *vg_name);
int vg_missing_pv_count(const struct volume_group *vg);
int vgs_are_compatible(struct cmd_context *cmd,
struct volume_group *vg_from,
return (vg_name && !strncmp(vg_name, ORPHAN_PREFIX, sizeof(ORPHAN_PREFIX) - 1)) ? 1 : 0;
}
+/*
+ * Exclude pseudo VG names used for locking.
+ */
+int is_real_vg(const char *vg_name)
+{
+ return (vg_name && *vg_name != '#');
+}
+
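A small standalone check (not part of the patch) showing which names the new helper treats as real VGs; the pseudo names are the ones defined in the locking header hunk above.

    #include <assert.h>

    /* Local copy of the new helper, for illustration only. */
    static int is_real_vg(const char *vg_name)
    {
        return (vg_name && *vg_name != '#');
    }

    int main(void)
    {
        /* Pseudo VG names used only for locking are rejected... */
        assert(!is_real_vg("#orphans"));     /* VG_ORPHANS */
        assert(!is_real_vg("#global"));      /* VG_GLOBAL */
        assert(!is_real_vg("#sync_names"));  /* VG_SYNC_NAMES */

        /* ...while ordinary VG names pass. */
        assert(is_real_vg("vg00"));

        return 0;
    }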
/*
* Returns:
* 0 - fail
log_verbose("Executing: %s", _verbose_args(argv, buf, sizeof(buf)));
- fs_unlock(); /* Flush oops and ensure cookie is not shared */
+ sync_local_dev_names(cmd); /* Flush ops and reset dm cookie */
if ((pid = fork()) == -1) {
log_error("fork failed: %s", strerror(errno));
(void) dm_prepare_selinux_context(path, S_IFBLK);
old_mask = umask(0);
if (mknod(path, S_IFBLK | mode, dev) < 0) {
- log_error("Unable to make device node for '%s'", dev_name);
+ log_error("%s: mknod for %s failed: %s", path, dev_name, strerror(errno));
umask(old_mask);
(void) dm_prepare_selinux_context(NULL, 0);
return 0;
sigaction(SIGCHLD, &act, NULL);
- fs_unlock(); /* Flush oops and ensure cookie is not shared */
+ sync_local_dev_names(cmd); /* Flush ops and reset dm cookie */
if ((pid = fork()) == -1) {
log_error("fork failed: %s", strerror(errno));