int only_error_or_zero_target = 1;
int r = 0;
- if (dev_cache_use_dm_devs_cache() &&
+ if (dm_devs_cache_use() &&
/* With cache we can avoid status calls for unusable UUIDs */
- (dm_dev = dev_cache_get_dm_dev_by_devno(cmd, dev->dev)) &&
+ (dm_dev = dm_devs_cache_get_by_devno(cmd, dev->dev)) &&
!_is_usable_uuid(dev, dm_dev->name, dm_dev->uuid, check.check_reserved, check.check_lv, is_lv))
return 0;
const char *uuid;
int r = 0;
- if (dev_cache_use_dm_devs_cache()) {
- if ((dm_dev = dev_cache_get_dm_dev_by_devno(cmd, MKDEV(major, minor)))) {
+ if (dm_devs_cache_use()) {
+ if ((dm_dev = dm_devs_cache_get_by_devno(cmd, MKDEV(major, minor)))) {
dm_strncpy(uuid_buf, dm_dev->uuid, uuid_buf_size);
return 1;
}
dm_strncpy(old_style_dlid, dlid, sizeof(old_style_dlid));
- if (dev_cache_use_dm_devs_cache() &&
- !dev_cache_get_dm_dev_by_uuid(cmd, dlid) &&
- !dev_cache_get_dm_dev_by_uuid(cmd, old_style_dlid)) {
+ if (dm_devs_cache_use() &&
+ !dm_devs_cache_get_by_uuid(cmd, dlid) &&
+ !dm_devs_cache_get_by_uuid(cmd, old_style_dlid)) {
log_debug("Cached as inactive %s.", name);
if (dminfo)
memset(dminfo, 0, sizeof(*dminfo));
if (!(dlid = build_dm_uuid(dm->track_pending_delete ? dm->cmd->pending_delete_mem : dm->mem, lv, layer)))
return_0;
- if (dev_cache_use_dm_devs_cache()) {
- if (!(dm_dev = dev_cache_get_dm_dev_by_uuid(dm->cmd, dlid))) {
+ if (dm_devs_cache_use()) {
+ if (!(dm_dev = dm_devs_cache_get_by_uuid(dm->cmd, dlid))) {
log_debug("Cached as not present %s.", name);
return 1;
}
}
}
- dev_cache_destroy_dm_devs();
+ dm_devs_cache_destroy();
log_debug("Running check command on %s", mpath);
/* Drop any cache before DM table manipulation within locked section
* TODO: check if it makes sense to manage cache within lock */
- dev_cache_destroy_dm_devs();
+ dm_devs_cache_destroy();
dtree = _create_partial_dtree(dm, lv, laopts->origin_only);
return r;
}
-int dev_cache_use_dm_devs_cache(void)
+int dm_devs_cache_use(void)
{
return _cache.use_dm_devs_cache;
}
-void dev_cache_destroy_dm_devs(void)
+void dm_devs_cache_destroy(void)
{
_cache.use_dm_devs_cache = 0;
dm_device_list_destroy(&_cache.dm_devs);
}
-int dev_cache_update_dm_devs(void)
+int dm_devs_cache_update(void)
{
struct dm_active_device *dm_dev;
unsigned devs_features;
uint32_t d;
- dev_cache_destroy_dm_devs();
+ dm_devs_cache_destroy();
if (!get_dm_active_devices(NULL, &_cache.dm_devs, &devs_features))
return 1;
return 1;
}
-void dev_cache_dm_devs_label_invalidate(struct cmd_context *cmd)
+void dm_devs_cache_label_invalidate(struct cmd_context *cmd)
{
struct dm_active_device *dm_dev;
struct device *dev;
/* Find active DM device in devs array for given major:minor */
const struct dm_active_device *
-dev_cache_get_dm_dev_by_devno(struct cmd_context *cmd, dev_t devno)
+dm_devs_cache_get_by_devno(struct cmd_context *cmd, dev_t devno)
{
uint32_t d = _shuffle_devno(devno);
/* Find active DM device in devs array for given DM UUID */
const struct dm_active_device *
-dev_cache_get_dm_dev_by_uuid(struct cmd_context *cmd, const char *dm_uuid)
+dm_devs_cache_get_by_uuid(struct cmd_context *cmd, const char *dm_uuid)
{
if (!_cache.dm_uuids)
return NULL;
vt.num_open);
}
- dev_cache_destroy_dm_devs();
+ dm_devs_cache_destroy();
if (_cache.mem)
dm_pool_destroy(_cache.mem);
struct dm_list *dev_cache_get_dev_list_for_vgid(const char *vgid);
struct dm_list *dev_cache_get_dev_list_for_lvid(const char *lvid);
-int dev_cache_use_dm_devs_cache(void);
-int dev_cache_update_dm_devs(void);
-void dev_cache_destroy_dm_devs(void);
-void dev_cache_dm_devs_label_invalidate(struct cmd_context *cmd);
+/*
+ * The cache of dm devices is enabled when the kernel
+ * supports the ability to quickly report on many dm
+ * devs together, in which case we can get all the dm
+ * info at once and store it in this dm_devs_cache.
+ * This avoids many individual dm dev ioctl calls.
+ * The callers of these dm_devs_cache functions must
+ * have an alternative for when dm_devs_cache_use()
+ * returns 0.
+ */
+int dm_devs_cache_use(void);
+int dm_devs_cache_update(void);
+void dm_devs_cache_destroy(void);
+void dm_devs_cache_label_invalidate(struct cmd_context *cmd);
const struct dm_active_device *
-dev_cache_get_dm_dev_by_devno(struct cmd_context *cmd, dev_t devno);
+dm_devs_cache_get_by_devno(struct cmd_context *cmd, dev_t devno);
const struct dm_active_device *
-dev_cache_get_dm_dev_by_uuid(struct cmd_context *cmd, const char *dm_uuid);
+dm_devs_cache_get_by_uuid(struct cmd_context *cmd, const char *dm_uuid);
/*
* The global device cache.
* here, before processing the hints file, so that the dm uuid checks
* in hint processing can benefit from the dm uuid cache.)
*/
- if (!dev_cache_update_dm_devs())
+ if (!dm_devs_cache_update())
return_0;
/*
log_debug("Invalidating devs for any PVs on LVs.");
- if (dev_cache_use_dm_devs_cache())
- dev_cache_dm_devs_label_invalidate(cmd);
+ if (dm_devs_cache_use())
+ dm_devs_cache_label_invalidate(cmd);
else {
dm_list_iterate_items(lvl, lvs)
label_scan_invalidate_lv(cmd, lvl->lv);
int sync_local_dev_names(struct cmd_context* cmd)
{
- dev_cache_destroy_dm_devs();
+ dm_devs_cache_destroy();
memlock_unlock(cmd);
fs_unlock();
return 1;