# range from 32 KiB to 1048576 in multiples of 32.
# cache_pool_chunk_size = 64
+ # Specify the default cache mode used for new cache pools.
+ # Possible options are:
+ # "writethrough" - Data blocks are immediately written from
+ # the cache to disk.
+ # "writeback" - Data blocks are written from the cache
+ # back to disk after some delay to improve
+ # performance.
+ # cache_pool_cachemode = "writethrough"
+
# Set to 1 to guarantee that thin pool metadata will always
# be placed on different PVs from the pool data.
thin_pool_metadata_require_separate_pvs = 0
cfg(allocation_wipe_signatures_when_zeroing_new_lvs_CFG, "wipe_signatures_when_zeroing_new_lvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, 1, vsn(2, 2, 105), NULL)
cfg(allocation_mirror_logs_require_separate_pvs_CFG, "mirror_logs_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_MIRROR_LOGS_REQUIRE_SEPARATE_PVS, vsn(2, 2, 85), NULL)
cfg(allocation_cache_pool_metadata_require_separate_pvs_CFG, "cache_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_CACHE_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 106), NULL)
+cfg(allocation_cache_pool_cachemode_CFG, "cache_pool_cachemode", allocation_CFG_SECTION, 0, CFG_TYPE_STRING, DEFAULT_CACHE_POOL_CACHEMODE, vsn(2, 2, 113), NULL)
cfg_runtime(allocation_cache_pool_chunk_size_CFG, "cache_pool_chunk_size", allocation_CFG_SECTION, CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 106), NULL)
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL)
cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL)
#define DEFAULT_CACHE_POOL_CHUNK_SIZE 64 /* KB */
#define DEFAULT_CACHE_POOL_MIN_METADATA_SIZE 2048 /* KB */
#define DEFAULT_CACHE_POOL_MAX_METADATA_SIZE (16 * 1024 * 1024) /* KB */
+#define DEFAULT_CACHE_POOL_CACHEMODE "writethrough"
#define DEFAULT_UMASK 0077
#include "activate.h"
#include "defaults.h"
+/*
+ * Return the canonical name of a cache pool segment's cache mode,
+ * derived from its feature flags.  Returns "unknown" when neither
+ * mode flag is set.
+ */
+const char *get_cachepool_cachemode_name(const struct lv_segment *seg)
+{
+	/* Writeback wins if both flags happen to be set, as in the original. */
+	return (seg->feature_flags & DM_CACHE_FEATURE_WRITEBACK) ? "writeback" :
+	       (seg->feature_flags & DM_CACHE_FEATURE_WRITETHROUGH) ? "writethrough" :
+	       "unknown";
+}
+
int update_cache_pool_params(struct volume_group *vg, unsigned attr,
int passed_args, uint32_t data_extents,
uint64_t *pool_metadata_size,
return dm_pool_strdup(mem, get_pool_discards_name(seg->discards));
}
+/* Duplicate the segment's cache mode name into the given pool memory. */
+char *lvseg_cachemode_dup(struct dm_pool *mem, const struct lv_segment *seg)
+{
+	const char *mode = get_cachepool_cachemode_name(seg);
+
+	return dm_pool_strdup(mem, mode);
+}
+
+
#ifdef DMEVENTD
# include "libdevmapper-event.h"
#endif
uint64_t lvseg_chunksize(const struct lv_segment *seg);
char *lvseg_segtype_dup(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_discards_dup(struct dm_pool *mem, const struct lv_segment *seg);
+char *lvseg_cachemode_dup(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_monitor_dup(struct dm_pool *mem, const struct lv_segment *seg);
char *lvseg_tags_dup(const struct lv_segment *seg);
char *lvseg_devices(struct dm_pool *mem, const struct lv_segment *seg);
/* -- metadata/raid_manip.c */
/* ++ metadata/cache_manip.c */
+const char *get_cachepool_cachemode_name(const struct lv_segment *seg);
int update_cache_pool_params(struct volume_group *vg, unsigned attr,
int passed_args, uint32_t data_extents,
uint64_t *pool_metadata_size,
FIELD(SEGS, seg, SIZ, "Chunk", list, 5, chunksize, chunk_size, "For snapshots, the unit of data used when tracking changes.", 0)
FIELD(SEGS, seg, NUM, "#Thins", list, 4, thincount, thin_count, "For thin pools, the number of thin volumes in this pool.", 0)
FIELD(SEGS, seg, STR, "Discards", list, 8, discards, discards, "For thin pools, how discards are handled.", 0)
+FIELD(SEGS, seg, STR, "Cachemode", list, 9, cachemode, cachemode, "For cache pools, how writes are cached.", 0)
FIELD(SEGS, seg, BIN, "Zero", list, 4, thinzero, zero, "For thin pools, if zeroing is enabled.", 0)
FIELD(SEGS, seg, NUM, "TransId", list, 4, transactionid, transaction_id, "For thin pools, the transaction id.", 0)
FIELD(SEGS, seg, NUM, "ThId", list, 4, thinid, thin_id, "For thin volume, the thin device id.", 0)
#define _thin_id_set prop_not_implemented_set
GET_LVSEG_STR_PROPERTY_FN(discards, lvseg_discards_dup(lvseg->lv->vg->vgmem, lvseg))
#define _discards_set prop_not_implemented_set
+GET_LVSEG_STR_PROPERTY_FN(cachemode, lvseg_cachemode_dup(lvseg->lv->vg->vgmem, lvseg))
+#define _cachemode_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(seg_start, (SECTOR_SIZE * lvseg_start(lvseg)))
#define _seg_start_set prop_not_implemented_set
GET_LVSEG_NUM_PROPERTY_FN(seg_start_pe, lvseg->le)
return _field_set_value(field, "", NULL);
}
+/* Report the cache mode of a cache pool (or of a cache LV's pool). */
+static int _cachemode_disp(struct dm_report *rh, struct dm_pool *mem,
+			   struct dm_report_field *field,
+			   const void *data, void *private)
+{
+	const struct lv_segment *seg = (const struct lv_segment *) data;
+	const char *mode_name;
+
+	/* For a cache LV, look at the first segment of its cache pool. */
+	if (seg_is_cache(seg))
+		seg = first_seg(seg->pool_lv);
+
+	/* Non-cache-pool segments get an empty field. */
+	if (!seg_is_cache_pool(seg))
+		return _field_set_value(field, "", NULL);
+
+	mode_name = get_cachepool_cachemode_name(seg);
+	return dm_report_field_string(rh, field, &mode_name);
+}
+
+
static int _originsize_disp(struct dm_report *rh, struct dm_pool *mem,
struct dm_report_field *field,
const void *data, void *private)
allocation policy:
.IP
cling_tag_list = [ "@site1", "@site2" ]
+.IP
+\fBcache_pool_cachemode\fP \(em Cache mode for new cache pools.
+.IP
+This is the default cache mode a new cache pool will be given.
+Valid cache modes are:
+\fBwritethrough\fP \(em Data blocks are immediately written from the
+cache to disk.
+\fBwriteback\fP \(em Data blocks are written from the cache back to
+disk after some delay to improve performance.
.TP
\fBlog\fP \(em Default log settings
.IP
static int _read_pool_params(struct lvconvert_params *lp, struct cmd_context *cmd,
const char *type_str, int *pargc, char ***pargv)
{
- const char *tmp_str;
int cachepool = 0;
int thinpool = 0;
thinpool = 1;
if (cachepool) {
- if ((tmp_str = arg_str_value(cmd, cachemode_ARG, NULL)) &&
- !get_cache_mode(tmp_str, &lp->feature_flags))
+ const char *cachemode = arg_str_value(cmd, cachemode_ARG, NULL);
+ if (!cachemode)
+ cachemode = find_config_tree_str(cmd, allocation_cache_pool_cachemode_CFG, NULL);
+
+ if (!get_cache_mode(cachemode, &lp->feature_flags))
return_0;
} else {
if (arg_from_list_is_set(cmd, "is valid only with cache pools",
static int _read_cache_pool_params(struct lvcreate_params *lp,
struct cmd_context *cmd)
{
- const char *str_arg;
+ const char *cachemode;
if (!segtype_is_cache_pool(lp->segtype))
return 1;
- if ((str_arg = arg_str_value(cmd, cachemode_ARG, NULL)) &&
- !get_cache_mode(str_arg, &lp->feature_flags))
+ cachemode = arg_str_value(cmd, cachemode_ARG, NULL);
+ if (!cachemode)
+ cachemode = find_config_tree_str(cmd, allocation_cache_pool_cachemode_CFG, NULL);
+
+ if (!get_cache_mode(cachemode, &lp->feature_flags))
return_0;
return 1;