cfg(devices_use_devicesfile_CFG, "use_devicesfile", devices_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_USE_DEVICES_FILE, vsn(2, 3, 12), "@DEFAULT_USE_DEVICES_FILE@", 0, NULL,
"Enable or disable the use of a devices file.\n"
"When enabled, lvm will only use devices that\n"
- "are lised in the devices file. A devices file will\n"
+ "are listed in the devices file. A devices file will\n"
"be used, regardless of this setting, when the --devicesfile\n"
"option is set to a specific file name.\n")
cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, 0, vsn(2, 2, 165), NULL, 0, NULL,
"The maximum number of chunks in a cache pool.\n"
- "For cache target v1.9 the recommended maximumm is 1000000 chunks.\n"
+ "For cache target v1.9 the recommended maximum is 1000000 chunks.\n"
"Using cache pool with more chunks may degrade cache performance.\n")
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL,
"to define fields to display and sort fields for the log report.\n"
"You can also use log/command_log_selection to define selection\n"
"criteria used each time the log is reported.\n"
- "Note that if report/output_format (or --reporformat command line\n"
+ "Note that if report/output_format (or --reportformat command line\n"
"option) is set to json or json_std, then log/report_command_log=1\n"
"is default.\n")
" uses are present. Other PVs in the Volume Group may be missing.\n"
" degraded\n"
" Like complete, but additionally RAID LVs of segment type raid1,\n"
- " raid4, raid5, radid6 and raid10 will be activated if there is no\n"
+ " raid4, raid5, raid6 and raid10 will be activated if there is no\n"
" data loss, i.e. they have sufficient redundancy to present the\n"
" entire addressable range of the Logical Volume.\n"
" partial\n"
" %F\n"
" Equivalent to %Y-%m-%d (the ISO 8601 date format).\n"
" %G\n"
- " The ISO 8601 week-based year with century as adecimal number.\n"
+ " The ISO 8601 week-based year with century as a decimal number.\n"
" The 4-digit year corresponding to the ISO week number (see %V).\n"
" This has the same format and value as %Y, except that if the\n"
" ISO week number belongs to the previous or next year, that year\n"
FIELD(SEGS, seg, NUM, "VDOHashZoneThreads", list, 0, vdo_hash_zone_threads, vdo_hash_zone_threads, "Threads for subdivide parts (vdopool).", 0)
FIELD(SEGS, seg, NUM, "VDOLogicalThreads", list, 0, vdo_logical_threads, vdo_logical_threads, "Logical threads for subdivide parts (vdopool).", 0)
FIELD(SEGS, seg, NUM, "VDOPhysicalThreads", list, 0, vdo_physical_threads, vdo_physical_threads, "Physical threads for subdivide parts (vdopool).", 0)
-FIELD(SEGS, seg, NUM, "VDOMaxDiscard", list, 0, vdo_max_discard, vdo_max_discard, "Maximum discard size volume can recieve (vdopool).", 0)
+FIELD(SEGS, seg, NUM, "VDOMaxDiscard", list, 0, vdo_max_discard, vdo_max_discard, "Maximum discard size volume can receive (vdopool).", 0)
FIELD(SEGS, seg, STR, "VDOWritePolicy", list, 0, vdo_write_policy, vdo_write_policy, "Specified write policy (vdopool).", 0)
FIELD(SEGS, seg, SIZ, "VDOHeaderSize", list, 0, vdo_header_size, vdo_header_size, "Header size at front of vdopool.", 0)
FIELD_F(STATS, NUM, "WSz/s", 5, dm_stats_write_secs, "write_size_per_sec", "Size of data written per second.")
FIELD_F(STATS, NUM, "AvgRqSz", 7, dm_stats_arqsz, "avg_request_size", "Average request size.")
FIELD_F(STATS, NUM, "QSize", 5, dm_stats_qusz, "queue_size", "Average queue size.")
-FIELD_F(STATS, NUM, "AWait", 5, dm_stats_await, "await", "Averate wait time.")
-FIELD_F(STATS, NUM, "RdAWait", 7, dm_stats_r_await, "read_await", "Averate read wait time.")
-FIELD_F(STATS, NUM, "WrAWait", 7, dm_stats_w_await, "write_await", "Averate write wait time.")
+FIELD_F(STATS, NUM, "AWait", 5, dm_stats_await, "await", "Average wait time.")
+FIELD_F(STATS, NUM, "RdAWait", 7, dm_stats_r_await, "read_await", "Average read wait time.")
+FIELD_F(STATS, NUM, "WrAWait", 7, dm_stats_w_await, "write_await", "Average write wait time.")
FIELD_F(STATS, NUM, "Throughput", 10, dm_stats_tput, "throughput", "Throughput.")
FIELD_F(STATS, NUM, "SvcTm", 5, dm_stats_svctm, "service_time", "Service time.")
FIELD_F(STATS, NUM, "Util%", 5, dm_stats_util, "util", "Utilization.")
.
.TP
.B -i
-Query the running daemon instance for the status informations. The format is
+Query the running daemon instance for the status information. The format is
internal and unstable and it is targeted for developers.
Format may change between versions.
.
dm message (253:0) [ opencount flush ] @stats_list dmstats [16384] (*1)
Read alias 'data' from aux_data
Found group_id 0: alias="data"
-dm_stats_walk_init: initialised flags to 4000000000000
+dm_stats_walk_init: initialized flags to 4000000000000
starting stats walk with GROUP
exiting _filemap_monitor_get_events() with deleted=0, check=0
Waiting for check interval
Specify uuid prefix for snapshot volume used during vdo conversion.
.TP
.B LVM_BINARY
-Allow to overide command called from lvm. Defaults to "\fIlvm\fP".
+Allow to override command called from lvm. Defaults to "\fIlvm\fP".
.TP
.B VDO_BINARY
-Allow to overide command called from vdo. Defaults to "\fIvdo\fP".
+Allow to override command called from vdo. Defaults to "\fIvdo\fP".
.
.SH SEE ALSO
.
is used if a PV is placed on top of an lvm LV, reported by sysfs.
.IP \[bu] 2
.B loop_file
-is used for loop devices, the backing file name repored by sysfs.
+is used for loop devices, the backing file name reported by sysfs.
.IP \[bu] 2
.B devname
the device name is used if no other type applies.
blocks to N new images on new devices. Converting to a parity RAID level
requires reading all LV data blocks, calculating parity, and writing the
new parity blocks. Synchronization can take a long time depending on the
-throughpout of the devices used and the size of the RaidLV. It can degrade
+throughput of the devices used and the size of the RaidLV. It can degrade
performance. Rate controls also apply to conversion; see
\fB--minrecoveryrate\fP and \fB--maxrecoveryrate\fP.
.P
Overrides report/binary_values_as_numeric configuration setting.
.TP
.B --headings
-Overrides report/headings congiguration settings.
+Overrides report/headings configuration settings.
.TP
.B --nameprefixes
Overrides report/prefixes configuration setting.
The "zero" property of a thin pool determines if chunks are overwritten
with zeros when they are provisioned for a thin LV. The current setting
is reported with lvs -o zero (displaying "zero" or "1" when zeroing is
-enabled), or 'z' in the eigth lv_attr. The option -Z|--zero is used to
+enabled), or 'z' in the eighth lv_attr. The option -Z|--zero is used to
specify the zeroing mode.
Create a thin pool with zeroing mode:
command. However, not all options can be changed.
Only compression and deduplication options can also be changed for an active VDO LV.
Lowest priority options are specified with configuration file,
-then with --vdosettings and highest are expliction option --compression
+then with --vdosettings and highest are explicit option --compression
and --deduplication.
.P
.I Example
.
You can convert an existing VDO LV into a thin volume. After this conversion
you can create a thin snapshot or you can add more thin volumes
-with thin-pool named after orignal LV name LV_tpool0.
+with thin-pool named after original LV name LV_tpool0.
.P
.I Example
.nf
vgcreate creates a new VG on block devices. If the devices were not
previously initialized as PVs with \fBpvcreate\fP(8), vgcreate will
-inititialize them, making them PVs. The pvcreate options for initializing
+initialize them, making them PVs. The pvcreate options for initializing
devices are also available with vgcreate.
.P
When vgcreate uses an existing PV, that PV's existing values for metadata
.IP "\fBlvm2rescue\fR" 4
.IX Item "lvm2rescue"
Causes the initrd image to run a shell prior to mounting the root filesystem. This is
-helpful in disaster situations where your initrd image is accessable, but there is
+helpful in disaster situations where your initrd image is accessible, but there is
a problem with the root filesystem (corrupted image, incorrect device setup, etc.). This
option is (of course) optional.
.SH "OPTIONS"
.IX Item "$BINFILES"
Overrides the default value of \f(CW$BINFILES\fR (which is \*(L"/lib/lvm\-200/lvm /bin/bash /bin/busybox /sbin/pivot_root\*(R"). The difference between using this and adding
a file to the \f(CW$EXTRAFILES\fR list above is that libraries that these depend upon are also included. You can still use \f(CW$EXTRAFILES\fR to achieve the same effect, but
-you must resolve library dependencies youself.
+you must resolve library dependencies yourself.
.ie n .IP "\fB\fB$INITRDSIZE\fB\fR" 4
.el .IP "\fB\f(CB$INITRDSIZE\fB\fR" 4
.IX Item "$INITRDSIZE"
=item B<lvm2rescue>
Causes the initrd image to run a shell prior to mounting the root filesystem. This is
-helpful in disaster situations where your initrd image is accessable, but there is
+helpful in disaster situations where your initrd image is accessible, but there is
a problem with the root filesystem (corrupted image, incorrect device setup, etc.). This
option is (of course) optional.
Overrides the default value of $BINFILES (which is "/lib/lvm-200/lvm /bin/bash /bin/busybox /sbin/pivot_root"). The difference between using this and adding
a file to the $EXTRAFILES list above is that libraries that these depend upon are also included. You can still use $EXTRAFILES to achieve the same effect, but
-you must resolve library dependencies youself.
+you must resolve library dependencies yourself.
=item B<$INITRDSIZE>
OO: --activate Active, OO_LVCHANGE
IO: --ignoreskippedcluster
ID: lvchange_resync
-DESC: Resyncronize a mirror or raid LV.
+DESC: Resynchronize a mirror or raid LV.
DESC: Use to reset the 'R' attribute on an LV that was not initially synchronized.
RULE: all not lv_is_pvmove lv_is_locked lv_is_raid_with_integrity
RULE: all not LV_raid0