uint64_t current_id;
uint64_t new_id;
} m_set_transaction_id;
- struct {
- uint32_t device_id;
- uint64_t new_size;
- } m_trim;
} u;
};
* Note: only direct child is allowed
*/
struct dm_tree_node *presuspend_node;
+
+ /* Callback */
+ dm_node_callback_fn callback;
+ void *callback_data;
};
struct dm_tree {
r = dm_snprintf(buf, sizeof(buf), "delete %u",
m->u.m_delete.device_id);
break;
- case DM_THIN_MESSAGE_TRIM:
- r = dm_snprintf(buf, sizeof(buf), "trim %u %" PRIu64,
- m->u.m_trim.device_id,
- m->u.m_trim.new_size);
- break;
case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
r = dm_snprintf(buf, sizeof(buf),
"set_transaction_id %" PRIu64 " %" PRIu64,
} else if (info.suspended)
dec_suspended();
- if (dm_tree_node_num_children(child, 0)) {
- if (!_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
- return_0;
- }
+ if (child->callback &&
+ !child->callback(child, DM_NODE_CALLBACK_DEACTIVATED,
+ child->callback_data))
+ stack;
+ // FIXME: We need to let lvremove pass,
+ // so for now deactivation ignores check result
+ //r = 0; // FIXME: _node_clear_table() without callback ?
+
+ if (dm_tree_node_num_children(child, 0) &&
+ !_dm_tree_deactivate_children(child, uuid_prefix, uuid_prefix_len, level + 1))
+ return_0;
}
return r;
existing_table_size = dm_task_get_existing_table_size(dmt);
if ((dnode->props.size_changed =
(existing_table_size == seg_start) ? 0 : 1)) {
- log_debug("Table size changed from %" PRIu64 " to %"
- PRIu64 " for %s", existing_table_size,
- seg_start, dnode->name);
/*
* Kernel usually skips size validation on zero-length devices
* now so no need to preload them.
/* FIXME In which kernel version did this begin? */
if (!existing_table_size && dnode->props.delay_resume_if_new)
dnode->props.size_changed = 0;
+
+ log_debug("Table size changed from %" PRIu64 " to %"
+ PRIu64 " for %s.%s", existing_table_size,
+ seg_start, dnode->name,
+ dnode->props.size_changed ? "" : " (Ignoring.)");
}
}
update_devs_flag = 1;
}
- if (update_devs_flag) {
+ if (update_devs_flag ||
+ (!dnode->info.exists && dnode->callback)) {
if (!dm_udev_wait(dm_tree_get_cookie(dnode)))
stack;
dm_tree_set_cookie(dnode, 0);
+
+ /* Notify callback that this node has been preloaded.
+  * Must pass dnode here: 'child' is the iteration variable of the
+  * preceding child loop and is NULL (or stale) at this point. */
+ if (!dnode->info.exists && dnode->callback &&
+ !dnode->callback(dnode, DM_NODE_CALLBACK_PRELOADED,
+ dnode->callback_data))
+ return_0;
}
return r;
uint64_t low_water_mark,
unsigned skip_block_zeroing)
{
- struct load_segment *seg;
+ struct load_segment *seg, *mseg;
+ uint64_t devsize = 0;
+ /*
+ * Maximum supported size for a thin pool metadata device.
+ * The limit is hardcoded in the kernel; a bigger metadata
+ * device size is not accepted. (16978542592 bytes)
+ */
+ const uint64_t max_metadata_size =
+ 255ULL * (1 << 14) * (4096 / (1 << 9)) - 256 * 1024;
if (data_block_size < DM_THIN_MIN_DATA_BLOCK_SIZE) {
log_error("Data block size %u is lower then %u sectors.",
if (!_link_tree_nodes(node, seg->metadata))
return_0;
+ /* FIXME: more complex target may need more tweaks */
+ dm_list_iterate_items(mseg, &seg->metadata->props.segs) {
+ devsize += mseg->size;
+ if (devsize > max_metadata_size) {
+ log_debug("Ignoring %" PRIu64 " of device.",
+ devsize - max_metadata_size);
+ mseg->size -= (devsize - max_metadata_size);
+ devsize = max_metadata_size;
+ /* FIXME: drop remaining segs */
+ }
+ }
+
if (!(seg->pool = dm_tree_find_node_by_uuid(node->dtree, pool_uuid))) {
log_error("Missing pool uuid %s.", pool_uuid);
return 0;
tm->message.u.m_delete.device_id = id1;
tm->expected_errno = ENODATA;
break;
- case DM_THIN_MESSAGE_TRIM:
- if (!_thin_validate_device_id(id1))
- return_0;
- tm->message.u.m_trim.device_id = id1;
- tm->message.u.m_trim.new_size = id2;
- break;
case DM_THIN_MESSAGE_SET_TRANSACTION_ID:
if ((id1 + 1) != id2) {
log_error("New transaction id must be sequential.");
return 1;
}
+
+/*
+ * Attach a callback hook (with opaque caller data) to a tree node.
+ * The hook is invoked during deptree processing, e.g. after the node
+ * is deactivated (DM_NODE_CALLBACK_DEACTIVATED) or preloaded
+ * (DM_NODE_CALLBACK_PRELOADED).
+ */
+void dm_tree_node_set_callback(struct dm_tree_node *dnode,
+			       dm_node_callback_fn cb, void *data)
+{
+	dnode->callback = cb;
+	dnode->callback_data = data;
+}