#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H
-#define DM_DIR "mapper" /* Slashes not supported */
-#define DM_MAX_TYPE_NAME 16
-#define DM_NAME_LEN 128
-#define DM_UUID_LEN 129
-
#ifdef __KERNEL__
+typedef unsigned long sector_t;
+
+struct dm_target;
struct dm_table;
struct dm_dev;
-typedef unsigned long offset_t;
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
/*
- * Prototypes for functions for a target
+ * In the constructor the target parameter will already have the
+ * table, type, begin and len fields filled in.
*/
-typedef int (*dm_ctr_fn) (struct dm_table *t, offset_t b, offset_t l,
- int argc, char **argv, void **context);
-typedef void (*dm_dtr_fn) (struct dm_table *t, void *c);
-typedef int (*dm_map_fn) (struct buffer_head *bh, int rw, void *context);
-typedef int (*dm_err_fn) (struct buffer_head *bh, int rw, void *context);
-typedef int (*dm_status_fn) (status_type_t status_type, char *result,
- int maxlen, void *context);
+typedef int (*dm_ctr_fn) (struct dm_target *target, int argc, char **argv);
+
+/*
+ * The destructor doesn't need to free the dm_target, just
+ * anything hidden in ti->private.
+ */
+typedef void (*dm_dtr_fn) (struct dm_target *ti);
+
+/*
+ * The map function must return:
+ * < 0: error
+ * = 0: The target will handle the io by resubmitting it later
+ * > 0: simple remap complete
+ */
+typedef int (*dm_map_fn) (struct dm_target *ti, struct buffer_head *bh, int rw);
+typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+ char *result, int maxlen);
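
/*
 * A minimal sketch of a map function honouring the contract above.
 * 'example_c' and 'example_map' are hypothetical and not part of this
 * patch; the target's ctr is assumed to have stashed an example_c in
 * ti->private, as the dm-linear target below does in its own .c file.
 */
struct example_c {
	struct dm_dev *dev;	/* destination device */
	sector_t start;		/* offset into that device */
};

static int example_map(struct dm_target *ti, struct buffer_head *bh, int rw)
{
	struct example_c *ec = (struct example_c *) ti->private;

	/* redirect to the destination device and translate the sector */
	bh->b_rdev = ec->dev->dev;
	bh->b_rsector = ec->start + (bh->b_rsector - ti->begin);

	return 1;	/* > 0: simple remap complete */
}
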
void dm_error(const char *message);
/*
* Constructors should call these functions to ensure destination devices
- * are opened/closed correctly
+ * are opened/closed correctly.
+ * FIXME: too many arguments.
*/
-int dm_table_get_device(struct dm_table *t, const char *path,
- offset_t start, offset_t len,
- int mode, struct dm_dev **result);
-void dm_table_put_device(struct dm_table *table, struct dm_dev *d);
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+ sector_t len, int mode, struct dm_dev **result);
+void dm_put_device(struct dm_target *ti, struct dm_dev *d);
/*
* Information about a target type
dm_ctr_fn ctr;
dm_dtr_fn dtr;
dm_map_fn map;
- dm_err_fn err;
dm_status_fn status;
};
+struct dm_target {
+ struct dm_table *table;
+ struct target_type *type;
+
+ /* target limits */
+ sector_t begin;
+ sector_t len;
+
+ /* target specific data */
+ void *private;
+
+ /* Used to provide an error string from the ctr */
+ char *error;
+};
+
int dm_register_target(struct target_type *t);
int dm_unregister_target(struct target_type *t);
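
/*
 * Typical registration boilerplate (a sketch, not part of this patch:
 * 'example_target' is hypothetical and its ctr/dtr/status hooks are
 * assumed to exist alongside example_map above -- see the dm-linear
 * target later in this patch for a complete implementation).
 */
static struct target_type example_target = {
	.name   = "example",
	.module = THIS_MODULE,
	.ctr    = example_ctr,
	.dtr    = example_dtr,
	.map    = example_map,
	.status = example_status,
};

int __init dm_example_init(void)
{
	int r = dm_register_target(&example_target);
	if (r < 0)
		DMERR("example: register failed %d", r);
	return r;
}

void dm_example_exit(void)
{
	if (dm_unregister_target(&example_target) < 0)
		DMERR("example: unregister failed");
}
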
#include "dm-snapshot.h"
#include "kcopyd.h"
+
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
#define SECTOR_SIZE 512
#define SECTOR_SHIFT 9
};
struct commit_callback {
- void (*callback)(void *, int success);
+ void (*callback) (void *, int success);
void *context;
};
return 0;
}
-#if LINUX_VERSION_CODE < KERNEL_VERSION ( 2, 4, 19)
-/*
- * FIXME: Remove once 2.4.19 has been released.
- */
-struct page *vmalloc_to_page(void *vmalloc_addr)
-{
- unsigned long addr = (unsigned long) vmalloc_addr;
- struct page *page = NULL;
- pmd_t *pmd;
- pte_t *pte;
- pgd_t *pgd;
-
- pgd = pgd_offset_k(addr);
- if (!pgd_none(*pgd)) {
- pmd = pmd_offset(pgd, addr);
- if (!pmd_none(*pmd)) {
- pte = pte_offset(pmd, addr);
- if (pte_present(*pte)) {
- page = pte_page(*pte);
- }
- }
- }
- return page;
-}
-#endif
-
static int allocate_iobuf(struct pstore *ps)
{
int r = -ENOMEM;
size_t i, len, nr_pages;
if (dh->magic == 0) {
*new_snapshot = 1;
- } else if (le32_to_cpu(dh->magic) == SNAP_MAGIC) {
+ } else if (dh->magic == SNAP_MAGIC) {
*new_snapshot = 0;
- ps->valid = le32_to_cpu(dh->valid);
- ps->version = le32_to_cpu(dh->version);
- ps->chunk_size = le32_to_cpu(dh->chunk_size);
+ ps->valid = dh->valid;
+ ps->version = dh->version;
+ ps->chunk_size = dh->chunk_size;
} else {
DMWARN("Invalid/corrupt snapshot");
{
struct pstore *ps = get_info(store);
uint32_t stride;
- offset_t size = get_dev_size(store->snap->cow->dev);
+ sector_t size = get_dev_size(store->snap->cow->dev);
/* Is there enough room ? */
if (size <= (ps->next_free * store->snap->chunk_size))
* Implementation of the store for non-persistent snapshots.
*---------------------------------------------------------------*/
struct transient_c {
- offset_t next_free;
+ sector_t next_free;
};
void transient_destroy(struct exception_store *store)
int transient_prepare(struct exception_store *store, struct exception *e)
{
struct transient_c *tc = (struct transient_c *) store->context;
- offset_t size = get_dev_size(store->snap->cow->dev);
+ sector_t size = get_dev_size(store->snap->cow->dev);
if (size < (tc->next_free + store->snap->chunk_size))
return -1;
}
int dm_create_transient(struct exception_store *store,
- struct dm_snapshot *s, int blocksize, void **error)
+ struct dm_snapshot *s, int blocksize)
{
struct transient_c *tc;
+++ /dev/null
-/*
- * Copyright (C) 2002 Sistina Software (UK) Limited.
- *
- * This file is released under the GPL.
- */
-
-/*
- * We need to be able to quickly return the struct mapped_device,
- * whether it is looked up by name, uuid or by kdev_t. Note that
- * multiple major numbers are now supported, so we cannot keep
- * things simple by putting them in an array.
- *
- * Instead this will be implemented as a trio of closely coupled
- * hash tables.
- */
-
-#include <linux/list.h>
-#include <linux/rwsem.h>
-#include <linux/slab.h>
-
-#include "dm.h"
-
-struct hash_cell {
- struct list_head list;
- struct mapped_device *md;
-};
-
-#define NUM_BUCKETS 64
-#define MASK_BUCKETS (NUM_BUCKETS - 1)
-#define HASH_MULT 2654435387U
-static struct list_head *_dev_buckets;
-static struct list_head *_name_buckets;
-static struct list_head *_uuid_buckets;
-
-/*
- * Guards access to all three tables.
- */
-static DECLARE_RWSEM(_hash_lock);
-
-
-/*-----------------------------------------------------------------
- * Init/exit code
- *---------------------------------------------------------------*/
-void dm_hash_exit(void)
-{
- if (_dev_buckets) {
- kfree(_dev_buckets);
- _dev_buckets = NULL;
- }
-
- if (_name_buckets) {
- kfree(_name_buckets);
- _name_buckets = NULL;
- }
-
- if (_uuid_buckets) {
- kfree(_uuid_buckets);
- _uuid_buckets = NULL;
- }
-}
-
-struct list_head *alloc_buckets(void)
-{
- struct list_head *buckets;
- unsigned int i, len;
-
- len = NUM_BUCKETS * sizeof(struct list_head);
- buckets = kmalloc(len, GFP_KERNEL);
- if (buckets)
- for (i = 0; i < NUM_BUCKETS; i++)
- INIT_LIST_HEAD(buckets + i);
-
- return buckets;
-}
-
-int dm_hash_init(void)
-{
- _dev_buckets = alloc_buckets();
- if (!_dev_buckets)
- goto bad;
-
- _name_buckets = alloc_buckets();
- if (!_name_buckets)
- goto bad;
-
- _uuid_buckets = alloc_buckets();
- if (!_uuid_buckets)
- goto bad;
-
- return 0;
-
- bad:
- dm_hash_exit();
- return -ENOMEM;
-}
-
-
-/*-----------------------------------------------------------------
- * Hash functions
- *---------------------------------------------------------------*/
-static inline unsigned int hash_dev(kdev_t dev)
-{
- return (HASHDEV(dev) * HASH_MULT) & MASK_BUCKETS;
-}
-
-/*
- * We're not really concerned with the str hash function being
- * fast since it's only used by the ioctl interface.
- */
-static unsigned int hash_str(const char *str)
-{
- unsigned int h = 0;
-
- while (*str)
- h = (h + (unsigned int) *str++) * HASH_MULT;
-
- return h & MASK_BUCKETS;
-}
-
-
-/*-----------------------------------------------------------------
- * Code for looking up the device by kdev_t.
- *---------------------------------------------------------------*/
-static struct hash_cell *__get_dev_cell(kdev_t dev)
-{
- struct list_head *tmp;
- struct hash_cell *hc;
- unsigned int h = hash_dev(dev);
-
- list_for_each (tmp, _dev_buckets + h) {
- hc = list_entry(tmp, struct hash_cell, list);
- if (kdev_same(hc->md->dev, dev))
- return hc;
- }
-
- return NULL;
-}
-
-struct mapped_device *dm_get_r(kdev_t dev)
-{
- struct hash_cell *hc;
- struct mapped_device *md = NULL;
-
- down_read(&_hash_lock);
-
- hc = __get_dev_cell(dev);
- if (!hc)
- goto out;
-
- down_read(&hc->md->lock);
- if (!dm_flag(hc->md, DMF_VALID)) {
- up_read(&hc->md->lock);
- goto out;
- }
-
- md = hc->md;
-
- out:
- up_read(&_hash_lock);
- return md;
-}
-
-struct mapped_device *dm_get_w(kdev_t dev)
-{
- struct hash_cell *hc;
- struct mapped_device *md = NULL;
-
- down_read(&_hash_lock);
-
- hc = __get_dev_cell(dev);
- if (!hc)
- goto out;
-
- down_write(&hc->md->lock);
- if (!dm_flag(hc->md, DMF_VALID)) {
- up_write(&hc->md->lock);
- goto out;
- }
-
- md = hc->md;
-
- out:
- up_read(&_hash_lock);
- return md;
-}
-
-
-/*-----------------------------------------------------------------
- * Code for looking up a device by name
- *---------------------------------------------------------------*/
-static int namecmp(struct mapped_device *md, const char *str, int uuid)
-{
- if (!uuid)
- return strcmp(md->name, str);
-
- if (!md->uuid)
- return -1; /* never equal */
-
- return strcmp(md->uuid, str);
-}
-
-static struct hash_cell *__get_str_cell(const char *str, int uuid)
-{
- struct list_head *tmp, *buckets;
- struct hash_cell *hc;
- unsigned int h = hash_str(str);
-
- buckets = uuid ? _uuid_buckets : _name_buckets;
- list_for_each (tmp, buckets + h) {
- hc = list_entry(tmp, struct hash_cell, list);
- if (!namecmp(hc->md, str, uuid))
- return hc;
- }
-
- return NULL;
-}
-
-static inline struct mapped_device *get_name(const char *str, int uuid,
- int write)
-{
- struct hash_cell *hc;
- struct mapped_device *md = NULL;
-
- down_read(&_hash_lock);
-
- hc = __get_str_cell(str, uuid);
- if (!hc)
- goto out;
-
- if (write)
- down_write(&hc->md->lock);
- else
- down_read(&hc->md->lock);
-
- if (!dm_flag(hc->md, DMF_VALID)) {
- if (write)
- up_write(&hc->md->lock);
- else
- up_read(&hc->md->lock);
- goto out;
- }
-
- md = hc->md;
-
- out:
- up_read(&_hash_lock);
-
- return md;
-}
-
-struct mapped_device *dm_get_name_r(const char *name)
-{
- return get_name(name, 0, 0);
-}
-
-struct mapped_device *dm_get_name_w(const char *name)
-{
- return get_name(name, 0, 1);
-}
-
-struct mapped_device *dm_get_uuid_r(const char *uuid)
-{
- return get_name(uuid, 1, 0);
-}
-
-struct mapped_device *dm_get_uuid_w(const char *uuid)
-{
- return get_name(uuid, 1, 1);
-}
-
-/*-----------------------------------------------------------------
- * Inserting and removing and renaming a device.
- *---------------------------------------------------------------*/
-static struct hash_cell *alloc_cell(struct mapped_device *md)
-{
- struct hash_cell *hc = kmalloc(sizeof(*hc), GFP_KERNEL);
- if (hc) {
- INIT_LIST_HEAD(&hc->list);
- hc->md = md;
- }
-
- return hc;
-}
-
-/*
- * The kdev_t and uuid of a device can never change once it is
- * initially inserted.
- */
-int dm_hash_insert(struct mapped_device *md)
-{
- struct hash_cell *dev_cell, *name_cell, *uuid_cell;
-
- /*
- * Allocate the new cells.
- */
- dev_cell = name_cell = uuid_cell = NULL;
- if (!(dev_cell = alloc_cell(md)) ||
- !(name_cell = alloc_cell(md)) ||
- !(uuid_cell = alloc_cell(md))) {
- if (uuid_cell)
- kfree(uuid_cell);
- if (name_cell)
- kfree(name_cell);
- if (dev_cell)
- kfree(dev_cell);
-
- return -ENOMEM;
- }
-
- /*
- * Insert the cell into all three hash tables.
- */
- down_write(&_hash_lock);
- if (__get_dev_cell(md->dev))
- goto bad;
-
- list_add(&dev_cell->list, _dev_buckets + hash_dev(md->dev));
-
- if (__get_str_cell(md->name, 0)) {
- list_del(&dev_cell->list);
- goto bad;
- }
- list_add(&name_cell->list, _name_buckets + hash_str(md->name));
-
- if (md->uuid) {
- if (__get_str_cell(md->uuid, 1)) {
- list_del(&name_cell->list);
- list_del(&dev_cell->list);
- goto bad;
- }
- list_add(&uuid_cell->list, _uuid_buckets + hash_str(md->uuid));
- }
- up_write(&_hash_lock);
-
- if (!md->uuid)
- kfree(uuid_cell);
-
- return 0;
-
- bad:
- up_write(&_hash_lock);
- kfree(uuid_cell);
- kfree(name_cell);
- kfree(dev_cell);
- return -EBUSY;
-}
-
-static void dispose_cell(struct hash_cell *hc)
-{
- list_del(&hc->list);
- kfree(hc);
-}
-
-/*
- * md should already have the write lock and DMF_VALID unset.
- */
-void dm_hash_remove(struct mapped_device *md)
-{
- struct hash_cell *hc;
-
- /*
- * Ensure that anything else waiting for the lock gets it and
- * promptly releases it because DMF_VALID is no longer set.
- * Acquiring _hash_lock exclusively prevents anything else
- * starting a search for an md until our md is completely removed.
- */
- up_write(&md->lock);
- down_write(&_hash_lock);
- down_write(&md->lock);
-
- /* remove from the dev hash */
- hc = __get_dev_cell(md->dev);
- if (!hc)
- DMWARN("device doesn't appear to be in the dev hash table.");
- else
- dispose_cell(hc);
-
- /* remove from the name hash */
- hc = __get_str_cell(md->name, 0);
- if (!hc)
- DMWARN("device doesn't appear to be in the name hash table.");
- else
- dispose_cell(hc);
-
- /* remove from the uuid hash, if it has a uuid */
- if (md->uuid) {
- hc = __get_str_cell(md->uuid, 1);
- if (!hc)
- DMWARN("device doesn't appear to be in the uuid "
- "hash table.");
- else
- dispose_cell(hc);
- }
-
- up_write(&_hash_lock);
-}
-
-int dm_hash_rename(const char *old, const char *new)
-{
- char *new_name, *old_name;
- struct hash_cell *hc;
-
- /*
- * duplicate new.
- */
- new_name = dm_strdup(new);
- if (!new_name)
- return -ENOMEM;
-
- down_write(&_hash_lock);
-
- /*
- * Is new free ?
- */
- hc = __get_str_cell(new, 0);
- if (hc) {
- DMWARN("asked to rename to an already existing name %s -> %s",
- old, new);
- up_write(&_hash_lock);
- return -EBUSY;
- }
-
- /*
- * Is there such a device as 'old' ?
- */
- hc = __get_str_cell(old, 0);
- if (!hc) {
- DMWARN("asked to rename a non existent device %s -> %s",
- old, new);
- up_write(&_hash_lock);
- return -ENXIO;
- }
-
- /*
- * rename and move the name cell.
- */
- list_del(&hc->list);
- old_name = hc->md->name;
- hc->md->name = new_name;
- list_add(&hc->list, _name_buckets + hash_str(new_name));
-
- up_write(&_hash_lock);
- kfree(old_name);
- return 0;
-}
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/slab.h>
/*
* Linear: maps a linear range of a device.
*/
struct linear_c {
- long delta; /* FIXME: we need a signed offset type */
- long start; /* For display only */
struct dm_dev *dev;
+ sector_t start;
};
/*
* Construct a linear mapping: <dev_path> <offset>
*/
-static int linear_ctr(struct dm_table *t, offset_t b, offset_t l,
- int argc, char **argv, void **context)
+static int linear_ctr(struct dm_target *ti, int argc, char **argv)
{
struct linear_c *lc;
- unsigned long start; /* FIXME: unsigned long long */
- char *end;
if (argc != 2) {
- *context = "dm-linear: Not enough arguments";
+ ti->error = "dm-linear: Not enough arguments";
return -EINVAL;
}
lc = kmalloc(sizeof(*lc), GFP_KERNEL);
if (lc == NULL) {
- *context = "dm-linear: Cannot allocate linear context";
+ ti->error = "dm-linear: Cannot allocate linear context";
return -ENOMEM;
}
- start = simple_strtoul(argv[1], &end, 10);
- if (*end) {
- *context = "dm-linear: Invalid device sector";
+ if (sscanf(argv[1], SECTOR_FORMAT, &lc->start) != 1) {
+ ti->error = "dm-linear: Invalid device sector";
goto bad;
}
- if (dm_table_get_device(t, argv[0], start, l, t->mode, &lc->dev)) {
- *context = "dm-linear: Device lookup failed";
+ if (dm_get_device(ti, argv[0], lc->start, ti->len,
+ dm_table_get_mode(ti->table), &lc->dev)) {
+ ti->error = "dm-linear: Device lookup failed";
goto bad;
}
- lc->delta = (int) start - (int) b;
- lc->start = start;
- *context = lc;
+ ti->private = lc;
return 0;
bad:
return -EINVAL;
}
-static void linear_dtr(struct dm_table *t, void *c)
+static void linear_dtr(struct dm_target *ti)
{
- struct linear_c *lc = (struct linear_c *) c;
+ struct linear_c *lc = (struct linear_c *) ti->private;
- dm_table_put_device(t, lc->dev);
- kfree(c);
+ dm_put_device(ti, lc->dev);
+ kfree(lc);
}
-static int linear_map(struct buffer_head *bh, int rw, void *context)
+static int linear_map(struct dm_target *ti, struct buffer_head *bh, int rw)
{
- struct linear_c *lc = (struct linear_c *) context;
+ struct linear_c *lc = (struct linear_c *) ti->private;
bh->b_rdev = lc->dev->dev;
- bh->b_rsector = bh->b_rsector + lc->delta;
+ bh->b_rsector = lc->start + (bh->b_rsector - ti->begin);
return 1;
}
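
/*
 * Worked example of the remap above: for a table line
 * "0 1024 linear /dev/hda1 384" (a hypothetical mapping), ti->begin
 * is 0 and lc->start is 384, so an io to logical sector 100 is sent
 * to sector 384 + (100 - 0) = 484 of /dev/hda1.
 */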
-static int linear_status(status_type_t type, char *result, int maxlen,
- void *context)
+static int linear_status(struct dm_target *ti, status_type_t type,
+ char *result, int maxlen)
{
- struct linear_c *lc = (struct linear_c *) context;
+ struct linear_c *lc = (struct linear_c *) ti->private;
switch (type) {
case STATUSTYPE_INFO:
break;
case STATUSTYPE_TABLE:
- snprintf(result, maxlen, "%s %ld", kdevname(lc->dev->dev),
- lc->start);
+ snprintf(result, maxlen, "%s " SECTOR_FORMAT,
+ kdevname(to_kdev_t(lc->dev->bdev->bd_dev)), lc->start);
break;
}
return 0;
}
static struct target_type linear_target = {
- name: "linear",
- module: THIS_MODULE,
- ctr: linear_ctr,
- dtr: linear_dtr,
- map: linear_map,
- status: linear_status,
+ .name = "linear",
+ .module = THIS_MODULE,
+ .ctr = linear_ctr,
+ .dtr = linear_dtr,
+ .map = linear_map,
+ .status = linear_status,
};
int __init dm_linear_init(void)
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/device-mapper.h>
+#include <linux/vmalloc.h>
#include "dm-snapshot.h"
#include "kcopyd.h"
*/
static int init_hash_tables(struct dm_snapshot *s)
{
- offset_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
+ sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
/*
* Calculate based on the size of the original volume or
/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
-static int snapshot_ctr(struct dm_table *t, offset_t b, offset_t l,
- int argc, char **argv, void **context)
+static int snapshot_ctr(struct dm_target *ti, int argc, char **argv)
{
struct dm_snapshot *s;
unsigned long chunk_size;
int blocksize;
if (argc < 4) {
- *context = "dm-snapshot: requires exactly 4 arguments";
+ ti->error = "dm-snapshot: requires exactly 4 arguments";
r = -EINVAL;
goto bad;
}
persistent = argv[2];
if ((*persistent & 0x5f) != 'P' && (*persistent & 0x5f) != 'N') {
- *context = "Persistent flag is not P or N";
+ ti->error = "Persistent flag is not P or N";
r = -EINVAL;
goto bad;
}
chunk_size = simple_strtoul(argv[3], &value, 10);
if (chunk_size == 0 || value == NULL) {
- *context = "Invalid chunk size";
+ ti->error = "Invalid chunk size";
r = -EINVAL;
goto bad;
}
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (s == NULL) {
- *context = "Cannot allocate snapshot context private structure";
+ ti->error = "Cannot allocate snapshot context private "
+ "structure";
r = -ENOMEM;
goto bad;
}
- r = dm_table_get_device(t, origin_path, 0, 0, FMODE_READ, &s->origin);
+ r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
if (r) {
- *context = "Cannot get origin device";
+ ti->error = "Cannot get origin device";
goto bad_free;
}
- r = dm_table_get_device(t, cow_path, 0, 0,
- FMODE_READ | FMODE_WRITE, &s->cow);
+ /* FIXME: get cow length */
+ r = dm_get_device(ti, cow_path, 0, 0,
+ FMODE_READ | FMODE_WRITE, &s->cow);
if (r) {
- dm_table_put_device(t, s->origin);
- *context = "Cannot get COW device";
+ dm_put_device(ti, s->origin);
+ ti->error = "Cannot get COW device";
goto bad_free;
}
/* Validate the chunk size against the device block size */
blocksize = get_hardsect_size(s->cow->dev);
if (chunk_size % (blocksize / SECTOR_SIZE)) {
- *context = "Chunk size is not a multiple of device blocksize";
+ ti->error = "Chunk size is not a multiple of device blocksize";
r = -EINVAL;
goto bad_putdev;
}
/* Check the sizes are small enough to fit in one kiovec */
if (chunk_size > KIO_MAX_SECTORS) {
- *context = "Chunk size is too big";
+ ti->error = "Chunk size is too big";
r = -EINVAL;
goto bad_putdev;
}
/* Check chunk_size is a power of 2 */
if (chunk_size & (chunk_size - 1)) {
- *context = "Chunk size is not a power of 2";
+ ti->error = "Chunk size is not a power of 2";
r = -EINVAL;
goto bad_putdev;
}
s->valid = 1;
s->last_percent = 0;
- s->table = t;
init_rwsem(&s->lock);
+ s->table = ti->table;
/* Allocate hash table for COW data */
if (init_hash_tables(s)) {
- *context = "Unable to allocate hash table space";
+ ti->error = "Unable to allocate hash table space";
r = -ENOMEM;
goto bad_putdev;
}
if ((*persistent & 0x5f) == 'P')
r = dm_create_persistent(&s->store, s->chunk_size);
else
- r = dm_create_transient(&s->store, s, blocksize, context);
+ r = dm_create_transient(&s->store, s, blocksize);
if (r) {
- *context = "Couldn't create exception store";
+ ti->error = "Couldn't create exception store";
r = -EINVAL;
goto bad_free1;
}
/* Add snapshot to the list of snapshots for this origin */
if (register_snapshot(s)) {
r = -EINVAL;
- *context = "Cannot register snapshot origin";
+ ti->error = "Cannot register snapshot origin";
goto bad_free2;
}
#if LVM_VFS_ENHANCEMENT
#endif
kcopyd_inc_client_count();
- *context = s;
+ ti->private = s;
return 0;
bad_free2:
exit_exception_table(&s->complete, exception_cache);
bad_putdev:
- dm_table_put_device(t, s->cow);
- dm_table_put_device(t, s->origin);
+ dm_put_device(ti, s->cow);
+ dm_put_device(ti, s->origin);
bad_free:
kfree(s);
return r;
}
-static void snapshot_dtr(struct dm_table *t, void *context)
+static void snapshot_dtr(struct dm_target *ti)
{
- struct dm_snapshot *s = (struct dm_snapshot *) context;
+ struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
- dm_table_event(s->table);
+ dm_table_event(ti->table);
unregister_snapshot(s);
/* Deallocate memory used */
s->store.destroy(&s->store);
- dm_table_put_device(t, s->origin);
- dm_table_put_device(t, s->cow);
+ dm_put_device(ti, s->origin);
+ dm_put_device(ti, s->cow);
kfree(s);
kcopyd_dec_client_count();
(bh->b_rsector & s->chunk_mask);
}
-static int snapshot_map(struct buffer_head *bh, int rw, void *context)
+static int snapshot_map(struct dm_target *ti, struct buffer_head *bh, int rw)
{
struct exception *e;
- struct dm_snapshot *s = (struct dm_snapshot *) context;
+ struct dm_snapshot *s = (struct dm_snapshot *) ti->private;
int r = 1;
chunk_t chunk;
struct pending_exception *pe;
return r;
}
-static int snapshot_status(status_type_t type, char *result,
- int maxlen, void *context)
+static int snapshot_status(struct dm_target *ti, status_type_t type,
+ char *result, int maxlen)
{
- struct dm_snapshot *snap = (struct dm_snapshot *) context;
+ struct dm_snapshot *snap = (struct dm_snapshot *) ti->private;
char cow[16];
char org[16];
* The context for an origin is merely a 'struct dm_dev *'
* pointing to the real device.
*/
-static int origin_ctr(struct dm_table *t, offset_t b, offset_t l,
- int argc, char **argv, void **context)
+static int origin_ctr(struct dm_target *ti, int argc, char **argv)
{
int r;
struct dm_dev *dev;
if (argc != 1) {
- *context = "dm-origin: incorrect number of arguments";
+ ti->error = "dm-origin: incorrect number of arguments";
return -EINVAL;
}
- r = dm_table_get_device(t, argv[0], 0, l, t->mode, &dev);
+ r = dm_get_device(ti, argv[0], 0, ti->len,
+ dm_table_get_mode(ti->table), &dev);
if (r) {
- *context = "Cannot get target device";
+ ti->error = "Cannot get target device";
return r;
}
- *context = dev;
+ ti->private = dev;
return 0;
}
-static void origin_dtr(struct dm_table *t, void *c)
+static void origin_dtr(struct dm_target *ti)
{
- struct dm_dev *dev = (struct dm_dev *) c;
- dm_table_put_device(t, dev);
+ struct dm_dev *dev = (struct dm_dev *) ti->private;
+ dm_put_device(ti, dev);
}
-static int origin_map(struct buffer_head *bh, int rw, void *context)
+static int origin_map(struct dm_target *ti, struct buffer_head *bh, int rw)
{
- struct dm_dev *dev = (struct dm_dev *) context;
+ struct dm_dev *dev = (struct dm_dev *) ti->private;
bh->b_rdev = dev->dev;
/* Only tell snapshots if this is a write */
return (rw == WRITE) ? do_origin(dev, bh) : 1;
}
-static int origin_status(status_type_t type, char *result,
- int maxlen, void *context)
+static int origin_status(struct dm_target *ti, status_type_t type, char *result,
+ int maxlen)
{
- struct dm_dev *dev = (struct dm_dev *) context;
+ struct dm_dev *dev = (struct dm_dev *) ti->private;
switch (type) {
case STATUSTYPE_INFO:
dtr: origin_dtr,
map: origin_map,
status: origin_status,
- err: NULL
};
static struct target_type snapshot_target = {
dtr: snapshot_dtr,
map: snapshot_map,
status: snapshot_status,
- err: NULL
};
int __init dm_snapshot_init(void)
* time. Typically 64k - 256k.
*/
/* FIXME: can we get away with limiting these to a uint32_t ? */
-typedef offset_t chunk_t;
+typedef sector_t chunk_t;
/*
* An exception is used where an old chunk of data has been
/*
* Destroys this object when you've finished with it.
*/
- void (*destroy) (struct exception_store *store);
+ void (*destroy) (struct exception_store * store);
/*
* Find somewhere to store the next exception.
*/
- int (*prepare_exception) (struct exception_store *store,
- struct exception *e);
+ int (*prepare_exception) (struct exception_store * store,
+ struct exception * e);
/*
* Update the metadata with this exception.
*/
- void (*commit_exception) (struct exception_store *store,
- struct exception *e,
+ void (*commit_exception) (struct exception_store * store,
+ struct exception * e,
void (*callback) (void *, int success),
void *callback_context);
/*
* The snapshot is invalid, note this in the metadata.
*/
- void (*drop_snapshot) (struct exception_store *store);
+ void (*drop_snapshot) (struct exception_store * store);
/*
* Return the %age full of the snapshot
*/
- int (*percent_full) (struct exception_store *store);
+ int (*percent_full) (struct exception_store * store);
struct dm_snapshot *snap;
void *context;
int dm_create_persistent(struct exception_store *store, uint32_t chunk_size);
int dm_create_transient(struct exception_store *store,
- struct dm_snapshot *s, int blocksize, void **error);
+ struct dm_snapshot *s, int blocksize);
/*
* Return the number of sectors in the device.
*/
-static inline offset_t get_dev_size(kdev_t dev)
+static inline sector_t get_dev_size(kdev_t dev)
{
int *sizes;
return 0;
}
-static inline chunk_t sector_to_chunk(struct dm_snapshot *s, offset_t sector)
+static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector)
{
return (sector & ~s->chunk_mask) >> s->chunk_shift;
}
-static inline offset_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
+static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
{
return chunk << s->chunk_shift;
}
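
/*
 * Worked example: with a chunk_size of 16 sectors, chunk_shift is 4
 * and chunk_mask is 15, so sector_to_chunk(s, 100) = (100 & ~15) >> 4
 * = chunk 6, and chunk_to_sector(s, 6) = 6 << 4 = sector 96.
 */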
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
+#include <linux/slab.h>
struct stripe {
struct dm_dev *dev;
- offset_t physical_start;
+ sector_t physical_start;
};
struct stripe_c {
- offset_t logical_start;
uint32_t stripes;
/* The size of this target / num. stripes */
/* stripe chunk size */
uint32_t chunk_shift;
- offset_t chunk_mask;
+ sector_t chunk_mask;
struct stripe stripe[0];
};
/*
* Parse a single <dev> <sector> pair
*/
-static int get_stripe(struct dm_table *t, struct stripe_c *sc,
+static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
int stripe, char **argv)
{
- char *end;
- unsigned long start;
+ sector_t start;
- start = simple_strtoul(argv[1], &end, 10);
- if (*end)
+ if (sscanf(argv[1], SECTOR_FORMAT, &start) != 1)
return -EINVAL;
- if (dm_table_get_device(t, argv[0], start, sc->stripe_width,
- t->mode, &sc->stripe[stripe].dev))
+ if (dm_get_device(ti, argv[0], start, sc->stripe_width,
+ dm_table_get_mode(ti->table),
+ &sc->stripe[stripe].dev))
return -ENXIO;
sc->stripe[stripe].physical_start = start;
return 0;
}
+/*
+ * FIXME: Nasty function, only present because we can't link
+ * against __moddi3 and __divdi3.
+ *
+ * Returns non-zero (and sets *n) iff a == b * *n.
+ */
+static int multiple(sector_t a, sector_t b, sector_t *n)
+{
+ sector_t acc, prev, i;
+
+ *n = 0;
+ while (a >= b) {
+ for (acc = b, prev = 0, i = 1;
+ acc <= a;
+ prev = acc, acc <<= 1, i <<= 1)
+ ;
+
+ a -= prev;
+ *n += i >> 1;
+ }
+
+ return a == 0;
+}
+
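+/*
+ * Example: multiple(1024, 256, &n) sets n = 4 and returns 1, since
+ * 1024 == 256 * 4; multiple(1000, 256, &n) sets n = 3 but returns 0,
+ * since 1000 - 3 * 256 leaves a remainder of 232.
+ */
+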
/*
* Construct a striped mapping.
* <number of stripes> <chunk size (2^^n)> [<dev_path> <offset>]+
*/
-static int stripe_ctr(struct dm_table *t, offset_t b, offset_t l,
- int argc, char **argv, void **context)
+static int stripe_ctr(struct dm_target *ti, int argc, char **argv)
{
struct stripe_c *sc;
+ sector_t width;
uint32_t stripes;
uint32_t chunk_size;
char *end;
int r, i;
if (argc < 2) {
- *context = "dm-stripe: Not enough arguments";
+ ti->error = "dm-stripe: Not enough arguments";
return -EINVAL;
}
stripes = simple_strtoul(argv[0], &end, 10);
if (*end) {
- *context = "dm-stripe: Invalid stripe count";
+ ti->error = "dm-stripe: Invalid stripe count";
return -EINVAL;
}
chunk_size = simple_strtoul(argv[1], &end, 10);
if (*end) {
- *context = "dm-stripe: Invalid chunk_size";
+ ti->error = "dm-stripe: Invalid chunk_size";
return -EINVAL;
}
- if (l % stripes) {
- *context = "dm-stripe: Target length not divisable by "
+ if (!multiple(ti->len, stripes, &width)) {
+ ti->error = "dm-stripe: Target length not divisable by "
"number of stripes";
return -EINVAL;
}
sc = alloc_context(stripes);
if (!sc) {
- *context = "dm-stripe: Memory allocation for striped context "
- "failed";
+ ti->error = "dm-stripe: Memory allocation for striped context "
+ "failed";
return -ENOMEM;
}
- sc->logical_start = b;
sc->stripes = stripes;
- sc->stripe_width = l / stripes;
+ sc->stripe_width = width;
/*
* chunk_size is a power of two
*/
if (!chunk_size || (chunk_size & (chunk_size - 1))) {
- *context = "dm-stripe: Invalid chunk size";
+ ti->error = "dm-stripe: Invalid chunk size";
kfree(sc);
return -EINVAL;
}
- sc->chunk_mask = chunk_size - 1;
+ sc->chunk_mask = ((sector_t) chunk_size) - 1;
for (sc->chunk_shift = 0; chunk_size; sc->chunk_shift++)
chunk_size >>= 1;
sc->chunk_shift--;
*/
for (i = 0; i < stripes; i++) {
if (argc < 2) {
- *context = "dm-stripe: Not enough destinations "
- "specified";
+ ti->error = "dm-stripe: Not enough destinations "
+ "specified";
kfree(sc);
return -EINVAL;
}
argv += 2;
- r = get_stripe(t, sc, i, argv);
+ r = get_stripe(ti, sc, i, argv);
if (r < 0) {
- *context = "dm-stripe: Couldn't parse stripe "
- "destination";
+ ti->error = "dm-stripe: Couldn't parse stripe "
+ "destination";
while (i--)
- dm_table_put_device(t, sc->stripe[i].dev);
+ dm_put_device(ti, sc->stripe[i].dev);
kfree(sc);
return r;
}
}
- *context = sc;
+ ti->private = sc;
return 0;
}
-static void stripe_dtr(struct dm_table *t, void *c)
+static void stripe_dtr(struct dm_target *ti)
{
unsigned int i;
- struct stripe_c *sc = (struct stripe_c *) c;
+ struct stripe_c *sc = (struct stripe_c *) ti->private;
for (i = 0; i < sc->stripes; i++)
- dm_table_put_device(t, sc->stripe[i].dev);
+ dm_put_device(ti, sc->stripe[i].dev);
kfree(sc);
}
-static int stripe_map(struct buffer_head *bh, int rw, void *context)
+static int stripe_map(struct dm_target *ti, struct buffer_head *bh, int rw)
{
- struct stripe_c *sc = (struct stripe_c *) context;
+ struct stripe_c *sc = (struct stripe_c *) ti->private;
- offset_t offset = bh->b_rsector - sc->logical_start;
+ sector_t offset = bh->b_rsector - ti->begin;
uint32_t chunk = (uint32_t) (offset >> sc->chunk_shift);
uint32_t stripe = chunk % sc->stripes; /* 32bit modulus */
chunk = chunk / sc->stripes;
return 1;
}
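
/*
 * Worked example of the arithmetic above: with 2 stripes and a chunk
 * size of 8 sectors (chunk_shift = 3, chunk_mask = 7), an io to
 * logical sector 100 of a target beginning at 0 gives offset = 100,
 * chunk = 12, stripe = 12 % 2 = 0 and chunk = 12 / 2 = 6; the remap
 * itself (elided from this hunk) then lands on that stripe's device
 * at physical_start + (6 << 3) + (100 & 7) = physical_start + 52.
 */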
-static int stripe_status(status_type_t type, char *result, int maxlen,
- void *context)
+static int stripe_status(struct dm_target *ti,
+ status_type_t type, char *result, int maxlen)
{
- struct stripe_c *sc = (struct stripe_c *) context;
+ struct stripe_c *sc = (struct stripe_c *) ti->private;
int offset;
int i;
break;
case STATUSTYPE_TABLE:
- offset = snprintf(result, maxlen, "%d %ld",
+ offset = snprintf(result, maxlen, "%d " SECTOR_FORMAT,
sc->stripes, sc->chunk_mask + 1);
for (i = 0; i < sc->stripes; i++) {
- offset +=
- snprintf(result + offset, maxlen - offset,
- " %s %ld",
- kdevname(sc->stripe[i].dev->dev),
- sc->stripe[i].physical_start);
+ offset += snprintf(result + offset, maxlen - offset,
+ " %s " SECTOR_FORMAT,
+ kdevname(to_kdev_t(sc->stripe[i].dev->bdev->bd_dev)),
+ sc->stripe[i].physical_start);
}
break;
}
}
static struct target_type stripe_target = {
- name: "striped",
- module: THIS_MODULE,
- ctr: stripe_ctr,
- dtr: stripe_dtr,
- map: stripe_map,
- status: stripe_status,
+ .name = "striped",
+ .module = THIS_MODULE,
+ .ctr = stripe_ctr,
+ .dtr = stripe_dtr,
+ .map = stripe_map,
+ .status = stripe_status,
};
int __init dm_stripe_init(void)
#include "dm.h"
+#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/blkdev.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
-/* ceiling(n / size) * size */
-static inline unsigned long round_up(unsigned long n, unsigned long size)
-{
- unsigned long r = n % size;
- return n + (r ? (size - r) : 0);
-}
+#define MAX_DEPTH 16
+#define NODE_SIZE L1_CACHE_BYTES
+#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
+#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
+
+struct dm_table {
+ atomic_t holders;
+
+ /* btree table */
+ int depth;
+ int counts[MAX_DEPTH]; /* in nodes */
+ sector_t *index[MAX_DEPTH];
+
+ int num_targets;
+ int num_allocated;
+ sector_t *highs;
+ struct dm_target *targets;
+
+ /*
+ * Indicates the rw permissions for the new logical
+ * device. This should be a combination of FMODE_READ
+ * and FMODE_WRITE.
+ */
+ int mode;
+
+ /* a list of devices used by this table */
+ struct list_head devices;
-/* ceiling(n / size) */
+ /*
+ * A waitqueue for processes waiting for something
+ * interesting to happen to this table.
+ */
+ wait_queue_head_t eventq;
+};
+
+/*
+ * Ceiling(n / size)
+ */
static inline unsigned long div_up(unsigned long n, unsigned long size)
{
- return round_up(n, size) / size;
+ return dm_round_up(n, size) / size;
}
-/* similar to ceiling(log_size(n)) */
-static uint int_log(unsigned long n, unsigned long base)
+/*
+ * Similar to ceiling(log_size(n))
+ */
+static unsigned int int_log(unsigned long n, unsigned long base)
{
int result = 0;
}
/*
- * return the highest key that you could lookup
- * from the n'th node on level l of the btree.
+ * Calculate the index of the child node of the n'th node's k'th key.
+ */
+static inline int get_child(int n, int k)
+{
+ return (n * CHILDREN_PER_NODE) + k;
+}
+
+/*
+ * Return the n'th node of level l from table t.
*/
-static offset_t high(struct dm_table *t, int l, int n)
+static inline sector_t *get_node(struct dm_table *t, int l, int n)
+{
+ return t->index[l] + (n * KEYS_PER_NODE);
+}
+
+/*
+ * Return the highest key that you could look up from the n'th
+ * node on level l of the btree.
+ */
+static sector_t high(struct dm_table *t, int l, int n)
{
for (; l < t->depth - 1; l++)
n = get_child(n, CHILDREN_PER_NODE - 1);
if (n >= t->counts[l])
- return (offset_t) - 1;
+ return (sector_t) - 1;
return get_node(t, l, n)[KEYS_PER_NODE - 1];
}
/*
- * fills in a level of the btree based on the
- * highs of the level below it.
+ * Fills in a level of the btree based on the highs of the level
+ * below it.
*/
static int setup_btree_index(int l, struct dm_table *t)
{
int n, k;
- offset_t *node;
+ sector_t *node;
for (n = 0; n < t->counts[l]; n++) {
node = get_node(t, l, n);
}
/*
- * highs, and targets are managed as dynamic
- * arrays during a table load.
+ * highs, and targets are managed as dynamic arrays during a
+ * table load.
*/
static int alloc_targets(struct dm_table *t, int num)
{
- offset_t *n_highs;
- struct target *n_targets;
+ sector_t *n_highs;
+ struct dm_target *n_targets;
int n = t->num_targets;
/*
* Allocate both the target array and offset array at once.
*/
- n_highs = (offset_t *) vcalloc(sizeof(struct target) + sizeof(offset_t),
- num);
+ n_highs = (sector_t *) vcalloc(sizeof(struct dm_target) +
+ sizeof(sector_t), num);
if (!n_highs)
return -ENOMEM;
- n_targets = (struct target *) (n_highs + num);
+ n_targets = (struct dm_target *) (n_highs + num);
if (n) {
memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
}
memset(n_highs + n, -1, sizeof(*n_highs) * (num - n));
- if (t->highs)
- vfree(t->highs);
+ vfree(t->highs);
t->num_allocated = num;
t->highs = n_highs;
memset(t, 0, sizeof(*t));
INIT_LIST_HEAD(&t->devices);
+ atomic_set(&t->holders, 1);
/* allocate a single node's worth of targets to begin with */
if (alloc_targets(t, KEYS_PER_NODE)) {
kfree(t);
t = NULL;
}
}
-void dm_table_destroy(struct dm_table *t)
+void table_destroy(struct dm_table *t)
{
int i;
/* free the targets */
for (i = 0; i < t->num_targets; i++) {
- struct target *tgt = &t->targets[i];
+ struct dm_target *tgt = &t->targets[i];
dm_put_target_type(t->targets[i].type);
if (tgt->type->dtr)
- tgt->type->dtr(t, tgt->private);
+ tgt->type->dtr(tgt);
}
vfree(t->highs);
kfree(t);
}
+void dm_table_get(struct dm_table *t)
+{
+ atomic_inc(&t->holders);
+}
+
+void dm_table_put(struct dm_table *t)
+{
+ if (atomic_dec_and_test(&t->holders))
+ table_destroy(t);
+}
+
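+/*
+ * Holder pattern (sketch): code that stashes a pointer to a table
+ * does
+ *
+ *	dm_table_get(t);
+ *	... use the table via the accessors below ...
+ *	dm_table_put(t);	(may invoke table_destroy)
+ *
+ * so the table stays alive until its last holder drops it.
+ */
+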
/*
* Checks to see if we need to extend highs or targets.
*/
}
/*
- * Convert a device path to a kdev_t.
+ * Convert a device path to a dev_t.
*/
-int lookup_device(const char *path, kdev_t *dev)
+static int lookup_device(const char *path, kdev_t *dev)
{
int r;
struct nameidata nd;
return 0;
if ((r = path_walk(path, &nd)))
- goto bad;
+ goto out;
inode = nd.dentry->d_inode;
if (!inode) {
r = -ENOENT;
- goto bad;
+ goto out;
}
if (!S_ISBLK(inode->i_mode)) {
r = -EINVAL;
- goto bad;
+ goto out;
}
*dev = inode->i_rdev;
- bad:
+ out:
path_release(&nd);
return r;
}
list_for_each(tmp, l) {
struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
- if (dd->dev == dev)
+ if (kdev_same(dd->dev, dev))
return dd;
}
/*
* Open a device so we can use it as a map destination.
*/
-static int open_dev(struct dm_dev *d)
+static int open_dev(struct dm_dev *dd)
{
- int err;
-
- if (d->bd)
+ if (dd->bdev)
BUG();
- if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
+ dd->bdev = bdget(kdev_t_to_nr(dd->dev));
+ if (!dd->bdev)
return -ENOMEM;
- if ((err = blkdev_get(d->bd, d->mode, 0, BDEV_FILE)))
- return err;
-
- return 0;
+ return blkdev_get(dd->bdev, dd->mode, 0, BDEV_RAW);
}
/*
* Close a device that we've been using.
*/
-static void close_dev(struct dm_dev *d)
+static void close_dev(struct dm_dev *dd)
{
- if (!d->bd)
+ if (!dd->bdev)
return;
- blkdev_put(d->bd, BDEV_FILE);
- d->bd = NULL;
+ blkdev_put(dd->bdev, BDEV_RAW);
+ dd->bdev = NULL;
}
/*
- * If possible (ie. blk_size[major] is set), this
- * checks an area of a destination device is
- * valid.
+ * If possible (ie. blk_size[major] is set), this checks an area
+ * of a destination device is valid.
*/
-static int check_device_area(kdev_t dev, offset_t start, offset_t len)
+static int check_device_area(kdev_t dev, sector_t start, sector_t len)
{
int *sizes;
- offset_t dev_size;
+ sector_t dev_size;
- if (!(sizes = blk_size[MAJOR(dev)]) || !(dev_size = sizes[MINOR(dev)]))
+ if (!(sizes = blk_size[major(dev)]) || !(dev_size = sizes[minor(dev)]))
/* we don't know the device details,
* so give the benefit of the doubt */
return 1;
memcpy(&dd_copy, dd, sizeof(dd_copy));
dd->mode |= new_mode;
- dd->bd = NULL;
+ dd->bdev = NULL;
r = open_dev(dd);
if (!r)
close_dev(&dd_copy);
}
/*
- * Add a device to the list, or just increment the usage count
- * if it's already present.
+ * Add a device to the list, or just increment the usage count if
+ * it's already present.
*/
-int dm_table_get_device(struct dm_table *t, const char *path,
- offset_t start, offset_t len, int mode,
- struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
+ sector_t len, int mode, struct dm_dev **result)
{
int r;
kdev_t dev;
struct dm_dev *dd;
int major, minor;
+ struct dm_table *t = ti->table;
+
+ if (!t)
+ BUG();
if (sscanf(path, "%x:%x", &major, &minor) == 2) {
/* Extract the major/minor numbers */
- dev = MKDEV(major, minor);
+ dev = mk_kdev(major, minor);
} else {
/* convert the path to a device */
if ((r = lookup_device(path, &dev)))
if (!dd)
return -ENOMEM;
- dd->mode = mode;
dd->dev = dev;
- dd->bd = NULL;
+ dd->mode = mode;
+ dd->bdev = NULL;
if ((r = open_dev(dd))) {
kfree(dd);
if (!check_device_area(dd->dev, start, len)) {
DMWARN("device %s too small for target", path);
- dm_table_put_device(t, dd);
+ dm_put_device(ti, dd);
return -EINVAL;
}
/*
 * Decrement a device's use count and remove it if necessary.
*/
-void dm_table_put_device(struct dm_table *t, struct dm_dev *dd)
+void dm_put_device(struct dm_target *ti, struct dm_dev *dd)
{
if (atomic_dec_and_test(&dd->count)) {
close_dev(dd);
}
/*
- * Adds a target to the map
+ * Checks to see if the target joins onto the end of the table.
*/
-int dm_table_add_target(struct dm_table *t, offset_t highs,
- struct target_type *type, void *private)
+static int adjoin(struct dm_table *table, struct dm_target *ti)
{
- int r, n;
+ struct dm_target *prev;
+
+ if (!table->num_targets)
+ return !ti->begin;
+
+ prev = &table->targets[table->num_targets - 1];
+ return (ti->begin == (prev->begin + prev->len));
+}
+
+/*
+ * Destructively splits up the argument list to pass to ctr.
+ */
+static int split_args(int max, int *argc, char **argv, char *input)
+{
+ char *start, *end = input, *out;
+ *argc = 0;
+
+ while (1) {
+ start = end;
+
+ /* Skip whitespace */
+ while (*start && isspace(*start))
+ start++;
+
+ if (!*start)
+ break; /* success, we hit the end */
+
+ /* 'out' is used to strip the backslash quoting */
+ end = out = start;
+ while (*end) {
+ /* Everything apart from '\0' can be quoted */
+ if (*end == '\\' && *(end + 1)) {
+ *out++ = *(end + 1);
+ end += 2;
+ continue;
+ }
+
+ if (isspace(*end))
+ break; /* end of token */
+
+ *out++ = *end++;
+ }
+
+ /* have we already filled the array ? */
+ if ((*argc + 1) > max)
+ return -EINVAL;
+
+ /* we know this is whitespace */
+ if (*end)
+ end++;
+
+ /* terminate the string and put it in the array */
+ *out = '\0';
+ argv[*argc] = start;
+ (*argc)++;
+ }
+
+ return 0;
+}
+
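+/*
+ * Example: the input "/dev/hda1 0\ 512 rw" splits into argc = 3,
+ * argv = { "/dev/hda1", "0 512", "rw" } -- the backslash quotes the
+ * space, so "0 512" survives as a single argument.
+ */
+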
+int dm_table_add_target(struct dm_table *t, const char *type,
+ sector_t start, sector_t len, char *params)
+{
+ int r, argc;
+ char *argv[32];
+ struct target_type *tt;
+ struct dm_target *tgt;
if ((r = check_space(t)))
return r;
- n = t->num_targets++;
- t->highs[n] = highs;
- t->targets[n].type = type;
- t->targets[n].private = private;
+ tgt = t->targets + t->num_targets;
+ memset(tgt, 0, sizeof(*tgt));
+ tt = dm_get_target_type(type);
+ if (!tt) {
+ tgt->error = "unknown target type";
+ return -EINVAL;
+ }
+
+ tgt->table = t;
+ tgt->type = tt;
+ tgt->begin = start;
+ tgt->len = len;
+ tgt->error = "Unknown error";
+
+ /*
+ * Does this target adjoin the previous one ?
+ */
+ if (!adjoin(t, tgt)) {
+ DMERR("Gap in table");
+ dm_put_target_type(tt);
+ return -EINVAL;
+ }
+
+ r = split_args(ARRAY_SIZE(argv), &argc, argv, params);
+ if (r) {
+ tgt->error = "couldn't split parameters";
+ dm_put_target_type(tt);
+ return r;
+ }
+
+ r = tt->ctr(tgt, argc, argv);
+ if (r) {
+ dm_put_target_type(tt);
+ return r;
+ }
+
+ t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
return 0;
}
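
/*
 * Example call (a sketch): dm_table_add_target(t, "linear", 0, 204800,
 * "8:1 0") would load a 100 MB linear target at sector 0, backed by
 * device 8:1 at offset 0.  Note that params must be writable, since
 * split_args() chops it up in place before it reaches the ctr.
 */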
static int setup_indexes(struct dm_table *t)
{
int i, total = 0;
- offset_t *indexes;
+ sector_t *indexes;
/* allocate the space for *all* the indexes */
for (i = t->depth - 2; i >= 0; i--) {
total += t->counts[i];
}
- indexes = (offset_t *) vcalloc(total, (unsigned long) NODE_SIZE);
+ indexes = (sector_t *) vcalloc(total, (unsigned long) NODE_SIZE);
if (!indexes)
return -ENOMEM;
}
/*
- * Builds the btree to index the map
+ * Builds the btree to index the map.
*/
int dm_table_complete(struct dm_table *t)
{
wake_up_interruptible(&t->eventq);
}
-EXPORT_SYMBOL(dm_table_get_device);
-EXPORT_SYMBOL(dm_table_put_device);
+sector_t dm_table_get_size(struct dm_table *t)
+{
+ return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
+}
+
+struct dm_target *dm_table_get_target(struct dm_table *t, int index)
+{
+ if (index >= t->num_targets)
+ return NULL;
+
+ return t->targets + index;
+}
+
+/*
+ * Search the btree for the correct target.
+ */
+struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
+{
+ int l, n = 0, k = 0;
+ sector_t *node;
+
+ for (l = 0; l < t->depth; l++) {
+ n = get_child(n, k);
+ node = get_node(t, l, n);
+
+ for (k = 0; k < KEYS_PER_NODE; k++)
+ if (node[k] >= sector)
+ break;
+ }
+
+ return &t->targets[(KEYS_PER_NODE * n) + k];
+}
+
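+/*
+ * Worked example of the node geometry (assuming a 32-bit box with
+ * 32-byte cachelines): NODE_SIZE = 32 and sizeof(sector_t) = 4, so
+ * KEYS_PER_NODE = 8 and CHILDREN_PER_NODE = 9; a two level tree then
+ * covers up to 9 * 8 = 72 targets while touching only one cacheline
+ * per level of the walk.
+ */
+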
+unsigned int dm_table_get_num_targets(struct dm_table *t)
+{
+ return t->num_targets;
+}
+
+struct list_head *dm_table_get_devices(struct dm_table *t)
+{
+ return &t->devices;
+}
+
+int dm_table_get_mode(struct dm_table *t)
+{
+ return t->mode;
+}
+
+void dm_table_add_wait_queue(struct dm_table *t, wait_queue_t *wq)
+{
+ add_wait_queue(&t->eventq, wq);
+}
+
+EXPORT_SYMBOL(dm_get_device);
+EXPORT_SYMBOL(dm_put_device);
EXPORT_SYMBOL(dm_table_event);
#include "dm.h"
+#include <linux/module.h>
#include <linux/kmod.h>
+#include <linux/slab.h>
struct tt_internal {
struct target_type tt;
#define DM_MOD_NAME_SIZE 32
-/*
- * Destructively splits up the argument list to pass to ctr.
- */
-int split_args(int max, int *argc, char **argv, char *input)
-{
- char *start, *end = input, *out;
- *argc = 0;
-
- while (1) {
- start = end;
-
- /* Skip whitespace */
- while (*start && isspace(*start))
- start++;
-
- if (!*start)
- break; /* success, we hit the end */
-
- /* 'out' is used to remove any back-quotes */
- end = out = start;
- while (*end) {
- /* Everything apart from '\0' can be quoted */
- if (*end == '\\' && *(end + 1)) {
- *out++ = *(end + 1);
- end += 2;
- continue;
- }
-
- if (isspace(*end))
- break; /* end of token */
-
- *out++ = *end++;
- }
-
- /* have we already filled the array ? */
- if ((*argc + 1) > max)
- return -EINVAL;
-
- /* we know this is whitespace */
- if (*end)
- end++;
-
- /* terminate the string and put it in the array */
- *out = '\0';
- argv[*argc] = start;
- (*argc)++;
- }
-
- return 0;
-}
-
static inline struct tt_internal *__find_target_type(const char *name)
{
struct list_head *tih;
/*
* io-err: always fails an io, useful for bringing
- * up LV's that have holes in them.
+ * up LVs that have holes in them.
*/
-static int io_err_ctr(struct dm_table *t, offset_t b, offset_t l,
- int argc, char **args, void **context)
+static int io_err_ctr(struct dm_target *ti, int argc, char **args)
{
- *context = NULL;
return 0;
}
-static void io_err_dtr(struct dm_table *t, void *c)
+static void io_err_dtr(struct dm_target *ti)
{
/* empty */
return;
}
-static int io_err_map(struct buffer_head *bh, int rw, void *context)
+static int io_err_map(struct dm_target *ti, struct buffer_head *bh, int rw)
{
buffer_IO_error(bh);
return 0;
}
static struct target_type error_target = {
- name: "error",
- ctr: io_err_ctr,
- dtr: io_err_dtr,
- map: io_err_map,
- status: NULL,
+ .name = "error",
+ .ctr = io_err_ctr,
+ .dtr = io_err_dtr,
+ .map = io_err_map,
};
int dm_target_init(void)
/*
- * Copyright (C) 2001 Sistina Software (UK) Limited.
+ * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
*/
#include "dm.h"
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/blk.h>
#include <linux/blkpg.h>
-
-/* we only need this for the lv_bmap struct definition, not happy */
+#include <linux/mempool.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
#include <linux/lvm.h>
-#define DEFAULT_READ_AHEAD 64
+#include <asm/uaccess.h>
static const char *_name = DM_NAME;
+#define MAX_DEVICES (1 << MINORBITS)
+#define SECTOR_SHIFT 9
+#define DEFAULT_READ_AHEAD 64
static int major = 0;
static int _major = 0;
-struct io_hook {
+struct dm_io {
struct mapped_device *md;
- struct target *target;
- int rw;
void (*end_io) (struct buffer_head * bh, int uptodate);
void *context;
};
-static kmem_cache_t *_io_hook_cache;
+struct deferred_io {
+ int rw;
+ struct buffer_head *bh;
+ struct deferred_io *next;
+};
+
+/*
+ * Bits for the md->flags field.
+ */
+#define DMF_BLOCK_IO 0
+#define DMF_SUSPENDED 1
+
+struct mapped_device {
+ struct rw_semaphore lock;
+ atomic_t holders;
+
+ kdev_t dev;
+ unsigned long flags;
+
+ /*
+ * A list of ios that arrived while we were suspended.
+ */
+ atomic_t pending;
+ wait_queue_head_t wait;
+ struct deferred_io *deferred;
+
+ /*
+ * The current mapping.
+ */
+ struct dm_table *map;
+};
+
+#define MIN_IOS 256
+static kmem_cache_t *_io_cache;
+static mempool_t *_io_pool;
/* block device arrays */
static int _block_size[MAX_DEVICES];
static int _blksize_size[MAX_DEVICES];
static int _hardsect_size[MAX_DEVICES];
-static devfs_handle_t _dev_dir;
-
-static int request(request_queue_t * q, int rw, struct buffer_head *bh);
+static struct mapped_device *get_kdev(kdev_t dev);
+static int dm_request(request_queue_t *q, int rw, struct buffer_head *bh);
static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb);
+
static __init int local_init(void)
{
int r;
- /* allocate a slab for the io-hooks */
- if (!_io_hook_cache &&
- !(_io_hook_cache = kmem_cache_create("dm io hooks",
- sizeof(struct io_hook),
- 0, 0, NULL, NULL)))
+ /* allocate a slab for the dm_ios */
+ _io_cache = kmem_cache_create("dm io",
+ sizeof(struct dm_io), 0, 0, NULL, NULL);
+
+ if (!_io_cache)
return -ENOMEM;
+ _io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
+ mempool_free_slab, _io_cache);
+ if (!_io_pool) {
+ kmem_cache_destroy(_io_cache);
+ return -ENOMEM;
+ }
+
_major = major;
- r = devfs_register_blkdev(_major, _name, &dm_blk_dops);
+ r = register_blkdev(_major, _name, &dm_blk_dops);
if (r < 0) {
DMERR("register_blkdev failed");
- kmem_cache_destroy(_io_hook_cache);
+ mempool_destroy(_io_pool);
+ kmem_cache_destroy(_io_cache);
return r;
}
blksize_size[_major] = _blksize_size;
hardsect_size[_major] = _hardsect_size;
- blk_queue_make_request(BLK_DEFAULT_QUEUE(_major), request);
-
- _dev_dir = devfs_mk_dir(0, DM_DIR, NULL);
+ blk_queue_make_request(BLK_DEFAULT_QUEUE(_major), dm_request);
return 0;
}
static void local_exit(void)
{
- if (kmem_cache_destroy(_io_hook_cache))
- DMWARN("io_hooks still allocated during unregistration");
- _io_hook_cache = NULL;
+ mempool_destroy(_io_pool);
+ kmem_cache_destroy(_io_cache);
- if (devfs_unregister_blkdev(_major, _name) < 0)
+ if (unregister_blkdev(_major, _name) < 0)
DMERR("devfs_unregister_blkdev failed");
read_ahead[_major] = 0;
* expands a prefix into a pair of function names.
*/
static struct {
- int (*init)(void);
- void (*exit)(void);
+ int (*init) (void);
+ void (*exit) (void);
} _inits[] = {
#define xx(n) {n ## _init, n ## _exit},
xx(local)
- xx(dm_hash)
xx(dm_target)
xx(dm_linear)
xx(dm_stripe)
xx(dm_snapshot)
-/* xx(dm_mirror) */
xx(dm_interface)
#undef xx
};
static int __init dm_init(void)
{
- const int count = sizeof(_inits) / sizeof(*_inits);
+ const int count = ARRAY_SIZE(_inits);
int r, i;
static void __exit dm_exit(void)
{
- int i = sizeof(_inits) / sizeof(*_inits);
+ int i = ARRAY_SIZE(_inits);
- dm_destroy_all();
while (i--)
_inits[i].exit();
}
{
struct mapped_device *md;
- md = dm_get_w(inode->i_rdev);
+ md = get_kdev(inode->i_rdev);
if (!md)
return -ENXIO;
- md->use_count++;
- dm_put_w(md);
-
return 0;
}
{
struct mapped_device *md;
- md = dm_get_w(inode->i_rdev);
- if (!md)
- return -ENXIO;
+ md = get_kdev(inode->i_rdev);
+ dm_put(md); /* put the reference gained by dm_blk_open */
+ dm_put(md);
+ return 0;
+}
- if (md->use_count < 1)
- DMWARN("incorrect reference count found in mapped_device");
+static inline struct dm_io *alloc_io(void)
+{
+ return mempool_alloc(_io_pool, GFP_NOIO);
+}
- md->use_count--;
- dm_put_w(md);
+static inline void free_io(struct dm_io *io)
+{
+ mempool_free(io, _io_pool);
+}
- return 0;
+static inline struct deferred_io *alloc_deferred(void)
+{
+ return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
+}
+
+static inline void free_deferred(struct deferred_io *di)
+{
+ kfree(di);
}
/* In 512-byte units */
#define VOLUME_SIZE(minor) (_block_size[(minor)] << 1)
+/* FIXME: check this */
static int dm_blk_ioctl(struct inode *inode, struct file *file,
uint command, unsigned long a)
{
return 0;
}
-static inline struct io_hook *alloc_io_hook(void)
-{
- return kmem_cache_alloc(_io_hook_cache, GFP_NOIO);
-}
-
-static inline void free_io_hook(struct io_hook *ih)
-{
- kmem_cache_free(_io_hook_cache, ih);
-}
-
/*
- * FIXME: We need to decide if deferred_io's need
- * their own slab, I say no for now since they are
- * only used when the device is suspended.
+ * Add the buffer to the list of deferred io.
*/
-static inline struct deferred_io *alloc_deferred(void)
+static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
{
- return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
-}
+ struct deferred_io *di;
-static inline void free_deferred(struct deferred_io *di)
-{
- kfree(di);
-}
+ di = alloc_deferred();
+ if (!di)
+ return -ENOMEM;
-/*
- * Call a target's optional error function if an I/O failed.
- */
-static inline int call_err_fn(struct io_hook *ih, struct buffer_head *bh)
-{
- dm_err_fn err = ih->target->type->err;
+ down_write(&md->lock);
- if (err)
- return err(bh, ih->rw, ih->target->private);
+ if (!test_bit(DMF_SUSPENDED, &md->flags)) {
+ up_write(&md->lock);
+ free_deferred(di);
+ return 1;
+ }
- return 0;
+ di->bh = bh;
+ di->rw = rw;
+ di->next = md->deferred;
+ md->deferred = di;
+
+ up_write(&md->lock);
+ return 0; /* deferred successfully */
}
/*
*/
static void dec_pending(struct buffer_head *bh, int uptodate)
{
- struct io_hook *ih = bh->b_private;
+ struct dm_io *io = bh->b_private;
- if (!uptodate && call_err_fn(ih, bh))
- return;
-
- if (atomic_dec_and_test(&ih->md->pending))
+ if (atomic_dec_and_test(&io->md->pending))
/* nudge anyone waiting on suspend queue */
- wake_up(&ih->md->wait);
+ wake_up(&io->md->wait);
- bh->b_end_io = ih->end_io;
- bh->b_private = ih->context;
- free_io_hook(ih);
+ bh->b_end_io = io->end_io;
+ bh->b_private = io->context;
+ free_io(io);
bh->b_end_io(bh, uptodate);
}
-/*
- * Add the bh to the list of deferred io.
- */
-static int queue_io(struct buffer_head *bh, int rw)
-{
- struct deferred_io *di = alloc_deferred();
- struct mapped_device *md;
-
- if (!di)
- return -ENOMEM;
-
- md = dm_get_w(bh->b_rdev);
- if (!md) {
- free_deferred(di);
- return -ENXIO;
- }
-
- if (!dm_flag(md, DMF_SUSPENDED)) {
- dm_put_w(md);
- free_deferred(di);
- return 1;
- }
-
- di->bh = bh;
- di->rw = rw;
- di->next = md->deferred;
- md->deferred = di;
-
- dm_put_w(md);
-
- return 0; /* deferred successfully */
-}
-
/*
 * Do the mapping for a given buffer head.
*/
static inline int __map_buffer(struct mapped_device *md,
- struct buffer_head *bh, int rw, int leaf)
+ int rw, struct buffer_head *bh)
{
int r;
- dm_map_fn fn;
- void *context;
- struct io_hook *ih = NULL;
- struct target *ti = md->map->targets + leaf;
-
- fn = ti->type->map;
- context = ti->private;
+ struct dm_io *io;
+ struct dm_target *ti;
- ih = alloc_io_hook();
+ ti = dm_table_find_target(md->map, bh->b_rsector);
+ if (!ti)
+ return -EINVAL;
- if (!ih)
- return -1;
+ io = alloc_io();
+ if (!io)
+ return -ENOMEM;
- ih->md = md;
- ih->rw = rw;
- ih->target = ti;
- ih->end_io = bh->b_end_io;
- ih->context = bh->b_private;
+ io->md = md;
+ io->end_io = bh->b_end_io;
+ io->context = bh->b_private;
- r = fn(bh, rw, context);
+ r = ti->type->map(ti, bh, rw);
if (r > 0) {
/* hook the end io request fn */
atomic_inc(&md->pending);
bh->b_end_io = dec_pending;
- bh->b_private = ih;
+ bh->b_private = io;
- } else if (r == 0)
+ } else
/* we don't need to hook */
- free_io_hook(ih);
-
- else if (r < 0) {
- free_io_hook(ih);
- return -1;
- }
+ free_io(io);
return r;
}
/*
- * Search the btree for the correct target.
+ * Checks to see if we should be deferring io, if so it queues it
+ * and returns 1.
*/
-static inline int __find_node(struct dm_table *t, struct buffer_head *bh)
+static inline int __deferring(struct mapped_device *md, int rw,
+ struct buffer_head *bh)
{
- int l, n = 0, k = 0;
- offset_t *node;
+ int r;
- for (l = 0; l < t->depth; l++) {
- n = get_child(n, k);
- node = get_node(t, l, n);
+ /*
+ * If we're suspended we have to queue this io for later.
+ */
+ while (test_bit(DMF_BLOCK_IO, &md->flags)) {
+ up_read(&md->lock);
+
+ /*
+ * There's no point deferring a read ahead
+ * request, just drop it.
+ */
+ if (rw == READA) {
+ down_read(&md->lock);
+ return -EIO;
+ }
+
+ r = queue_io(md, bh, rw);
+ down_read(&md->lock);
+
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ return 1; /* deferred successfully */
- for (k = 0; k < KEYS_PER_NODE; k++)
- if (node[k] >= bh->b_rsector)
- break;
}
- return (KEYS_PER_NODE * n) + k;
+ return 0;
}
-static int request(request_queue_t *q, int rw, struct buffer_head *bh)
+static int dm_request(request_queue_t *q, int rw, struct buffer_head *bh)
{
- struct mapped_device *md;
int r;
+ struct mapped_device *md;
- md = dm_get_r(bh->b_rdev);
+ md = get_kdev(bh->b_rdev);
if (!md) {
buffer_IO_error(bh);
return 0;
}
- /*
- * Sanity check.
- */
- if (bh->b_rsector & ((bh->b_size >> 9) - 1))
- DMERR("misaligned block requested logical "
- "sector (%lu), b_size (%d)",
- bh->b_rsector, bh->b_size);
-
- /*
- * If we're suspended we have to queue
- * this io for later.
- */
- while (dm_flag(md, DMF_SUSPENDED)) {
- dm_put_r(md);
-
- if (rw == READA)
- goto bad_no_lock;
+ down_read(&md->lock);
- r = queue_io(bh, rw);
+ r = __deferring(md, rw, bh);
+ if (r < 0)
+ goto bad;
+ else if (!r) {
+ /* not deferring */
+ r = __map_buffer(md, rw, bh);
if (r < 0)
- goto bad_no_lock;
-
- else if (r == 0)
- return 0; /* deferred successfully */
-
- /*
- * We're in a while loop, because someone could suspend
- * before we get to the following read lock.
- */
- md = dm_get_r(bh->b_rdev);
- if (!md) {
- buffer_IO_error(bh);
- return 0;
- }
- }
-
- if ((r = __map_buffer(md, bh, rw, __find_node(md->map, bh))) < 0)
- goto bad;
+ goto bad;
+ } else
+ r = 0;
- dm_put_r(md);
+ up_read(&md->lock);
+ dm_put(md);
return r;
bad:
- dm_put_r(md);
-
- bad_no_lock:
buffer_IO_error(bh);
+ up_read(&md->lock);
+ dm_put(md);
return 0;
}
/*
* Creates a dummy buffer head and maps it (for lilo).
*/
-static int do_bmap(kdev_t dev, unsigned long block,
- kdev_t * r_dev, unsigned long *r_block)
+static int __bmap(struct mapped_device *md, kdev_t dev, unsigned long block,
+ kdev_t *r_dev, unsigned long *r_block)
{
- struct mapped_device *md;
struct buffer_head bh;
+ struct dm_target *ti;
int r;
- struct target *t;
-
- md = dm_get_r(dev);
- if (!md)
- return -ENXIO;
- if (dm_flag(md, DMF_SUSPENDED)) {
- dm_put_r(md);
+ if (test_bit(DMF_BLOCK_IO, &md->flags)) {
return -EPERM;
}
if (!check_dev_size(dev, block)) {
- dm_put_r(md);
return -EINVAL;
}
bh.b_rsector = block * (bh.b_size >> 9);
/* find target */
- t = md->map->targets + __find_node(md->map, &bh);
+ ti = dm_table_find_target(md->map, bh.b_rsector);
/* do the mapping */
- r = t->type->map(&bh, READ, t->private);
+ r = ti->type->map(ti, &bh, READ);
- *r_dev = bh.b_rdev;
- *r_block = bh.b_rsector / (bh.b_size >> 9);
+ if (!r) {
+ *r_dev = bh.b_rdev;
+ *r_block = bh.b_rsector / (bh.b_size >> 9);
+ }
- dm_put_r(md);
return r;
}
/*
* Marshal the results of __bmap to/from the lv_bmap struct in userland.
*/
static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
{
+ struct mapped_device *md;
unsigned long block, r_block;
kdev_t r_dev;
int r;
if (get_user(block, &lvb->lv_block))
return -EFAULT;
- if ((r = do_bmap(inode->i_rdev, block, &r_dev, &r_block)))
- return r;
+ md = get_kdev(inode->i_rdev);
+ if (!md)
+ return -ENXIO;
- if (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) ||
- put_user(r_block, &lvb->lv_block))
- return -EFAULT;
+ down_read(&md->lock);
+ r = __bmap(md, inode->i_rdev, block, &r_dev, &r_block);
+ up_read(&md->lock);
+ dm_put(md);
- return 0;
+ if (!r && (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) ||
+ put_user(r_block, &lvb->lv_block)))
+ r = -EFAULT;
+
+ return r;
+}
+
+/*-----------------------------------------------------------------
+ * An array of mapped_device pointers, indexed by minor number,
+ * keeps track of allocated minors.
+ *---------------------------------------------------------------*/
+static spinlock_t _minor_lock = SPIN_LOCK_UNLOCKED;
+static struct mapped_device *_mds[MAX_DEVICES];
+
+static void free_minor(int minor)
+{
+ spin_lock(&_minor_lock);
+ _mds[minor] = NULL;
+ spin_unlock(&_minor_lock);
}
/*
- * See if the device with a specific minor # is free. Inserts
- * the device into the hashes.
+ * See if the device with a specific minor # is free.
*/
-static inline int specific_dev(int minor, struct mapped_device *md)
+static int specific_minor(int minor, struct mapped_device *md)
{
+ int r = -EBUSY;
+
if (minor >= MAX_DEVICES) {
DMWARN("request for a mapped_device beyond MAX_DEVICES (%d)",
MAX_DEVICES);
return -EINVAL;
}
- md->dev = mk_kdev(_major, minor);
- if (dm_hash_insert(md))
- /* in use */
- return -EBUSY;
+ spin_lock(&_minor_lock);
+ if (!_mds[minor]) {
+ _mds[minor] = md;
+ r = minor;
+ }
+ spin_unlock(&_minor_lock);
- return minor;
+ return r;
}
-/*
- * Find the first free device.
- */
-static int any_old_dev(struct mapped_device *md)
+static int next_free_minor(struct mapped_device *md)
{
int i;
- for (i = 0; i < MAX_DEVICES; i++)
- if (specific_dev(i, md) >= 0)
- return i;
+ spin_lock(&_minor_lock);
+ for (i = 0; i < MAX_DEVICES; i++) {
+ if (!_mds[i]) {
+ _mds[i] = md;
+ break;
+ }
+ }
+ spin_unlock(&_minor_lock);
+
+ return (i < MAX_DEVICES) ? i : -EBUSY;
+}
+
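+/*
+ * Translates a kdev_t into its mapped_device, taking a
+ * reference; returns NULL if the minor is not in use.
+ */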
+static struct mapped_device *get_kdev(kdev_t dev)
+{
+ struct mapped_device *md;
+
+ if (major(dev) != _major)
+ return NULL;
+
+ spin_lock(&_minor_lock);
+ md = _mds[minor(dev)];
+ if (md)
+ dm_get(md);
+ spin_unlock(&_minor_lock);
- return -EBUSY;
+ return md;
}
/*
- * Allocate and initialise a blank device, then insert it into
- * the hash tables. Caller must ensure uuid is null-terminated.
- * Device is returned with a write lock held.
+ * Allocate and initialise a blank device with a given minor.
*/
-static struct mapped_device *alloc_dev(const char *name, const char *uuid,
- int minor)
+static struct mapped_device *alloc_dev(int minor)
{
struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
return NULL;
}
- memset(md, 0, sizeof(*md));
- init_rwsem(&md->lock);
- down_write(&md->lock);
-
- /*
- * Copy in the name.
- */
- md->name = dm_strdup(name);
- if (!md->name)
- goto bad;
-
- /*
- * Copy in the uuid.
- */
- if (uuid && *uuid) {
- md->uuid = dm_strdup(uuid);
- if (!md->uuid) {
- DMWARN("unable to allocate uuid - out of memory.");
- goto bad;
- }
+ /* get a minor number for the dev */
+ minor = (minor < 0) ? next_free_minor(md) : specific_minor(minor, md);
+ if (minor < 0) {
+ kfree(md);
+ return NULL;
}
- /*
- * This will have inserted the device into the hashes iff
- * it succeeded.
- */
- minor = (minor < 0) ? any_old_dev(md) : specific_dev(minor, md);
- if (minor < 0)
- goto bad;
-
- dm_clear_flag(md, DMF_SUSPENDED);
- dm_set_flag(md, DMF_VALID);
- md->use_count = 0;
- md->deferred = NULL;
-
- md->pending = (atomic_t) ATOMIC_INIT(0);
+ memset(md, 0, sizeof(*md));
+ md->dev = mk_kdev(_major, minor);
+ init_rwsem(&md->lock);
+ atomic_set(&md->holders, 1);
+ atomic_set(&md->pending, 0);
init_waitqueue_head(&md->wait);
- return md;
-
- bad:
- if (md->name)
- kfree(md->name);
-
- if (md->uuid)
- kfree(md->uuid);
- kfree(md);
- return NULL;
+ return md;
}
static void free_dev(struct mapped_device *md)
{
- dm_hash_remove(md);
- kfree(md->name);
-
- if (md->uuid)
- kfree(md->uuid);
-
+ free_minor(minor(md->dev));
kfree(md);
}
-static int __register_device(struct mapped_device *md)
-{
- md->devfs_entry =
- devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER,
- MAJOR(md->dev), MINOR(md->dev),
- S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
- &dm_blk_dops, NULL);
-
- return 0;
-}
-
-static int __unregister_device(struct mapped_device *md)
-{
- devfs_unregister(md->devfs_entry);
- return 0;
-}
-
/*
* The hardsect size for a mapped device is the largest hardsect size
* from the devices it maps onto.
*/
static int __bind(struct mapped_device *md, struct dm_table *t)
{
- int minor = MINOR(md->dev);
-
+ int minor = minor(md->dev);
md->map = t;
- if (!t->num_targets) {
- _block_size[minor] = 0;
- _blksize_size[minor] = BLOCK_SIZE;
- _hardsect_size[minor] = 0;
- return 0;
- }
-
/* in k */
- _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1;
-
+ _block_size[minor] = dm_table_get_size(t) >> 1;
_blksize_size[minor] = BLOCK_SIZE;
- _hardsect_size[minor] = __find_hardsect_size(&t->devices);
+ _hardsect_size[minor] = __find_hardsect_size(dm_table_get_devices(t));
register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
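+
+ /* the md holds its own reference on the table until __unbind() */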
+ dm_table_get(t);
return 0;
}
static void __unbind(struct mapped_device *md)
{
- int minor = MINOR(md->dev);
+ int minor = minor(md->dev);
- dm_table_destroy(md->map);
+ dm_table_put(md->map);
md->map = NULL;
_block_size[minor] = 0;
_hardsect_size[minor] = 0;
}
-static int check_name(const char *name)
-{
- if (strchr(name, '/')) {
- DMWARN("invalid device name");
- return -EINVAL;
- }
-
- return 0;
-}
-
/*
* Constructor for a new device.
*/
-int dm_create(const char *name, const char *uuid, int minor, int ro,
- struct dm_table *table)
+int dm_create(int minor, struct dm_table *table, struct mapped_device **result)
{
int r;
struct mapped_device *md;
- r = check_name(name);
- if (r)
- return r;
-
- md = alloc_dev(name, uuid, minor);
+ md = alloc_dev(minor);
if (!md)
return -ENXIO;
- r = __register_device(md);
- if (r)
- goto bad;
-
r = __bind(md, table);
- if (r)
- goto bad;
-
- dm_set_ro(md, ro);
- dm_put_w(md);
- return 0;
-
- bad:
- dm_put_w(md);
- free_dev(md);
- return r;
-}
-
-/*
- * Renames the device. No lock held.
- */
-int dm_set_name(const char *name, const char *new_name)
-{
- int r;
- struct mapped_device *md;
-
- r = dm_hash_rename(name, new_name);
- if (r)
- return r;
-
- md = dm_get_name_w(new_name);
- r = __unregister_device(md);
- if (!r)
- r = __register_device(md);
- dm_put_w(md);
- return r;
-}
-
-/*
- * Destructor for the device. You cannot destroy an open device.
- * Write lock must be held before calling. md will have been
- * freed if call was successful.
- */
-int dm_destroy(struct mapped_device *md)
-{
- int r;
-
- if (md->use_count)
- return -EPERM;
-
- r = __unregister_device(md);
- if (r)
+ if (r) {
+ free_dev(md);
return r;
+ }
- /*
- * Signal that this md is now invalid so that nothing further
- * can acquire its lock.
- */
- dm_clear_flag(md, DMF_VALID);
-
- __unbind(md);
- free_dev(md);
+ *result = md;
return 0;
}
-/*
- * Destroy all devices - except open ones
- */
-void dm_destroy_all(void)
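+/*
+ * Reference counting: free_dev() runs when the last holder
+ * calls dm_put().
+ */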
+void dm_get(struct mapped_device *md)
{
- int i, some_destroyed, r;
- struct mapped_device *md;
-
- do {
- some_destroyed = 0;
- for (i = 0; i < MAX_DEVICES; i++) {
- md = dm_get_w(mk_kdev(_major, i));
- if (!md)
- continue;
-
- r = dm_destroy(md);
- if (r)
- dm_put_w(md);
- else
- some_destroyed = 1;
- }
- } while (some_destroyed);
+ atomic_inc(&md->holders);
}
-/*
- * Sets or clears the read-only flag for the device. Write lock
- * must be held.
- */
-void dm_set_ro(struct mapped_device *md, int ro)
+void dm_put(struct mapped_device *md)
{
- if (ro)
- dm_set_flag(md, DMF_RO);
- else
- dm_clear_flag(md, DMF_RO);
-
- set_device_ro(md->dev, ro);
+ if (atomic_dec_and_test(&md->holders)) {
+ __unbind(md);
+ free_dev(md);
+ }
}
/*
- * Requeue the deferred buffer_heads by calling generic_make_request.
+ * Requeue the deferred io by calling generic_make_request.
*/
static void flush_deferred_io(struct deferred_io *c)
{
}
/*
- * Swap in a new table (destroying old one). Write lock must be
- * held.
+ * Swap in a new table (destroying old one).
*/
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
int r;
+ down_write(&md->lock);
+
/* device must be suspended */
- if (!dm_flag(md, DMF_SUSPENDED))
+ if (!test_bit(DMF_SUSPENDED, &md->flags)) {
+ up_write(&md->lock);
return -EPERM;
+ }
__unbind(md);
-
r = __bind(md, table);
- if (r)
- return r;
+ if (r) {
+ up_write(&md->lock);
+ return r;
+ }
+ up_write(&md->lock);
return 0;
}
* filesystem. For example we might want to move some data in
* the background. Before the table can be swapped with
* dm_bind_table, dm_suspend must be called to flush any in
- * flight buffer_heads and ensure that any further io gets
- * deferred. Write lock must be held.
+ * flight io and ensure that any further io gets deferred.
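+ *
+ * A typical sequence is dm_suspend(md), dm_swap_table(md, t),
+ * then dm_resume(md).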
*/
-int dm_suspend(kdev_t dev)
+int dm_suspend(struct mapped_device *md)
{
- struct mapped_device *md;
DECLARE_WAITQUEUE(wait, current);
+ down_write(&md->lock);
+
/*
- * First we set the suspend flag so no more ios will be
+ * First we set the BLOCK_IO flag so no more ios will be
* mapped.
*/
- md = dm_get_w(dev);
- if (!md)
- return -ENXIO;
-
- if (dm_flag(md, DMF_SUSPENDED)) {
- dm_put_w(md);
+ if (test_bit(DMF_BLOCK_IO, &md->flags)) {
+ up_write(&md->lock);
return -EINVAL;
}
- dm_set_flag(md, DMF_SUSPENDED);
- dm_put_w(md);
+ set_bit(DMF_BLOCK_IO, &md->flags);
+ up_write(&md->lock);
/*
- * Then we wait for wait for the already mapped ios to
+ * Then we wait for the already mapped ios to
* complete.
*/
- md = dm_get_r(dev);
- if (!md)
- return -ENXIO;
- if (!dm_flag(md, DMF_SUSPENDED))
- return -EINVAL;
+ down_read(&md->lock);
add_wait_queue(&md->wait, &wait);
- current->state = TASK_UNINTERRUPTIBLE;
- do {
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
if (!atomic_read(&md->pending))
break;
schedule();
-
- } while (1);
+ }
current->state = TASK_RUNNING;
remove_wait_queue(&md->wait, &wait);
- dm_put_r(md);
+ up_read(&md->lock);
+
+ /* set_bit is atomic */
+ set_bit(DMF_SUSPENDED, &md->flags);
return 0;
}
-int dm_resume(kdev_t dev)
+int dm_resume(struct mapped_device *md)
{
- struct mapped_device *md;
struct deferred_io *def;
- md = dm_get_w(dev);
- if (!md)
- return -ENXIO;
-
- if (!dm_flag(md, DMF_SUSPENDED) || !md->map->num_targets) {
- dm_put_w(md);
+ down_write(&md->lock);
+ if (!test_bit(DMF_SUSPENDED, &md->flags) ||
+ !dm_table_get_size(md->map)) {
+ up_write(&md->lock);
return -EINVAL;
}
- dm_clear_flag(md, DMF_SUSPENDED);
+ clear_bit(DMF_SUSPENDED, &md->flags);
+ clear_bit(DMF_BLOCK_IO, &md->flags);
def = md->deferred;
md->deferred = NULL;
- dm_put_w(md);
+ up_write(&md->lock);
flush_deferred_io(def);
run_task_queue(&tq_disk);
return 0;
}
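+
+/*
+ * Returns the live table with an extra reference held; drop
+ * it with dm_table_put() when finished.
+ */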
+struct dm_table *dm_get_table(struct mapped_device *md)
+{
+ struct dm_table *t;
+
+ down_read(&md->lock);
+ t = md->map;
+ dm_table_get(t);
+ up_read(&md->lock);
+
+ return t;
+}
+
+kdev_t dm_kdev(struct mapped_device *md)
+{
+ kdev_t dev;
+
+ down_read(&md->lock);
+ dev = md->dev;
+ up_read(&md->lock);
+
+ return dev;
+}
+
+int dm_suspended(struct mapped_device *md)
+{
+ return test_bit(DMF_SUSPENDED, &md->flags);
+}
+
struct block_device_operations dm_blk_dops = {
- open: dm_blk_open,
- release: dm_blk_close,
- ioctl: dm_blk_ioctl,
- owner: THIS_MODULE
+ .open = dm_blk_open,
+ .release = dm_blk_close,
+ .ioctl = dm_blk_ioctl,
+ .owner = THIS_MODULE
};
/*
/*
* Internal header file for device mapper
*
- * Copyright (C) 2001 Sistina Software
+ * Copyright (C) 2001, 2002 Sistina Software
*
* This file is released under the LGPL.
*/
#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H
-#include <linux/config.h>
-#include <linux/version.h>
-#include <linux/major.h>
-#include <linux/iobuf.h>
-#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/compatmac.h>
-#include <linux/cache.h>
-#include <linux/devfs_fs_kernel.h>
-#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
-#include <linux/init.h>
+#include <linux/blkdev.h>
-#define DM_NAME "device-mapper" /* Name for messaging */
-#define DM_DRIVER_EMAIL "lvm-devel@lists.sistina.com"
-#define MAX_DEPTH 16
-#define NODE_SIZE L1_CACHE_BYTES
-#define KEYS_PER_NODE (NODE_SIZE / sizeof(offset_t))
-#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
-#define MAX_ARGS 32
-#define MAX_DEVICES 256
-
-/*
- * List of devices that a metadevice uses and should open/close.
- */
-struct dm_dev {
- atomic_t count;
- struct list_head list;
-
- int mode;
-
- kdev_t dev;
- struct block_device *bd;
-};
+#define DM_NAME "device-mapper"
+#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
+#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
+#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
/*
- * I/O that had to be deferred while we were suspended
+ * FIXME: I think this should be with the definition of sector_t
+ * in types.h.
*/
-struct deferred_io {
- int rw;
- struct buffer_head *bh;
- struct deferred_io *next;
-};
+#ifdef CONFIG_LBD
+#define SECTOR_FORMAT "%Lu"
+#else
+#define SECTOR_FORMAT "%lu"
+#endif
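+
+/* e.g.: DMWARN("sector " SECTOR_FORMAT " out of range", sector); */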
-/*
- * Btree leaf - this does the actual mapping
- */
-struct target {
- struct target_type *type;
- void *private;
-};
+extern struct block_device_operations dm_blk_dops;
/*
- * The btree
+ * List of devices that a metadevice uses and should open/close.
*/
-struct dm_table {
- /* btree table */
- int depth;
- int counts[MAX_DEPTH]; /* in nodes */
- offset_t *index[MAX_DEPTH];
-
- int num_targets;
- int num_allocated;
- offset_t *highs;
- struct target *targets;
+struct dm_dev {
+ struct list_head list;
- /*
- * Indicates the rw permissions for the new logical
- * device. This should be a combination of FMODE_READ
- * and FMODE_WRITE.
- */
+ atomic_t count;
int mode;
-
- /* a list of devices used by this table */
- struct list_head devices;
-
- /*
- * A waitqueue for processes waiting for something
- * interesting to happen to this table.
- */
- wait_queue_head_t eventq;
-};
-
-/*
- * The actual device struct
- */
-struct mapped_device {
- struct rw_semaphore lock;
- unsigned long flags;
-
kdev_t dev;
- char *name;
- char *uuid;
-
- int use_count;
-
- /* a list of io's that arrived while we were suspended */
- atomic_t pending;
- wait_queue_head_t wait;
- struct deferred_io *deferred;
-
- struct dm_table *map;
-
- /* used by dm-fs.c */
- devfs_handle_t devfs_entry;
+ struct block_device *bdev;
};
-extern struct block_device_operations dm_blk_dops;
-
-/* dm-target.c */
-int dm_target_init(void);
-struct target_type *dm_get_target_type(const char *name);
-void dm_put_target_type(struct target_type *t);
-void dm_target_exit(void);
+struct dm_table;
+struct mapped_device;
-/*
- * Destructively splits argument list to pass to ctr.
- */
-int split_args(int max, int *argc, char **argv, char *input);
+/*-----------------------------------------------------------------
+ * Functions for manipulating a struct mapped_device.
+ * Drop the reference with dm_put when you finish with the object.
+ *---------------------------------------------------------------*/
+int dm_create(int minor, struct dm_table *table, struct mapped_device **md);
/*
- * dm-hash manages the lookup of devices by dev/name/uuid.
+ * Reference counting for md.
*/
-int dm_hash_init(void);
-void dm_hash_exit(void);
-
-int dm_hash_insert(struct mapped_device *md);
-void dm_hash_remove(struct mapped_device *md);
-int dm_hash_rename(const char *old, const char *new);
+void dm_get(struct mapped_device *md);
+void dm_put(struct mapped_device *md);
/*
- * There are three ways to lookup a device: by kdev_t, by name
- * and by uuid. A code path (eg an ioctl) should only ever get
- * one device at any time.
- */
-struct mapped_device *dm_get_r(kdev_t dev);
-struct mapped_device *dm_get_w(kdev_t dev);
-
-struct mapped_device *dm_get_name_r(const char *name);
-struct mapped_device *dm_get_name_w(const char *name);
-
-struct mapped_device *dm_get_uuid_r(const char *uuid);
-struct mapped_device *dm_get_uuid_w(const char *uuid);
-
-static inline void dm_put_r(struct mapped_device *md)
-{
- up_read(&md->lock);
-}
-
-static inline void dm_put_w(struct mapped_device *md)
-{
- up_write(&md->lock);
-}
-
-/*
- * Call with no lock.
- */
-int dm_create(const char *name, const char *uuid, int minor, int ro,
- struct dm_table *table);
-int dm_set_name(const char *name, const char *newname);
-void dm_destroy_all(void);
-
-/*
- * You must have the write lock before calling the remaining md
- * methods.
+ * A device can still be used while suspended, but I/O is deferred.
*/
-int dm_destroy(struct mapped_device *md);
-void dm_set_ro(struct mapped_device *md, int ro);
+int dm_suspend(struct mapped_device *md);
+int dm_resume(struct mapped_device *md);
/*
* The device must be suspended before calling this method.
int dm_swap_table(struct mapped_device *md, struct dm_table *t);
/*
- * A device can still be used while suspended, but I/O is deferred.
+ * Drop a reference on the table when you've finished with the
+ * result.
*/
-int dm_suspend(kdev_t dev);
-int dm_resume(kdev_t dev);
-
-/* dm-table.c */
-int dm_table_create(struct dm_table **result, int mode);
-void dm_table_destroy(struct dm_table *t);
-
-int dm_table_add_target(struct dm_table *t, offset_t highs,
- struct target_type *type, void *private);
-int dm_table_complete(struct dm_table *t);
+struct dm_table *dm_get_table(struct mapped_device *md);
/*
- * Event handling
+ * Info functions.
*/
-void dm_table_event(struct dm_table *t);
+kdev_t dm_kdev(struct mapped_device *md);
+int dm_suspended(struct mapped_device *md);
-#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
-#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
-#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
+/*-----------------------------------------------------------------
+ * Functions for manipulating a table. Tables are also reference
+ * counted.
+ *---------------------------------------------------------------*/
+int dm_table_create(struct dm_table **result, int mode);
-/*
- * Calculate the index of the child node of the n'th node k'th key.
- */
-static inline int get_child(int n, int k)
-{
- return (n * CHILDREN_PER_NODE) + k;
-}
+void dm_table_get(struct dm_table *t);
+void dm_table_put(struct dm_table *t);
-/*
- * Return the n'th node of level l from table t.
- */
-static inline offset_t *get_node(struct dm_table *t, int l, int n)
-{
- return t->index[l] + (n * KEYS_PER_NODE);
-}
+int dm_table_add_target(struct dm_table *t, const char *type,
+ sector_t start, sector_t len, char *params);
+int dm_table_complete(struct dm_table *t);
+void dm_table_event(struct dm_table *t);
+sector_t dm_table_get_size(struct dm_table *t);
+struct dm_target *dm_table_get_target(struct dm_table *t, int index);
+struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+unsigned int dm_table_get_num_targets(struct dm_table *t);
+struct list_head *dm_table_get_devices(struct dm_table *t);
+int dm_table_get_mode(struct dm_table *t);
+void dm_table_add_wait_queue(struct dm_table *t, wait_queue_t *wq);
+
+/*-----------------------------------------------------------------
+ * A registry of target types.
+ *---------------------------------------------------------------*/
+int dm_target_init(void);
+void dm_target_exit(void);
+struct target_type *dm_get_target_type(const char *name);
+void dm_put_target_type(struct target_type *t);
+/*-----------------------------------------------------------------
+ * Useful inlines.
+ *---------------------------------------------------------------*/
static inline int array_too_big(unsigned long fixed, unsigned long obj,
unsigned long num)
{
return (num > (ULONG_MAX - fixed) / obj);
}
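+
+/*
+ * e.g. guards a kmalloc(fixed + obj * num), such as
+ * kmalloc(sizeof(*deps) + sizeof(*deps->dev) * count).
+ */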
-static inline char *dm_strdup(const char *str)
-{
- char *r = kmalloc(strlen(str) + 1, GFP_KERNEL);
- if (r)
- strcpy(r, str);
- return r;
-}
-
/*
- * Flags in struct mapped_device
+ * ceiling(n / size) * size
*/
-
-#define DMF_VALID 0
-#define DMF_SUSPENDED 1
-#define DMF_RO 2
-
-static inline int dm_flag(struct mapped_device *md, int flag)
+static inline unsigned long dm_round_up(unsigned long n, unsigned long size)
{
- return (md->flags & (1 << flag));
+ unsigned long r = n % size;
+ return n + (r ? (size - r) : 0);
}
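+
+/* e.g. dm_round_up(37, 16) == 48, dm_round_up(32, 16) == 32 */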
-static inline void dm_set_flag(struct mapped_device *md, int flag)
-{
- md->flags |= (1 << flag);
-}
-
-static inline void dm_clear_flag(struct mapped_device *md, int flag)
-{
- md->flags &= ~(1 << flag);
-}
+/*
+ * The device-mapper can be driven through one of two interfaces:
+ * ioctl or filesystem, depending on which patch you have applied.
+ */
+int dm_interface_init(void);
+void dm_interface_exit(void);
/*
- * Targets
+ * Targets for linear and striped mappings
*/
int dm_linear_init(void);
void dm_linear_exit(void);
int dm_snapshot_init(void);
void dm_snapshot_exit(void);
-/* Future */
-/* int dm_mirror_init(void); */
-/* void dm_mirror_exit(void); */
-
-/*
- * Init functions for the user interface to device-mapper. At
- * the moment an ioctl interface on a special char device is
- * used. A filesystem based interface would be a nicer way to
- * go.
- */
-int __init dm_interface_init(void);
-void dm_interface_exit(void);
-
#endif
* This file is released under the GPL.
*/
+#include <asm/atomic.h>
+
+#include <linux/blkdev.h>
#include <linux/config.h>
-#include <linux/module.h>
+#include <linux/device-mapper.h>
+#include <linux/fs.h>
#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/device-mapper.h>
+#include <linux/locks.h>
#include <linux/mempool.h>
-#include <asm/atomic.h>
+#include <linux/module.h>
#include <linux/pagemap.h>
-#include <linux/locks.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include "kcopyd.h"
{
struct kcopyd_job *job;
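+ /*
+ * GFP_NOIO: this can be called on the block i/o path, so the
+ * allocation must not recurse back into the block layer.
+ */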
- job = mempool_alloc(_job_pool, GFP_KERNEL);
+ job = mempool_alloc(_job_pool, GFP_NOIO);
if (!job)
return NULL;
static inline struct copy_info *alloc_copy_info(void)
{
- return mempool_alloc(_copy_pool, GFP_KERNEL);
+ return mempool_alloc(_copy_pool, GFP_NOIO);
}
static inline void free_copy_info(struct copy_info *info)
struct kcopyd_region {
kdev_t dev;
- offset_t sector;
- offset_t count;
+ sector_t sector;
+ sector_t count;
};
#define MAX_KCOPYD_PAGES 128
* Shifts and masks that will be useful when dispatching
* each buffer_head.
*/
- offset_t offset;
- offset_t block_size;
- offset_t block_shift;
- offset_t bpp_shift; /* blocks per page */
- offset_t bpp_mask;
+ sector_t offset;
+ sector_t block_size;
+ sector_t block_shift;
+ sector_t bpp_shift; /* blocks per page */
+ sector_t bpp_mask;
/*
* nr_blocks is how many buffer heads will have to be
* Set this to ensure you are notified when the job has
* completed. 'context' is for callback to use.
*/
- void (*callback)(struct kcopyd_job *job);
+ void (*callback) (struct kcopyd_job *job);
void *context;
};
* Submit a copy job to kcopyd. This is built on top of the
* previous three fns.
*/
-typedef void (*kcopyd_notify_fn)(int err, void *context);
+typedef void (*kcopyd_notify_fn) (int err, void *context);
int kcopyd_copy(struct kcopyd_region *from, struct kcopyd_region *to,
kcopyd_notify_fn fn, void *context);
/*
- * Copyright (C) 2001 Sistina Software (UK) Limited.
+ * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
*
* This file is released under the GPL.
*/
#include "dm.h"
+#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/dm-ioctl.h>
#include <linux/init.h>
#include <linux/wait.h>
+#include <linux/blk.h>
+#include <linux/slab.h>
+#include <asm/uaccess.h>
+
+#define DM_DRIVER_EMAIL "dm@uk.sistina.com"
/*-----------------------------------------------------------------
- * Implementation of the ioctl commands
+ * The ioctl interface needs to be able to look up devices by
+ * name or uuid.
*---------------------------------------------------------------*/
+struct hash_cell {
+ struct list_head name_list;
+ struct list_head uuid_list;
+
+ char *name;
+ char *uuid;
+ struct mapped_device *md;
+
+ /* I hate devfs */
+ devfs_handle_t devfs_entry;
+};
+
+#define NUM_BUCKETS 64
+#define MASK_BUCKETS (NUM_BUCKETS - 1)
+static struct list_head _name_buckets[NUM_BUCKETS];
+static struct list_head _uuid_buckets[NUM_BUCKETS];
+
+static devfs_handle_t _dev_dir;
+void dm_hash_remove_all(void);
/*
- * All the ioctl commands get dispatched to functions with this
- * prototype.
+ * Guards access to both hash tables.
*/
-typedef int (*ioctl_fn)(struct dm_ioctl *param, struct dm_ioctl *user);
+static DECLARE_RWSEM(_hash_lock);
+
+static void init_buckets(struct list_head *buckets)
+{
+ unsigned int i;
+
+ for (i = 0; i < NUM_BUCKETS; i++)
+ INIT_LIST_HEAD(buckets + i);
+}
+
+int dm_hash_init(void)
+{
+ init_buckets(_name_buckets);
+ init_buckets(_uuid_buckets);
+ _dev_dir = devfs_mk_dir(0, DM_DIR, NULL);
+ return 0;
+}
+
+void dm_hash_exit(void)
+{
+ dm_hash_remove_all();
+ devfs_unregister(_dev_dir);
+}
+
+/*-----------------------------------------------------------------
+ * Hash function:
+ * We're not really concerned with the str hash function being
+ * fast since it's only used by the ioctl interface.
+ *---------------------------------------------------------------*/
+static unsigned int hash_str(const char *str)
+{
+ const unsigned int hash_mult = 2654435387U;
+ unsigned int h = 0;
+
+ while (*str)
+ h = (h + (unsigned int) *str++) * hash_mult;
+
+ return h & MASK_BUCKETS;
+}
+
+/*-----------------------------------------------------------------
+ * Code for looking up a device by name
+ *---------------------------------------------------------------*/
+static struct hash_cell *__get_name_cell(const char *str)
+{
+ struct list_head *tmp;
+ struct hash_cell *hc;
+ unsigned int h = hash_str(str);
+
+ list_for_each(tmp, _name_buckets + h) {
+ hc = list_entry(tmp, struct hash_cell, name_list);
+ if (!strcmp(hc->name, str))
+ return hc;
+ }
+
+ return NULL;
+}
+
+static struct hash_cell *__get_uuid_cell(const char *str)
+{
+ struct list_head *tmp;
+ struct hash_cell *hc;
+ unsigned int h = hash_str(str);
+
+ list_for_each(tmp, _uuid_buckets + h) {
+ hc = list_entry(tmp, struct hash_cell, uuid_list);
+ if (!strcmp(hc->uuid, str))
+ return hc;
+ }
+
+ return NULL;
+}
+
+/*-----------------------------------------------------------------
+ * Inserting, removing and renaming a device.
+ *---------------------------------------------------------------*/
+static inline char *kstrdup(const char *str)
+{
+ char *r = kmalloc(strlen(str) + 1, GFP_KERNEL);
+ if (r)
+ strcpy(r, str);
+ return r;
+}
+
+static struct hash_cell *alloc_cell(const char *name, const char *uuid,
+ struct mapped_device *md)
+{
+ struct hash_cell *hc;
+
+ hc = kmalloc(sizeof(*hc), GFP_KERNEL);
+ if (!hc)
+ return NULL;
+
+ hc->name = kstrdup(name);
+ if (!hc->name) {
+ kfree(hc);
+ return NULL;
+ }
+
+ if (!uuid)
+ hc->uuid = NULL;
+
+ else {
+ hc->uuid = kstrdup(uuid);
+ if (!hc->uuid) {
+ kfree(hc->name);
+ kfree(hc);
+ return NULL;
+ }
+ }
+
+ INIT_LIST_HEAD(&hc->name_list);
+ INIT_LIST_HEAD(&hc->uuid_list);
+ hc->md = md;
+ return hc;
+}
+
+static void free_cell(struct hash_cell *hc)
+{
+ if (hc) {
+ kfree(hc->name);
+ kfree(hc->uuid);
+ kfree(hc);
+ }
+}
/*
- * This is really a debug only call.
+ * devfs stuff.
*/
-static int remove_all(struct dm_ioctl *param, struct dm_ioctl *user)
+static int register_with_devfs(struct hash_cell *hc)
+{
+ kdev_t dev = dm_kdev(hc->md);
+
+ hc->devfs_entry =
+ devfs_register(_dev_dir, hc->name, DEVFS_FL_CURRENT_OWNER,
+ major(dev), minor(dev),
+ S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
+ &dm_blk_dops, NULL);
+
+ return 0;
+}
+
+static int unregister_with_devfs(struct hash_cell *hc)
+{
+ devfs_unregister(hc->devfs_entry);
+ return 0;
+}
+
+/*
+ * The kdev_t and uuid of a device can never change once it is
+ * initially inserted.
+ */
+int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
+{
+ struct hash_cell *cell;
+
+ /*
+ * Allocate the new cell.
+ */
+ cell = alloc_cell(name, uuid, md);
+ if (!cell)
+ return -ENOMEM;
+
+ /*
+ * Insert the cell into both hash tables.
+ */
+ down_write(&_hash_lock);
+ if (__get_name_cell(name))
+ goto bad;
+
+ list_add(&cell->name_list, _name_buckets + hash_str(name));
+
+ if (uuid) {
+ if (__get_uuid_cell(uuid)) {
+ list_del(&cell->name_list);
+ goto bad;
+ }
+ list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
+ }
+ register_with_devfs(cell);
+ dm_get(md);
+ up_write(&_hash_lock);
+
+ return 0;
+
+ bad:
+ up_write(&_hash_lock);
+ free_cell(cell);
+ return -EBUSY;
+}
+
+void __hash_remove(struct hash_cell *hc)
+{
+ /* remove from the name and uuid hashes */
+ list_del(&hc->uuid_list);
+ list_del(&hc->name_list);
+ unregister_with_devfs(hc);
+ dm_put(hc->md);
+}
+
+void dm_hash_remove_all(void)
+{
+ int i;
+ struct hash_cell *hc;
+ struct list_head *tmp, *n;
+
+ down_write(&_hash_lock);
+ for (i = 0; i < NUM_BUCKETS; i++) {
+ list_for_each_safe(tmp, n, _name_buckets + i) {
+ hc = list_entry(tmp, struct hash_cell, name_list);
+ __hash_remove(hc);
+ }
+ }
+ up_write(&_hash_lock);
+}
+
+int dm_hash_rename(const char *old, const char *new)
{
- dm_destroy_all();
+ char *new_name, *old_name;
+ struct hash_cell *hc;
+
+ /*
+ * duplicate new.
+ */
+ new_name = kstrdup(new);
+ if (!new_name)
+ return -ENOMEM;
+
+ down_write(&_hash_lock);
+
+ /*
+ * Is new free ?
+ */
+ hc = __get_name_cell(new);
+ if (hc) {
+ DMWARN("asked to rename to an already existing name %s -> %s",
+ old, new);
+ up_write(&_hash_lock);
+ return -EBUSY;
+ }
+
+ /*
+ * Is there such a device as 'old' ?
+ */
+ hc = __get_name_cell(old);
+ if (!hc) {
+ DMWARN("asked to rename a non existent device %s -> %s",
+ old, new);
+ up_write(&_hash_lock);
+ return -ENXIO;
+ }
+
+ /*
+ * rename and move the name cell.
+ */
+ list_del(&hc->name_list);
+ old_name = hc->name;
+ hc->name = new_name;
+ list_add(&hc->name_list, _name_buckets + hash_str(new_name));
+
+ /* rename the device node in devfs */
+ unregister_with_devfs(hc);
+ register_with_devfs(hc);
+
+ up_write(&_hash_lock);
+ kfree(old_name);
return 0;
}
+
+/*-----------------------------------------------------------------
+ * Implementation of the ioctl commands
+ *---------------------------------------------------------------*/
+
+/*
+ * All the ioctl commands get dispatched to functions with this
+ * prototype.
+ */
+typedef int (*ioctl_fn)(struct dm_ioctl *param, struct dm_ioctl *user);
+
/*
* Check a string doesn't overrun the chunk of
* memory we copied from userland.
return valid_str(*params, begin, end);
}
-/*
- * Checks to see if there's a gap in the table.
- * Returns true iff there is a gap.
- */
-static int gap(struct dm_table *table, struct dm_target_spec *spec)
-{
- if (!table->num_targets)
- return (spec->sector_start > 0) ? 1 : 0;
-
- if (spec->sector_start != table->highs[table->num_targets - 1] + 1)
- return 1;
-
- return 0;
-}
-
static int populate_table(struct dm_table *table, struct dm_ioctl *args)
{
- int i = 0, r, first = 1, argc;
+ int i = 0, r, first = 1;
struct dm_target_spec *spec;
- char *params, *argv[MAX_ARGS];
- struct target_type *ttype;
- void *context, *begin, *end;
- offset_t highs = 0;
+ char *params;
+ void *begin, *end;
if (!args->target_count) {
DMWARN("populate_table: no targets specified");
begin = (void *) args;
end = begin + args->data_size;
-#define PARSE_ERROR(msg) {DMWARN(msg); return -EINVAL;}
-
for (i = 0; i < args->target_count; i++) {
if (first)
r = next_target(spec, spec->next, begin, end,
&spec, ¶ms);
- if (r)
- PARSE_ERROR("unable to find target");
-
- /* Look up the target type */
- ttype = dm_get_target_type(spec->target_type);
- if (!ttype)
- PARSE_ERROR("unable to find target type");
-
- if (gap(table, spec))
- PARSE_ERROR("gap in target ranges");
-
- /* Split up the parameter list */
- if (split_args(MAX_ARGS, &argc, argv, params) < 0)
- PARSE_ERROR("Too many arguments");
-
- /* Build the target */
- if (ttype->ctr(table, spec->sector_start, spec->length,
- argc, argv, &context)) {
- DMWARN("%s: target constructor failed",
- (char *) context);
+ if (r) {
+ DMWARN("unable to find target");
return -EINVAL;
}
- /* Add the target to the table */
- highs = spec->sector_start + (spec->length - 1);
- if (dm_table_add_target(table, highs, ttype, context))
- PARSE_ERROR("internal error adding target to table");
+ r = dm_table_add_target(table, spec->target_type,
+ spec->sector_start, spec->length,
+ params);
+ if (r) {
+ DMWARN("internal error adding target to table");
+ return -EINVAL;
+ }
first = 0;
}
-#undef PARSE_ERROR
-
- r = dm_table_complete(table);
- return r;
+ return dm_table_complete(table);
}
/*
* Fills in a dm_ioctl structure, ready for sending back to
* userland.
*/
-static void __info(struct mapped_device *md, struct dm_ioctl *param)
+static int __info(struct mapped_device *md, struct dm_ioctl *param)
{
+ kdev_t dev = dm_kdev(md);
+ struct dm_table *table;
+ struct block_device *bdev;
+
param->flags = DM_EXISTS_FLAG;
- if (dm_flag(md, DMF_SUSPENDED))
+ if (dm_suspended(md))
param->flags |= DM_SUSPEND_FLAG;
- if (dm_flag(md, DMF_RO))
- param->flags |= DM_READONLY_FLAG;
- strncpy(param->name, md->name, sizeof(param->name));
+ param->dev = kdev_t_to_nr(dev);
+ bdev = bdget(param->dev);
+ if (!bdev)
+ return -ENXIO;
- if (md->uuid)
- strncpy(param->uuid, md->uuid, sizeof(param->uuid) - 1);
- else
- param->uuid[0] = '\0';
+ param->open_count = bdev->bd_openers;
+ bdput(bdev);
- param->open_count = md->use_count;
- param->dev = kdev_t_to_nr(md->dev);
- param->target_count = md->map->num_targets;
+ if (is_read_only(dev))
+ param->flags |= DM_READONLY_FLAG;
+
+ table = dm_get_table(md);
+ param->target_count = dm_table_get_num_targets(table);
+ dm_table_put(table);
+
+ return 0;
}
/*
* Always use UUID for lookups if it's present, otherwise use name.
*/
-static inline struct mapped_device *find_device_r(struct dm_ioctl *param)
+static inline struct mapped_device *find_device(struct dm_ioctl *param)
{
- return (*param->uuid ?
- dm_get_uuid_r(param->uuid) :
- dm_get_name_r(param->name));
-}
+ struct hash_cell *hc;
+ struct mapped_device *md = NULL;
-static inline struct mapped_device *find_device_w(struct dm_ioctl *param)
-{
- return (*param->uuid ?
- dm_get_uuid_w(param->uuid) :
- dm_get_name_w(param->name));
+ down_read(&_hash_lock);
+ hc = *param->uuid ? __get_uuid_cell(param->uuid) :
+ __get_name_cell(param->name);
+ if (hc) {
+ md = hc->md;
+
+ /*
+ * Sneakily write in both the name and the uuid
+ * while we have the cell.
+ */
+ strncpy(param->name, hc->name, sizeof(param->name));
+ if (hc->uuid)
+ strncpy(param->uuid, hc->uuid, sizeof(param->uuid) - 1);
+ else
+ param->uuid[0] = '\0';
+
+ dm_get(md);
+ }
+ up_read(&_hash_lock);
+
+ return md;
}
#define ALIGNMENT sizeof(int)
param->flags = 0;
- md = find_device_r(param);
+ md = find_device(param);
if (!md)
/*
* Device not found - returns cleared exists flag.
goto out;
__info(md, param);
- dm_put_r(md);
+ dm_put(md);
out:
return results_to_user(user, param, NULL, 0);
return mode;
}
+static int check_name(const char *name)
+{
+ if (strchr(name, '/')) {
+ DMWARN("invalid device name");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int create(struct dm_ioctl *param, struct dm_ioctl *user)
{
- int r, ro;
+ int r;
+ kdev_t dev;
struct dm_table *t;
+ struct mapped_device *md;
int minor;
+ r = check_name(param->name);
+ if (r)
+ return r;
+
r = dm_table_create(&t, get_mode(param));
if (r)
return r;
r = populate_table(t, param);
if (r) {
- dm_table_destroy(t);
+ dm_table_put(t);
return r;
}
minor = (param->flags & DM_PERSISTENT_DEV_FLAG) ?
- MINOR(to_kdev_t(param->dev)) : -1;
-
- ro = (param->flags & DM_READONLY_FLAG) ? 1 : 0;
+ minor(to_kdev_t(param->dev)) : -1;
- r = dm_create(param->name, param->uuid, minor, ro, t);
+ r = dm_create(minor, t, &md);
if (r) {
- dm_table_destroy(t);
+ dm_table_put(t);
return r;
}
+ dm_table_put(t); /* md will have grabbed its own reference */
- r = info(param, user);
- return r;
-}
-
+ dev = dm_kdev(md);
+ set_device_ro(dev, (param->flags & DM_READONLY_FLAG));
+ r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
+ dm_put(md);
+ return r ? r : info(param, user);
+}
/*
* Build up the status struct for each target
static int __status(struct mapped_device *md, struct dm_ioctl *param,
char *outbuf, int *len)
{
- int i;
+ int i, num_targets;
struct dm_target_spec *spec;
- uint64_t sector = 0LL;
char *outptr;
status_type_t type;
+ struct dm_table *table = dm_get_table(md);
if (param->flags & DM_STATUS_TABLE_FLAG)
type = STATUSTYPE_TABLE;
outptr = outbuf;
/* Get all the target info */
- for (i = 0; i < md->map->num_targets; i++) {
- struct target_type *tt = md->map->targets[i].type;
- offset_t high = md->map->highs[i];
+ num_targets = dm_table_get_num_targets(table);
+ for (i = 0; i < num_targets; i++) {
+ struct dm_target *ti = dm_table_get_target(table, i);
if (outptr - outbuf +
- sizeof(struct dm_target_spec) > param->data_size)
- return -ENOMEM;
+ sizeof(struct dm_target_spec) > param->data_size) {
+ dm_table_put(table);
+ return -ENOMEM;
+ }
spec = (struct dm_target_spec *) outptr;
spec->status = 0;
- spec->sector_start = sector;
- spec->length = high - sector + 1;
- strncpy(spec->target_type, tt->name, sizeof(spec->target_type));
+ spec->sector_start = ti->begin;
+ spec->length = ti->len;
+ strncpy(spec->target_type, ti->type->name,
+ sizeof(spec->target_type));
outptr += sizeof(struct dm_target_spec);
/* Get the status/table string from the target driver */
- if (tt->status)
- tt->status(type, outptr,
- outbuf + param->data_size - outptr,
- md->map->targets[i].private);
+ if (ti->type->status)
+ ti->type->status(ti, type, outptr,
+ outbuf + param->data_size - outptr);
else
outptr[0] = '\0';
outptr += strlen(outptr) + 1;
_align(outptr, ALIGNMENT);
-
- sector = high + 1;
-
spec->next = outptr - outbuf;
}
- param->target_count = md->map->num_targets;
+ param->target_count = num_targets;
*len = outptr - outbuf;
+ dm_table_put(table);
return 0;
}
int ret;
char *outbuf = NULL;
- md = find_device_r(param);
+ md = find_device(param);
if (!md)
/*
* Device not found - returns cleared exists flag.
out:
if (md)
- dm_put_r(md);
+ dm_put(md);
ret = results_to_user(user, param, outbuf, len);
static int wait_device_event(struct dm_ioctl *param, struct dm_ioctl *user)
{
struct mapped_device *md;
+ struct dm_table *table;
DECLARE_WAITQUEUE(wq, current);
- md = find_device_r(param);
+ md = find_device(param);
if (!md)
/*
* Device not found - returns cleared exists flag.
*/
goto out;
+
/*
* Setup the basic dm_ioctl structure.
*/
* Wait for a notification event
*/
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&md->map->eventq, &wq);
-
- dm_put_r(md);
+ table = dm_get_table(md);
+ dm_table_add_wait_queue(table, &wq);
+ dm_table_put(table);
+ dm_put(md);
- schedule();
+ yield();
set_current_state(TASK_RUNNING);
out:
struct list_head *tmp;
size_t len = 0;
struct dm_target_deps *deps = NULL;
+ struct dm_table *table;
- md = find_device_r(param);
+ md = find_device(param);
if (!md)
goto out;
+ table = dm_get_table(md);
/*
* Setup the basic dm_ioctl structure.
* Count the devices.
*/
count = 0;
- list_for_each(tmp, &md->map->devices)
+ list_for_each(tmp, dm_table_get_devices(table))
count++;
/*
* struct.
*/
if (array_too_big(sizeof(*deps), sizeof(*deps->dev), count)) {
- dm_put_r(md);
+ dm_table_put(table);
+ dm_put(md);
return -ENOMEM;
}
len = sizeof(*deps) + (sizeof(*deps->dev) * count);
deps = kmalloc(len, GFP_KERNEL);
if (!deps) {
- dm_put_r(md);
+ dm_table_put(table);
+ dm_put(md);
return -ENOMEM;
}
*/
deps->count = count;
count = 0;
- list_for_each(tmp, &md->map->devices) {
+ list_for_each(tmp, dm_table_get_devices(table)) {
struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
- deps->dev[count++] = kdev_t_to_nr(dd->dev);
+ deps->dev[count++] = dd->bdev->bd_dev;
}
- dm_put_r(md);
+ dm_table_put(table);
+ dm_put(md);
out:
r = results_to_user(user, param, deps, len);
static int remove(struct dm_ioctl *param, struct dm_ioctl *user)
{
- int r;
- struct mapped_device *md;
-
- md = find_device_w(param);
- if (!md)
- return -ENXIO;
-
- /*
- * This unlocks and deallocates md.
- */
- r = dm_destroy(md);
- if (r) {
- dm_put_w(md);
- return r;
+ struct hash_cell *hc;
+
+ down_write(&_hash_lock);
+ hc = *param->uuid ? __get_uuid_cell(param->uuid) :
+ __get_name_cell(param->name);
+ if (!hc) {
+ DMWARN("device doesn't appear to be in the dev hash table.");
+ up_write(&_hash_lock);
+ return -EINVAL;
}
+ __hash_remove(hc);
+ up_write(&_hash_lock);
+ return 0;
+}
+
+static int remove_all(struct dm_ioctl *param, struct dm_ioctl *user)
+{
+ dm_hash_remove_all();
return 0;
}
static int suspend(struct dm_ioctl *param, struct dm_ioctl *user)
{
+ int r;
struct mapped_device *md;
- kdev_t dev;
- md = find_device_r(param);
+ md = find_device(param);
if (!md)
return -ENXIO;
- dev = md->dev;
- dm_put_r(md);
+ if (param->flags & DM_SUSPEND_FLAG)
+ r = dm_suspend(md);
+ else
+ r = dm_resume(md);
- return (param->flags & DM_SUSPEND_FLAG) ?
- dm_suspend(dev) : dm_resume(dev);
+ dm_put(md);
+ return r;
}
static int reload(struct dm_ioctl *param, struct dm_ioctl *user)
{
int r;
+ kdev_t dev;
struct mapped_device *md;
struct dm_table *t;
r = populate_table(t, param);
if (r) {
- dm_table_destroy(t);
+ dm_table_put(t);
return r;
}
- md = find_device_w(param);
+ md = find_device(param);
if (!md) {
- dm_table_destroy(t);
+ dm_table_put(t);
return -ENXIO;
}
r = dm_swap_table(md, t);
if (r) {
- dm_put_w(md);
- dm_table_destroy(t);
+ dm_put(md);
+ dm_table_put(t);
return r;
}
- dm_set_ro(md, (param->flags & DM_READONLY_FLAG) ? 1 : 0);
- dm_put_w(md);
+ dev = dm_kdev(md);
+ set_device_ro(dev, (param->flags & DM_READONLY_FLAG));
+ dm_put(md);
r = info(param, user);
return r;
static int rename(struct dm_ioctl *param, struct dm_ioctl *user)
{
- char *newname = (char *) param + param->data_start;
-
- if (!param->name) {
- DMWARN("Invalid old logical volume name supplied.");
- return -EINVAL;
- }
+ int r;
+ char *new_name = (char *) param + param->data_start;
- if (valid_str(newname, (void *) param,
+ if (valid_str(new_name, (void *) param,
(void *) param + param->data_size)) {
DMWARN("Invalid new logical volume name supplied.");
return -EINVAL;
}
- return dm_set_name(param->name, newname);
+ r = check_name(new_name);
+ if (r)
+ return r;
+
+ return dm_hash_rename(param->name, new_name);
}
* Implementation of open/close/ioctl on the special char
* device.
*---------------------------------------------------------------*/
-static int ctl_open(struct inode *inode, struct file *file)
-{
- /* only root can open this */
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
-
- MOD_INC_USE_COUNT;
-
- return 0;
-}
-
-static int ctl_close(struct inode *inode, struct file *file)
-{
- MOD_DEC_USE_COUNT;
- return 0;
-}
-
static ioctl_fn lookup_ioctl(unsigned int cmd)
{
static struct {
{DM_TARGET_STATUS_CMD, get_status},
{DM_TARGET_WAIT_CMD, wait_device_event},
};
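+
+ /* _ioctls is indexed directly by command number, so the
+ * entries above must stay in command order */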
- static int nelts = sizeof(_ioctls) / sizeof(*_ioctls);
- return (cmd >= nelts) ? NULL : _ioctls[cmd].fn;
+ return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
}
/*
static int ctl_ioctl(struct inode *inode, struct file *file,
uint command, ulong u)
{
-
int r = 0, cmd;
struct dm_ioctl *param;
struct dm_ioctl *user = (struct dm_ioctl *) u;
ioctl_fn fn = NULL;
+ /* only root can play with this */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
if (_IOC_TYPE(command) != DM_IOCTL)
return -ENOTTY;
}
static struct file_operations _ctl_fops = {
- open: ctl_open,
- release: ctl_close,
- ioctl: ctl_ioctl,
- owner: THIS_MODULE,
+ .ioctl = ctl_ioctl,
+ .owner = THIS_MODULE,
};
static devfs_handle_t _ctl_handle;
static struct miscdevice _dm_misc = {
- minor: MISC_DYNAMIC_MINOR,
- name: DM_NAME,
- fops: &_ctl_fops
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = DM_NAME,
+ .fops = &_ctl_fops
};
-static int __init dm_devfs_init(void) {
+/*
+ * Create misc character device and link to DM_DIR/control.
+ */
+int __init dm_interface_init(void)
+{
int r;
char rname[64];
+ r = dm_hash_init();
+ if (r)
+ return r;
+
+ r = misc_register(&_dm_misc);
+ if (r) {
+ DMERR("misc_register failed for control device");
+ dm_hash_exit();
+ return r;
+ }
+
r = devfs_generate_path(_dm_misc.devfs_handle, rname + 3,
sizeof rname - 3);
if (r == -ENOSYS)
if (r < 0) {
DMERR("devfs_generate_path failed for control device");
- return r;
+ goto failed;
}
strncpy(rname + r, "../", 3);
DEVFS_FL_DEFAULT, rname + r, &_ctl_handle, NULL);
if (r) {
DMERR("devfs_mk_symlink failed for control device");
- return r;
+ goto failed;
}
devfs_auto_unregister(_dm_misc.devfs_handle, _ctl_handle);
- return 0;
-}
-
-/*
- * Create misc character device and link to DM_DIR/control.
- */
-int __init dm_interface_init(void)
-{
- int r;
-
- r = misc_register(&_dm_misc);
- if (r) {
- DMERR("misc_register failed for control device");
- return r;
- }
-
- r = dm_devfs_init();
- if (r) {
- misc_deregister(&_dm_misc);
- return r;
- }
-
DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
DM_DRIVER_EMAIL);
-
return 0;
+
+ failed:
+ dm_hash_exit();
+ misc_deregister(&_dm_misc);
+ return r;
}
void dm_interface_exit(void)
{
+ dm_hash_exit();
+
if (misc_deregister(&_dm_misc) < 0)
DMERR("misc_deregister failed for control device");
}
#ifndef _LINUX_DM_IOCTL_H
#define _LINUX_DM_IOCTL_H
-#include <linux/device-mapper.h>
#include <linux/types.h>
+#define DM_DIR "mapper" /* Slashes not supported */
+#define DM_MAX_TYPE_NAME 16
+#define DM_NAME_LEN 128
+#define DM_UUID_LEN 129
+
/*
* Implements a traditional ioctl interface to the device mapper.
*/
#define DM_VERSION_MAJOR 1
#define DM_VERSION_MINOR 0
-#define DM_VERSION_PATCHLEVEL 5
-#define DM_VERSION_EXTRA "-ioctl-cvs (2002-10-14)"
+#define DM_VERSION_PATCHLEVEL 7
+#define DM_VERSION_EXTRA "-ioctl-cvs (2002-11-13)"
/* Status bits */
#define DM_READONLY_FLAG 0x00000001