-0.96.03-cvs (2002-06-27)
+0.96.04-cvs (2002-08-14)
* are opened/closed correctly
*/
int dm_table_get_device(struct dm_table *t, const char *path,
- offset_t start, offset_t len, struct dm_dev **result);
+ offset_t start, offset_t len,
+ int mode, struct dm_dev **result);
void dm_table_put_device(struct dm_table *table, struct dm_dev *d);
/*
goto bad;
}
- if (dm_table_get_device(t, argv[0], start, l, &lc->dev)) {
+ if (dm_table_get_device(t, argv[0], start, l, t->mode, &lc->dev)) {
*context = "dm-linear: Device lookup failed";
goto bad;
}
dest.sector = bh->b_rsector - mc->from_delta + mc->to_delta;
dest.count = bh->b_size / 512;
kcopyd_write_pages(&dest, 1, &bh->b_page,
- ((long) bh->b_data -
- (long) page_address(bh->b_page)) / 512,
+ ((long)bh->b_data -
+ (long)page_address(bh->b_page)) / 512,
mirror_callback, mc);
}
struct mirror_c *lc = (struct mirror_c *) context;
struct buffer_head *bh;
+
/* Submit, and mirror any pending BHs */
down_write(&lc->lock);
src.dev = lc->fromdev->dev;
src.sector = lc->frompos + lc->got_to;
- src.count = min((unsigned long) lc->chunksize,
+ src.count = min((unsigned long)lc->chunksize,
lc->size - lc->got_to);
dest.dev = lc->todev->dev;
return -ENOMEM;
}
- if (dm_table_get_device(t, argv[0], 0, l, &lc->fromdev)) {
+ if (dm_table_get_device(t, argv[0], 0, l, t->mode, &lc->fromdev)) {
*context = "dm-mirror: Device lookup failed";
goto bad;
}
goto bad;
}
- if (dm_table_get_device(t, argv[2], 0, l, &lc->todev)) {
+ if (dm_table_get_device(t, argv[2], 0, l, t->mode, &lc->todev)) {
*context = "dm-mirror: Device lookup failed";
dm_table_put_device(t, lc->fromdev);
goto bad;
/* Tell kcopyd to do the biz */
src.dev = lc->fromdev->dev;
src.sector = offset1;
- src.count = min((unsigned long) chunksize, lc->size);
+ src.count = min((unsigned long)chunksize, lc->size);
dest.dev = lc->todev->dev;
dest.sector = offset2;
return 0;
}
+/*
+ * Round a number up to the nearest 'size' boundary. size must
+ * be a power of 2.
+ */
+static inline ulong round_up(ulong n, ulong size)
+{
+	size--; /* power-of-2 size, so size - 1 is an all-ones low-bit mask */
+	return (n + size) & ~size; /* add the mask, then clear the low bits */
+}
+
/*
* Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
*/
goto bad;
}
- r = dm_table_get_device(t, origin_path, 0, 0, &s->origin);
+ r = dm_table_get_device(t, origin_path, 0, 0, FMODE_READ, &s->origin);
if (r) {
*context = "Cannot get origin device";
goto bad_free;
}
- r = dm_table_get_device(t, cow_path, 0, 0, &s->cow);
+ r = dm_table_get_device(t, cow_path, 0, 0,
+ FMODE_READ | FMODE_WRITE, &s->cow);
if (r) {
dm_table_put_device(t, s->origin);
*context = "Cannot get COW device";
goto bad_free;
}
- /* Chunk size must be multiple of page size. If it's wrong, fix it */
- if (chunk_size < (PAGE_SIZE / SECTOR_SIZE))
- chunk_size = PAGE_SIZE / SECTOR_SIZE;
+ /*
+ * Chunk size must be multiple of page size. Silently
+ * round up if it's not.
+ */
+ chunk_size = round_up(chunk_size, PAGE_SIZE / SECTOR_SIZE);
/* Validate the chunk size against the device block size */
blocksize = get_hardsect_size(s->cow->dev);
return -EINVAL;
}
- r = dm_table_get_device(t, argv[0], 0, l, &dev);
+ r = dm_table_get_device(t, argv[0], 0, l, t->mode, &dev);
if (r) {
*context = "Cannot get target device";
return r;
return -EINVAL;
if (dm_table_get_device(t, argv[0], start, sc->stripe_width,
- &sc->stripe[stripe].dev))
+ t->mode, &sc->stripe[stripe].dev))
return -ENXIO;
sc->stripe[stripe].physical_start = start;
return 0;
}
-int dm_table_create(struct dm_table **result)
+int dm_table_create(struct dm_table **result, int mode)
{
struct dm_table *t = kmalloc(sizeof(*t), GFP_NOIO);
}
init_waitqueue_head(&t->eventq);
+ t->mode = mode;
*result = t;
return 0;
}
if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
return -ENOMEM;
- if ((err = blkdev_get(d->bd, FMODE_READ | FMODE_WRITE, 0, BDEV_FILE)))
+ if ((err = blkdev_get(d->bd, d->mode, 0, BDEV_FILE)))
return err;
return 0;
return ((start < dev_size) && (len <= (dev_size - start)));
}
+/*
+ * This upgrades the mode on an already open dm_dev. Being
+ * careful to leave things as they were if we fail to reopen the
+ * device.
+ */
+static int upgrade_mode(struct dm_dev *dd, int new_mode)
+{
+	int r;
+	struct dm_dev dd_copy;
+
+	/* Keep a snapshot of the open device so we can either close
+	 * the old open (on success) or restore it untouched (on failure). */
+	memcpy(&dd_copy, dd, sizeof(dd_copy));
+
+	/* Widen, never narrow: OR the new bits into the existing mode */
+	dd->mode |= new_mode;
+	/* NOTE(review): bd is cleared so open_dev() acquires a fresh
+	 * block_device with the wider mode; dd_copy still references the
+	 * old one — confirm against open_dev()/close_dev(). */
+	dd->bd = NULL;
+	r = open_dev(dd);
+	if (!r)
+		close_dev(&dd_copy); /* reopen worked: release the old open */
+	else
+		memcpy(dd, &dd_copy, sizeof(dd_copy)); /* failed: leave dd as it was */
+
+	return r;
+}
+
/*
* Add a device to the list, or just increment the usage count
* if it's already present.
*/
int dm_table_get_device(struct dm_table *t, const char *path,
- offset_t start, offset_t len, struct dm_dev **result)
+ offset_t start, offset_t len, int mode,
+ struct dm_dev **result)
{
int r;
kdev_t dev;
if (!dd)
return -ENOMEM;
+ dd->mode = mode;
dd->dev = dev;
dd->bd = NULL;
atomic_set(&dd->count, 0);
list_add(&dd->list, &t->devices);
+
+ } else if (dd->mode != (mode | dd->mode)) {
+ r = upgrade_mode(dd, mode);
+ if (r)
+ return r;
}
atomic_inc(&dd->count);
*/
#include "dm.h"
-#include "kcopyd.h"
#include <linux/blk.h>
#include <linux/blkpg.h>
*/
static void dec_pending(struct buffer_head *bh, int uptodate)
{
- struct io_hook *ih = bh->b_bdev_private;
+ struct io_hook *ih = bh->b_private;
if (!uptodate && call_err_fn(ih, bh))
return;
wake_up(&ih->md->wait);
bh->b_end_io = ih->end_io;
- bh->b_bdev_private = ih->context;
+ bh->b_private = ih->context;
free_io_hook(ih);
bh->b_end_io(bh, uptodate);
ih->rw = rw;
ih->target = ti;
ih->end_io = bh->b_end_io;
- ih->context = bh->b_bdev_private;
+ ih->context = bh->b_private;
r = fn(bh, rw, context);
/* hook the end io request fn */
atomic_inc(&md->pending);
bh->b_end_io = dec_pending;
- bh->b_bdev_private = ih;
+ bh->b_private = ih;
} else if (r == 0)
/* we don't need to hook */
atomic_t count;
struct list_head list;
+ int mode;
+
kdev_t dev;
struct block_device *bd;
};
offset_t *highs;
struct target *targets;
+ /*
+ * Indicates the rw permissions for the new logical
+ * device. This should be a combination of FMODE_READ
+ * and FMODE_WRITE.
+ */
+ int mode;
+
/* a list of devices used by this table */
struct list_head devices;
int dm_resume(struct mapped_device *md);
/* dm-table.c */
-int dm_table_create(struct dm_table **result);
+int dm_table_create(struct dm_table **result, int mode);
void dm_table_destroy(struct dm_table *t);
int dm_table_add_target(struct dm_table *t, offset_t highs,
*/
void dm_table_event(struct dm_table *t);
-/* Snapshots */
-int dm_snapshot_init(void);
-void dm_snapshot_exit(void);
-
-/* dm-mirror.c */
-int dm_mirror_init(void);
-void dm_mirror_exit(void);
-
#define DMWARN(f, x...) printk(KERN_WARNING DM_NAME ": " f "\n" , ## x)
#define DMERR(f, x...) printk(KERN_ERR DM_NAME ": " f "\n" , ## x)
#define DMINFO(f, x...) printk(KERN_INFO DM_NAME ": " f "\n" , ## x)
}
/*
- * The device-mapper can be driven through one of two interfaces;
- * ioctl or filesystem, depending which patch you have applied.
+ * Targets
*/
-int __init dm_interface_init(void);
-void dm_interface_exit(void);
-
-/*
- * Targets for linear and striped mappings
- */
-
int dm_linear_init(void);
void dm_linear_exit(void);
int dm_stripe_init(void);
void dm_stripe_exit(void);
+int dm_snapshot_init(void);
+void dm_snapshot_exit(void);
+
+int dm_mirror_init(void);
+void dm_mirror_exit(void);
+
+/*
+ * Init functions for the user interface to device-mapper. At
+ * the moment an ioctl interface on a special char device is
+ * used. A filesystem based interface would be a nicer way to
+ * go.
+ */
+int __init dm_interface_init(void);
+void dm_interface_exit(void);
+
#endif
*/
static void free_buffer(struct buffer_head *bh)
{
- int flags;
+ int flags, was_empty;
spin_lock_irqsave(&_buffer_lock, flags);
+ was_empty = (_free_buffers == NULL) ? 1 : 0;
bh->b_reqnext = _free_buffers;
_free_buffers = bh;
spin_unlock_irqrestore(&_buffer_lock, flags);
+
+ /*
+ * If the buffer list was empty then kcopyd probably went
+ * to sleep because it ran out of buffer heads, so let's
+ * wake it up.
+ */
+ if (was_empty)
+ wake_kcopyd();
}
/*-----------------------------------------------------------------
return results_to_user(user, param, NULL, 0);
}
+/*
+ * Map the ioctl's flags onto an fmode for the new table's
+ * devices: FMODE_READ only when DM_READONLY_FLAG is set,
+ * otherwise FMODE_READ | FMODE_WRITE.
+ */
+static inline int get_mode(struct dm_ioctl *param)
+{
+	int mode = FMODE_READ | FMODE_WRITE;
+
+	if (param->flags & DM_READONLY_FLAG)
+		mode = FMODE_READ;
+
+	return mode;
+}
+
static int create(struct dm_ioctl *param, struct dm_ioctl *user)
{
int r, ro;
struct dm_table *t;
int minor;
- r = dm_table_create(&t);
+ r = dm_table_create(&t, get_mode(param));
if (r)
return r;
struct mapped_device *md;
struct dm_table *t;
- r = dm_table_create(&t);
+ r = dm_table_create(&t, get_mode(param));
if (r)
return r;
#define DM_VERSION_MAJOR 1
#define DM_VERSION_MINOR 0
-#define DM_VERSION_PATCHLEVEL 2
-#define DM_VERSION_EXTRA "-ioctl-cvs (2002-07-17)"
+#define DM_VERSION_PATCHLEVEL 3
+#define DM_VERSION_EXTRA "-ioctl-cvs (2002-08-14)"
/* Status bits */
#define DM_READONLY_FLAG 0x00000001