Please use this one from now on.
--- /dev/null
+#
+# Copyright (C) 2001 Sistina Software (UK) Limited.
+#
+# This file is released under the LGPL.
+#
+
+srcdir = .
+top_srcdir = ..
+VPATH = .
+SHELL = /bin/sh
+
+CC = gcc
+RANLIB = ranlib
+INSTALL = /usr/bin/install -c
+LN_S = ln -s
+
+prefix = /usr
+libdir = ${prefix}/lib
+incdir = ${prefix}/include
+kernelsrcdir = /scratch/alphalinux/linux
+
+OWNER=root
+GROUP=root
+
+INCLUDES=-I${kernelsrcdir}/include
+CFLAGS+=-Wall
+#CFLAGS+=-O2
+CFLAGS+=-g -fno-omit-frame-pointer
+#CFLAGS+=-pg
+#LD_FLAGS=-pg
+
+SUFFIXES=
+SUFFIXES=.c .o .so
+
+%.o: %.c
+	$(CC) -c $(INCLUDES) $(CFLAGS) $< -o $@
+
+dmsetup: dmsetup.o libdevmapper.so
+	$(CC) -o dmsetup dmsetup.o $(LD_FLAGS) -L../lib -ldevmapper
+
+# remove the object file too (it was left behind before)
+clean:
+	$(RM) dmsetup dmsetup.o
+
+distclean: clean
+
+.PHONY: install test clean distclean
+
--- /dev/null
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "libdm.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <ctype.h>
+#include <errno.h>
+
+#define LINE_SIZE 1024
+
+#define err(msg, x...) fprintf(stderr, msg "\n", ##x)
+
+/*
+ * Parse a table description file into dmt.
+ *
+ * Each non-blank, non-comment line has the form:
+ *	<start_sector> <length_sectors> <target_type> [<target args> ...]
+ * A trailing '#' comment on a target line is stripped.  The "%as"
+ * conversion is a GNU extension that malloc()s ttype for us.
+ *
+ * Returns 1 on success, 0 on failure.
+ */
+static int _parse_file(struct dm_task *dmt, const char *file)
+{
+	char buffer[LINE_SIZE], *ttype, *ptr, *comment;
+	FILE *fp = fopen(file, "r");
+	unsigned long long start, size;
+	int r = 0, n, line = 0;
+
+	if (!fp) {
+		err("Couldn't open '%s' for reading", file);
+		return 0;
+	}
+
+	while (fgets(buffer, sizeof(buffer), fp)) {
+		line++;
+
+		/* trim trailing space */
+		for (ptr = buffer + strlen(buffer) - 1; ptr >= buffer; ptr--)
+			if (!isspace((int) *ptr))
+				break;
+		ptr++;
+		*ptr = '\0';
+
+		/* trim leading space */
+		for (ptr = buffer; *ptr && isspace((int) *ptr); ptr++)
+			;
+
+		if (!*ptr || *ptr == '#')
+			continue;
+
+		if (sscanf(ptr, "%llu %llu %as %n",
+			   &start, &size, &ttype, &n) < 3) {
+			err("%s:%d Invalid format", file, line);
+			goto out;
+		}
+
+		/* rest of the line (minus any comment) is the params */
+		ptr += n;
+		if ((comment = strchr(ptr, (int) '#')))
+			*comment = '\0';
+
+		if (!dm_task_add_target(dmt, start, size, ttype, ptr)) {
+			free(ttype);	/* was leaked on this error path */
+			goto out;
+		}
+
+		free(ttype);
+	}
+	r = 1;
+
+      out:
+	fclose(fp);
+	return r;
+}
+
+/*
+ * Create or reload a device: build a task of the given type for
+ * device 'name', populate its target list from table file 'file'
+ * and run the ioctl.  Returns 1 on success, 0 on failure.
+ */
+static int _load(int task, const char *name, const char *file)
+{
+	int r = 0;
+	struct dm_task *dmt;
+
+	if (!(dmt = dm_task_create(task)))
+		return 0;
+
+	if (!dm_task_set_name(dmt, name))
+		goto out;
+
+	if (!_parse_file(dmt, file))
+		goto out;
+
+	if (!dm_task_run(dmt))
+		goto out;
+
+	r = 1;
+
+out:
+	dm_task_destroy(dmt);
+
+	return r;
+}
+
+/* create <dev_name> <table_file> */
+static int _create(int argc, char **argv)
+{
+	return _load(DM_DEVICE_CREATE, argv[1], argv[2]);
+}
+
+/* reload <dev_name> <table_file> */
+static int _reload(int argc, char **argv)
+{
+	return _load(DM_DEVICE_RELOAD, argv[1], argv[2]);
+}
+
+
+/*
+ * Run a task that needs only a device name
+ * (used by remove/suspend/resume).  Returns 1 on success, 0 on failure.
+ */
+static int _simple(int task, const char *name)
+{
+	int r = 0;
+
+	struct dm_task *dmt;
+
+	if (!(dmt = dm_task_create(task)))
+		return 0;
+
+	if (!dm_task_set_name(dmt, name))
+		goto out;
+
+	r = dm_task_run(dmt);
+
+ out:
+	dm_task_destroy(dmt);
+	return r;
+}
+
+/* remove <dev_name> */
+static int _remove(int argc, char **argv)
+{
+	return _simple(DM_DEVICE_REMOVE, argv[1]);
+}
+
+/* suspend <dev_name> */
+static int _suspend(int argc, char **argv)
+{
+	return _simple(DM_DEVICE_SUSPEND, argv[1]);
+}
+
+/* resume <dev_name> */
+static int _resume(int argc, char **argv)
+{
+	return _simple(DM_DEVICE_RESUME, argv[1]);
+}
+
+/*
+ * info <dev_name>: query the device and print
+ * "<state>\t<open_count>\t<minor>\t<target_count>".
+ *
+ * NOTE(review): a non-existent device prints a message but still
+ * returns success (r = 1) — looks deliberate; confirm.
+ */
+static int _info(int argc, char **argv)
+{
+	int r = 0;
+
+	struct dm_task *dmt;
+	struct dm_info info;
+
+	if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
+		return 0;
+
+	if (!dm_task_set_name(dmt, argv[1]))
+		goto out;
+
+	if (!dm_task_run(dmt))
+		goto out;
+
+	if (!dm_task_get_info(dmt, &info))
+		goto out;
+
+	if (!info.exists) {
+		printf("No such device.\n");
+		r = 1;
+		goto out;
+	}
+
+	printf("%s\t", info.suspended ? "SUSPENDED" : "ACTIVE");
+	printf("%d\t", info.open_count);
+	printf("%d\t", info.minor);
+	printf("%d\n", info.target_count);
+	r = 1;
+
+ out:
+	dm_task_destroy(dmt);
+	return r;
+}
+
+
+/*
+ * dispatch table
+ */
+typedef int (*command_fn)(int argc, char **argv);
+
+struct command {
+	char *name;		/* command word on the command line */
+	char *help;		/* argument synopsis for usage output */
+	int num_args;		/* exact number of arguments required */
+	command_fn fn;		/* handler; argv[0] is the command name */
+};
+
+/* terminated by an all-NULL sentinel entry */
+static struct command _commands[] = {
+	{"create", "<dev_name> <table_file>", 2, _create},
+	{"remove", "<dev_name>", 1, _remove},
+	{"suspend", "<dev_name>", 1, _suspend},
+	{"resume", "<dev_name>", 1, _resume},
+	{"reload", "<dev_name> <table_file>", 2, _reload},
+	{"info", "<dev_name>", 1, _info},
+	{NULL, NULL, 0, NULL}
+};
+
+/* Print a usage line for every command in the dispatch table. */
+static void _usage(FILE *out)
+{
+	int i;
+
+	fprintf(out, "usage:\n");
+	for (i = 0; _commands[i].name; i++)
+		fprintf(out, "\t%s %s\n",
+			_commands[i].name, _commands[i].help);
+	return;
+}
+
+/*
+ * Look 'name' up in the dispatch table; NULL if unknown.
+ * (Made static for consistency with every other helper in this file.)
+ */
+static struct command *_find_command(const char *name)
+{
+	int i;
+
+	for (i = 0; _commands[i].name; i++)
+		if (!strcmp(_commands[i].name, name))
+			return _commands + i;
+
+	return NULL;
+}
+
+
+int main(int argc, char **argv)
+{
+	struct command *c;
+
+	if (argc < 2) {
+		_usage(stderr);
+		exit(1);
+	}
+
+	if (!(c = _find_command(argv[1]))) {
+		fprintf(stderr, "Unknown command\n");
+		_usage(stderr);
+		exit(1);
+	}
+
+	/* program name + command + the command's own arguments */
+	if (argc != c->num_args + 2) {
+		fprintf(stderr, "Incorrect number of arguments\n");
+		_usage(stderr);
+		exit(1);
+	}
+
+	/* shift argv so the handler sees argv[0] == command name */
+	if (!c->fn(argc - 1, argv + 1)) {
+		//fprintf(stderr, "Command failed\n");
+		exit(1);
+	}
+
+	return 0;
+}
+
--- /dev/null
+#
+# Copyright (C) 2001 Sistina Software (UK) Limited.
+#
+# This file is released under the LGPL.
+#
+
+srcdir = .
+top_srcdir = ..
+VPATH = .
+SHELL = /bin/sh
+
+CC = gcc
+RANLIB = ranlib
+INSTALL = /usr/bin/install -c
+LN_S = ln -s
+
+prefix = /usr
+libdir = ${prefix}/lib
+incdir = ${prefix}/include
+kernelsrcdir = /scratch/alphalinux/linux
+
+OWNER=root
+GROUP=root
+
+INCLUDES=-I. -I${kernelsrcdir}/include
+CFLAGS+=-Wall
+#CFLAGS+=-O2
+CFLAGS+=-g -fno-omit-frame-pointer
+#CFLAGS+=-pg
+#LD_FLAGS=-pg
+
+libdevmapper.so: libdm.o
+	$(CC) -shared -o libdevmapper.so libdm.o
+
+SUFFIXES=
+SUFFIXES=.c .o .so
+
+%.o: %.c
+	$(CC) -c $(INCLUDES) $(CFLAGS) $< -o $@
+
+install: libdevmapper.so
+	$(INSTALL) -c -o $(OWNER) -g $(GROUP) -m 555 -s libdevmapper.so \
+		$(libdir)
+	$(INSTALL) -D -c -o $(OWNER) -g $(GROUP) -m 444 libdevmapper.h \
+		$(incdir)/devmapper/libdevmapper.h
+
+clean:
+	$(RM) libdevmapper.so libdm.o
+
+distclean: clean
+
+.PHONY: install test clean distclean
+
--- /dev/null
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the LGPL.
+ */
+
+#ifndef LIB_DEVICE_MAPPER_H
+#define LIB_DEVICE_MAPPER_H
+
+/*
+ * Since it is quite laborious to build the ioctl
+ * arguments for the device-mapper people are
+ * encouraged to use this library.
+ *
+ * You will need to build a struct dm_task for
+ * each ioctl command you want to execute.
+ */
+
+
+typedef void (*dm_log_fn)(int level, const char *file, int line,
+ const char *f, ...);
+
+/*
+ * The library user may wish to register their own
+ * logging function, by default errors go to
+ * stderr.
+ */
+void dm_log_init(dm_log_fn fn);
+
+/* Task types, one per supported ioctl; pass to dm_task_create(). */
+enum {
+	DM_DEVICE_CREATE,
+	DM_DEVICE_RELOAD,
+	DM_DEVICE_REMOVE,
+
+	DM_DEVICE_SUSPEND,
+	DM_DEVICE_RESUME,
+
+	DM_DEVICE_INFO,
+};
+
+
+struct dm_task;
+
+struct dm_task *dm_task_create(int type);
+void dm_task_destroy(struct dm_task *dmt);
+
+int dm_task_set_name(struct dm_task *dmt, const char *name);
+
+/*
+ * Retrieve attributes after an info.
+ */
+struct dm_info {
+	int exists;			/* non-zero if the device exists */
+	int suspended;			/* non-zero if currently suspended */
+	unsigned int open_count;	/* current open reference count */
+	int minor;			/* minor device number */
+	unsigned int target_count;	/* targets in the live table */
+};
+
+int dm_task_get_info(struct dm_task *dmt, struct dm_info *dmi);
+
+/*
+ * Use these to prepare for a create or reload.
+ */
+int dm_task_add_target(struct dm_task *dmt,
+ unsigned long long start,
+ unsigned long long size,
+ const char *ttype,
+ const char *params);
+
+/*
+ * Call this to actually run the ioctl.
+ */
+int dm_task_run(struct dm_task *dmt);
+
+/*
+ * Return the device-mapper directory
+ */
+const char *dm_dir(void);
+
+#endif /* LIB_DEVICE_MAPPER_H */
--- /dev/null
+/*
+ * Copyright (C) 2001 Sistina Software (UK) Limited.
+ *
+ * This file is released under the LGPL.
+ */
+
+#include "libdevmapper.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+
+#include <linux/dm-ioctl.h>
+
+#define DEVICE_MAPPER_CONTROL "/dev/device-mapper/control"
+#define ALIGNMENT sizeof(int)
+
+/*
+ * Library users can provide their own logging
+ * function.
+ */
+
+/* Default logger: ignores level/file/line and printf's to stderr. */
+static void _default_log(int level, const char *file, int line,
+			 const char *f, ...)
+{
+	va_list ap;
+
+	//fprintf(stderr, "%s:%d ", file, line);
+
+	va_start(ap, f);
+	vfprintf(stderr, f, ap);
+	va_end(ap);
+
+	fprintf(stderr, "\n");
+}
+
+static dm_log_fn _log = _default_log;
+
+/* Replace the logger for the whole library. */
+void dm_log_init(dm_log_fn fn)
+{
+	_log = fn;
+}
+
+/* Internal shorthand; the level argument is currently always 1. */
+#define log(msg, x...) _log(1, __FILE__, __LINE__, msg, ## x)
+
+/* One entry in a task's target list (userland staging form). */
+struct target {
+
+	unsigned long long start;	/* first sector mapped */
+	unsigned long long length;	/* number of sectors */
+	char *type;			/* target type name (strdup'd) */
+	char *params;			/* type-specific args (strdup'd) */
+
+	struct target *next;
+};
+
+/* State for one ioctl invocation. */
+struct dm_task {
+	int type;		/* DM_DEVICE_* */
+	char *dev_name;
+
+	struct target *head, *tail;	/* singly-linked target list */
+
+	struct dm_ioctl *dmi;	/* flattened result, set by dm_task_run */
+};
+
+/* Allocate a zeroed task of the given DM_DEVICE_* type, NULL on OOM. */
+struct dm_task *dm_task_create(int type)
+{
+	struct dm_task *dmt;
+
+	/* calloc hands back the zero-initialised struct in one step */
+	if (!(dmt = calloc(1, sizeof(*dmt))))
+		return NULL;
+
+	dmt->type = type;
+	return dmt;
+}
+
+/*
+ * Free a task and everything hanging off it: the target list
+ * (including the strings each target owns), the device name and
+ * any ioctl result block.
+ */
+void dm_task_destroy(struct dm_task *dmt)
+{
+	struct target *t, *n;
+
+	for (t = dmt->head; t; t = n) {
+		n = t->next;
+		free(t->params);	/* strdup'd in _create_target; was leaked */
+		free(t->type);
+		free(t);
+	}
+
+	free(dmt->dev_name);	/* strdup'd in dm_task_set_name; was leaked */
+
+	if (dmt->dmi)
+		free(dmt->dmi);
+
+	free(dmt);
+}
+
+/* Set (replacing any previous) the device name for this task.  1/0. */
+int dm_task_set_name(struct dm_task *dmt, const char *name)
+{
+	if (dmt->dev_name)
+		free(dmt->dev_name);
+
+	return (dmt->dev_name = strdup(name)) ? 1 : 0;
+}
+
+/*
+ * Copy the result of a previously run DM_DEVICE_INFO task into *info.
+ * Fails (returns 0) if the task has not been run yet.
+ */
+int dm_task_get_info(struct dm_task *dmt, struct dm_info *info)
+{
+	if (!dmt->dmi)
+		return 0;
+
+	info->exists = dmt->dmi->exists;
+	info->suspended = dmt->dmi->suspend;
+	info->open_count = dmt->dmi->open_count;
+	info->minor = dmt->dmi->minor;
+	info->target_count = dmt->dmi->target_count;
+	return 1;
+}
+
+/*
+ * Allocate a target node, taking private copies of the type and
+ * params strings.  Returns NULL on allocation failure; the memset
+ * to zero makes the free()s in the error path safe (free(NULL)).
+ */
+static struct target *_create_target(unsigned long long start,
+				     unsigned long long len,
+				     const char *type, const char *params)
+{
+	struct target *t = malloc(sizeof(*t));
+
+	if (!t)
+		return NULL;
+	memset(t, 0, sizeof(*t));
+
+	if (!(t->params = strdup(params))) {
+		log("Out of memory");
+		goto bad;
+	}
+
+	if (!(t->type = strdup(type))) {
+		log("Out of memory");
+		goto bad;
+	}
+
+	t->start = start;
+	t->length = len;
+	return t;
+
+ bad:
+	free(t->params);
+	free(t->type);
+	free(t);
+	return NULL;
+}
+
+/* Append a target description to the end of the task's list.  1/0. */
+int dm_task_add_target(struct dm_task *dmt,
+		       unsigned long long start,
+		       unsigned long long size,
+		       const char *ttype,
+		       const char *params)
+{
+	struct target *t = _create_target(start, size, ttype, params);
+
+	if (!t)
+		return 0;
+
+	if (!dmt->head)
+		dmt->head = dmt->tail = t;
+	else {
+		dmt->tail->next = t;
+		dmt->tail = t;
+	}
+
+	return 1;
+}
+
+/* Round ptr up to the next multiple of align (a power of two). */
+static void *_align(void *ptr, unsigned int align)
+{
+	align--;
+	return (void *) (((long) ptr + align) & ~align);
+}
+
+/*
+ * Serialise one target into the ioctl buffer at 'out':
+ * a dm_target_spec followed by the NUL-terminated params string,
+ * then aligned ready for the next spec.  'end' bounds the buffer.
+ * Returns the next write position, or NULL if it would not fit.
+ */
+static void *_add_target(struct target *t, void *out, void *end)
+{
+	void *out_sp = out;
+	struct dm_target_spec sp;
+	int len;
+	const char no_space[] = "Ran out of memory building ioctl parameter";
+
+	out += sizeof(struct dm_target_spec);
+	if (out >= end) {
+		log(no_space);
+		return NULL;
+	}
+
+	sp.status = 0;
+	sp.sector_start = t->start;
+	sp.length = t->length;
+	strncpy(sp.target_type, t->type, sizeof(sp.target_type));
+	/* strncpy doesn't guarantee termination; the kernel expects it */
+	sp.target_type[sizeof(sp.target_type) - 1] = '\0';
+
+	len = strlen(t->params);
+
+	if ((out + len + 1) >= end) {
+		log(no_space);
+
+		log("t->params= '%s'", t->params);
+		return NULL;
+	}
+	strcpy((char *) out, t->params);
+	out += len + 1;
+
+	/* align next block */
+	out = _align(out, ALIGNMENT);
+
+	sp.next = out - out_sp;
+	memcpy(out_sp, &sp, sizeof(sp));
+
+	return out;
+}
+
+/*
+ * Serialise the whole task into a single contiguous dm_ioctl block:
+ * the header, then one dm_target_spec + params string per target
+ * (the per-target ALIGNMENT slack in 'len' covers the padding that
+ * _add_target inserts between entries).
+ *
+ * NOTE(review): header fields not assigned below (e.g. exists) are
+ * left uninitialised — presumably filled in by the kernel; confirm.
+ */
+static struct dm_ioctl *_flatten(struct dm_task *dmt)
+{
+	struct dm_ioctl *dmi;
+	struct target *t;
+	size_t len = sizeof(struct dm_ioctl);
+	void *b, *e;
+	int count = 0;
+
+	for (t = dmt->head; t; t = t->next) {
+		len += sizeof(struct dm_target_spec);
+		len += strlen(t->params) + 1 + ALIGNMENT;
+		count++;
+	}
+
+	if (!(dmi = malloc(len)))
+		return NULL;
+
+	dmi->data_size = len;
+	strncpy(dmi->name, dmt->dev_name, sizeof(dmi->name));
+	/* a RESUME is a suspend ioctl with the flag cleared */
+	dmi->suspend = (dmt->type == DM_DEVICE_SUSPEND) ? 1 : 0;
+	dmi->open_count = 0;
+	dmi->minor = -1;
+
+	dmi->target_count = count;
+
+	b = (void *) (dmi + 1);
+	e = (void *) ((char *) dmi + len);
+
+	for (t = dmt->head; t; t = t->next)
+		if (!(b = _add_target(t, b, e)))
+			goto bad;
+
+	return dmi;
+
+ bad:
+	free(dmi);
+	return NULL;
+}
+
+/*
+ * FIXME: This function is copied straight from
+ * LVM1 without an audit.
+ *
+ * Returns 1 if a devfs filesystem is mounted at (or above) DM_DIR.
+ */
+static int __check_devfs(void)
+{
+	int r = 0, len;
+	char dir[PATH_MAX], line[512];
+	char type[32];
+	FILE *mounts = NULL;
+	const char *dev_dir = DM_DIR;
+
+	/* trim the trailing slash off dev_dir, yuck */
+	len = strlen(dev_dir) - 1;
+	while(len && dev_dir[len] == '/')
+		len--;
+
+	if (!(mounts = fopen("/proc/mounts", "r"))) {
+		log("Unable to open /proc/mounts to determine "
+		    "if devfs is mounted");
+		return 0;
+	}
+
+	/*
+	 * Loop on fgets() itself: the old while(!feof()) form processed
+	 * the last line twice and used 'line' uninitialised if the very
+	 * first read failed.
+	 */
+	while (fgets(line, sizeof(line), mounts)) {
+		if (sscanf(line, "%*s %s %s %*s", dir, type) != 2)
+			continue;
+
+		if (!strcmp(type, "devfs") && !strncmp(dir, dev_dir, len)) {
+			r = 1;
+			break;
+		}
+	}
+
+	fclose(mounts);
+	return r;
+}
+
+/*
+ * Memoize the result of __check_devfs (devfs presence can't change
+ * underneath us for our purposes).
+ */
+static int _check_devfs(void)
+{
+	static int prev_result = -1;	/* -1 == not yet computed */
+
+	if (prev_result >= 0)
+		return prev_result;
+
+	return (prev_result = __check_devfs());
+}
+
+/* Build "/dev/<DM_DIR>/<dev_name>" into buffer (truncated to len). */
+static void _build_dev_path(char *buffer, size_t len, const char *dev_name)
+{
+	snprintf(buffer, len, "/dev/%s/%s", DM_DIR, dev_name);
+}
+
+/*
+ * Create the block-device node for a newly created device.
+ * A no-op (success) when devfs is managing /dev for us.
+ *
+ * NOTE(review): the caller passes only a minor number as 'dev', so
+ * the node's major looks lost — confirm against the caller.
+ */
+static int _add_dev_node(const char *dev_name, dev_t dev)
+{
+	char path[PATH_MAX];
+
+	if (_check_devfs())
+		return 1;
+
+	_build_dev_path(path, sizeof(path), dev_name);
+
+	if (mknod(path, S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP, dev) < 0) {
+		log("Unable to make device node for '%s'", dev_name);
+		return 0;
+	}
+
+	return 1;
+}
+
+/* Remove the device node after a remove; no-op under devfs. */
+static int _rm_dev_node(const char *dev_name)
+{
+	char path[PATH_MAX];
+
+	if (_check_devfs())
+		return 1;
+
+	_build_dev_path(path, sizeof(path), dev_name);
+
+	if (unlink(path) < 0) {
+		log("Unable to unlink device node for '%s'", dev_name);
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * Build the ioctl block, open the control device, issue the command
+ * and (for create/remove) maintain the /dev node.  On success the
+ * result block is stashed in dmt->dmi for dm_task_get_info().
+ * Returns 1 on success, 0 on failure.
+ */
+int dm_task_run(struct dm_task *dmt)
+{
+	int fd = -1;
+	struct dm_ioctl *dmi = _flatten(dmt);
+	unsigned int command;
+
+	if (!dmi) {
+		log("Couldn't create ioctl argument");
+		return 0;
+	}
+
+	if ((fd = open(DEVICE_MAPPER_CONTROL, O_RDWR)) < 0) {
+		log("Couldn't open device-mapper control device");
+		goto bad;
+	}
+
+	switch (dmt->type) {
+	case DM_DEVICE_CREATE:
+		command = DM_CREATE;
+		break;
+
+	case DM_DEVICE_RELOAD:
+		command = DM_RELOAD;
+		break;
+
+	case DM_DEVICE_REMOVE:
+		command = DM_REMOVE;
+		break;
+
+	case DM_DEVICE_SUSPEND:
+		command = DM_SUSPEND;
+		break;
+
+	case DM_DEVICE_RESUME:
+		/* resume is a suspend ioctl with dmi->suspend cleared */
+		command = DM_SUSPEND;
+		break;
+
+	case DM_DEVICE_INFO:
+		command = DM_INFO;
+		break;
+
+	default:
+		log("Internal error: unknown device-mapper task %d",
+		    dmt->type);
+		goto bad;
+	}
+
+	if (ioctl(fd, command, dmi) < 0) {
+		log("device-mapper ioctl cmd %d failed: %s", dmt->type,
+		    strerror(errno));
+		goto bad;
+	}
+
+	switch (dmt->type) {
+	case DM_DEVICE_CREATE:
+		/* was dmt->dmi->minor: dmt->dmi is still NULL here */
+		_add_dev_node(dmt->dev_name, dmi->minor);
+		break;
+
+	case DM_DEVICE_REMOVE:
+		_rm_dev_node(dmt->dev_name);
+		break;
+	}
+
+	close(fd);	/* was leaked on the success path */
+	dmt->dmi = dmi;
+	return 1;
+
+      bad:
+	free(dmi);
+	if (fd >= 0)
+		close(fd);
+	return 0;
+}
+
+/* Return the directory (relative to /dev) used for dm device nodes. */
+const char *dm_dir(void)
+{
+	return DM_DIR;
+}
--- /dev/null
+As yet there has not been an official release of the driver.
+
+The patches in version_0.1.0 are still being added to.
+
+To apply them, untar your kernel source and then apply the patches in order.
+
+If you are using user mode linux look in misc.
\ No newline at end of file
--- /dev/null
+This directory contains miscellaneous patches that developers may find
+useful.
+
+uml_config - People using uml will need to apply this to be able to
+ configure dm.
+
+disable_io_hook - This removes the hooking of the b_end_io field in buffer
+ heads. suspend/resume of a device will be broken as a
+ result.
+
+instrument_defer - Useful printk's that I use for checking suspend/resume.
+
--- /dev/null
+--- linux-last/drivers/md/dm.c Wed Nov 14 14:42:24 2001
++++ linux/drivers/md/dm.c Thu Nov 15 10:29:35 2001
+@@ -319,39 +319,12 @@
+ int r;
+ dm_map_fn fn;
+ void *context;
+- struct io_hook *ih = NULL;
+ struct target *ti = md->map->targets + leaf;
+
+ fn = ti->type->map;
+ context = ti->private;
+
+- ih = alloc_io_hook();
+-
+- if (!ih)
+- return 0;
+-
+- ih->md = md;
+- ih->rw = rw;
+- ih->target = ti;
+- ih->end_io = bh->b_end_io;
+- ih->context = bh->b_private;
+-
+ r = fn(bh, rw, context);
+-
+- if (r > 0) {
+- /* hook the end io request fn */
+- atomic_inc(&md->pending);
+- bh->b_end_io = dec_pending;
+- bh->b_private = ih;
+-
+- } else if (r == 0)
+- /* we don't need to hook */
+- free_io_hook(ih);
+-
+- else if (r < 0) {
+- free_io_hook(ih);
+- return 0;
+- }
+
+ return 1;
+ }
--- /dev/null
+--- linux-last//drivers/md/dm.c Tue Nov 6 16:29:32 2001
++++ linux/drivers/md/dm.c Tue Nov 6 18:45:55 2001
+@@ -283,6 +283,7 @@
+ bh->b_private = ih->context;
+ free_io_hook(ih);
+
++ WARN("calling end_io on bh %d, %lu", bh->b_rdev, bh->b_rsector);
+ bh->b_end_io(bh, uptodate);
+ }
+
+@@ -306,6 +307,9 @@
+ di->rw = rw;
+ di->next = md->deferred;
+ md->deferred = di;
++
++ WARN("defering bh %d, %lu", bh->b_rdev, bh->b_rsector);
++
+ wu;
+
+ return 1;
+@@ -829,6 +837,9 @@
+
+ while (c) {
+ n = c->next;
++
++ WARN("flushing bh %d, %lu", c->bh->b_rdev, c->bh->b_rsector);
++
+ generic_make_request(c->rw, c->bh);
+ free_deferred(c);
+ c = n;
--- /dev/null
+--- uml_build/arch/um/config.in.orig Tue Jan 2 14:33:42 2001
++++ uml_build/arch/um/config.in Tue Jan 2 14:35:42 2001
+@@ -15,6 +15,8 @@
+ bool 'Prompt for development and/or incomplete code/drivers' CONFIG_EXPERIMENTAL
+ endmenu
+
++source drivers/md/Config.in
++
+ mainmenu_option next_comment
+ comment 'Processor features'
+ bool 'Symmetric multi-processing support' CONFIG_SMP
+
+
--- /dev/null
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/Config.in linux-dm/drivers/md/Config.in
+--- linux/drivers/md/Config.in Fri Sep 14 22:22:18 2001
++++ linux-dm/drivers/md/Config.in Wed Oct 31 18:08:58 2001
+@@ -14,5 +14,6 @@
+ dep_tristate ' Multipath I/O support' CONFIG_MD_MULTIPATH $CONFIG_BLK_DEV_MD
+
+ dep_tristate ' Logical volume manager (LVM) support' CONFIG_BLK_DEV_LVM $CONFIG_MD
++dep_tristate ' Device mapper support' CONFIG_BLK_DEV_DM $CONFIG_MD
+
+ endmenu
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/Makefile linux-dm/drivers/md/Makefile
+--- linux/drivers/md/Makefile Fri Sep 14 22:22:18 2001
++++ linux-dm/drivers/md/Makefile Wed Oct 31 18:09:02 2001
+@@ -4,9 +4,10 @@
+
+ O_TARGET := mddev.o
+
+-export-objs := md.o xor.o
++export-objs := md.o xor.o dm-table.o dm-target.o
+ list-multi := lvm-mod.o
+ lvm-mod-objs := lvm.o lvm-snap.o
++dm-mod-objs := dm.o dm-table.o dm-target.o dm-ioctl.o dm-linear.o
+
+ # Note: link order is important. All raid personalities
+ # and xor.o must come before md.o, as they each initialise
+@@ -20,8 +21,12 @@
+ obj-$(CONFIG_MD_MULTIPATH) += multipath.o
+ obj-$(CONFIG_BLK_DEV_MD) += md.o
+ obj-$(CONFIG_BLK_DEV_LVM) += lvm-mod.o
++obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
+
+ include $(TOPDIR)/Rules.make
+
+ lvm-mod.o: $(lvm-mod-objs)
+ $(LD) -r -o $@ $(lvm-mod-objs)
++
++dm-mod.o: $(dm-mod-objs)
++ $(LD) -r -o $@ $(dm-mod-objs)
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/dm-ioctl.c linux-dm/drivers/md/dm-ioctl.c
+--- linux/drivers/md/dm-ioctl.c Thu Jan 1 01:00:00 1970
++++ linux-dm/drivers/md/dm-ioctl.c Wed Oct 31 18:09:12 2001
+@@ -0,0 +1,292 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include <linux/fs.h>
++#include <linux/dm-ioctl.h>
++
++#include "dm.h"
++
++static void free_params(struct dm_ioctl *p)
++{
++ vfree(p);
++}
++
++static int copy_params(struct dm_ioctl *user, struct dm_ioctl **result)
++{
++ struct dm_ioctl tmp, *dmi;
++
++ if (copy_from_user(&tmp, user, sizeof(tmp)))
++ return -EFAULT;
++
++ if (!(dmi = vmalloc(tmp.data_size)))
++ return -ENOMEM;
++
++ if (copy_from_user(dmi, user, tmp.data_size))
++ return -EFAULT;
++
++ *result = dmi;
++ return 0;
++}
++
++/*
++ * check a string doesn't overrun the chunk of
++ * memory we copied from userland.
++ */
++static int valid_str(char *str, void *end)
++{
++ while ((str != end) && *str)
++ str++;
++
++ return *str ? 0 : 1;
++}
++
++static int first_target(struct dm_ioctl *a, void *end,
++			struct dm_target_spec **spec, char **params)
++{
++	*spec = (struct dm_target_spec *) (a + 1);	/* first spec follows the header; old form mis-scaled the offset */
++	*params = (char *) (*spec + 1);
++
++	return valid_str(*params, end);
++}
++
++static int next_target(struct dm_target_spec *last, void *end,
++ struct dm_target_spec **spec, char **params)
++{
++ *spec = (struct dm_target_spec *)
++ (((unsigned char *) last) + last->next);
++ *params = (char *) (*spec + 1);
++
++ return valid_str(*params, end);
++}
++
++void err_fn(const char *message, void *private)
++{
++ printk(KERN_ERR "%s", message);
++}
++
++/*
++ * Checks to see if there's a gap in the table.
++ * Returns true iff there is a gap.
++ */
++static int gap(struct dm_table *table, struct dm_target_spec *spec)
++{
++ if (!table->num_targets)
++ return (spec->sector_start > 0) ? 1 : 0;
++
++ if (spec->sector_start != table->highs[table->num_targets - 1] + 1)
++ return 1;
++
++ return 0;
++}
++
++static int populate_table(struct dm_table *table, struct dm_ioctl *args)
++{
++ int i = 0, r, first = 1;
++ struct dm_target_spec *spec;
++ char *params;
++ struct target_type *ttype;
++ void *context, *end;
++ offset_t high = 0;
++
++ if (!args->target_count) {
++ WARN("No targets specified");
++ return -EINVAL;
++ }
++
++ end = ((void *) args) + args->data_size;
++
++#define PARSE_ERROR(msg) {err_fn(msg, NULL); return -EINVAL;}
++
++ for (i = 0; i < args->target_count; i++) {
++
++ r = first ? first_target(args, end, &spec, ¶ms) :
++ next_target(spec, end, &spec, ¶ms);
++
++ if (!r)
++ PARSE_ERROR("unable to find target");
++
++ /* lookup the target type */
++ if (!(ttype = dm_get_target_type(spec->target_type)))
++ PARSE_ERROR("unable to find target type");
++
++ if (gap(table, spec))
++ PARSE_ERROR("gap in target ranges");
++
++ /* build the target */
++ if (ttype->ctr(table, spec->sector_start, spec->length, params,
++ &context, err_fn, NULL))
++ PARSE_ERROR("target constructor failed");
++
++ /* add the target to the table */
++ high = spec->sector_start + (spec->length - 1);
++ if (dm_table_add_target(table, high, ttype, context))
++ PARSE_ERROR("internal error adding target to table");
++
++ first = 0;
++ }
++
++#undef PARSE_ERROR
++
++ r = dm_table_complete(table);
++ return r;
++}
++
++static int create(struct dm_ioctl *param)
++{
++ int r;
++ struct mapped_device *md;
++ struct dm_table *t;
++
++ if ((r = dm_create(param->name, param->minor, &md)))
++ return r;
++
++ if ((r = dm_table_create(&t))) {
++ dm_destroy(md);
++ return r;
++ }
++
++ if ((r = populate_table(t, param))) {
++ dm_destroy(md);
++ dm_table_destroy(t);
++ return r;
++ }
++
++ if ((r = dm_activate(md, t))) {
++ dm_destroy(md);
++ dm_table_destroy(t);
++ return r;
++ }
++
++ return 0;
++}
++
++static int remove(struct dm_ioctl *param)
++{
++ int r;
++ struct mapped_device *md = dm_get(param->name);
++
++ if (!md)
++ return -ENODEV;
++
++ if ((r = dm_deactivate(md)))
++ return r;
++
++ if (md->map)
++ dm_table_destroy(md->map);
++
++ if (!dm_destroy(md))
++ WARN("dm_ctl_ioctl: unable to remove device");
++
++ return 0;
++}
++
++static int suspend(struct dm_ioctl *param)
++{
++ return -EINVAL;
++}
++
++static int reload(struct dm_ioctl *param)
++{
++ return -EINVAL;
++}
++
++static int info(struct dm_ioctl *param)
++{
++ return -EINVAL;
++}
++
++static int ctl_open(struct inode *inode, struct file *file)
++{
++ /* only root can open this */
++ if (!capable(CAP_SYS_ADMIN))
++ return -EACCES;
++
++ MOD_INC_USE_COUNT;
++ return 0;
++}
++
++static int ctl_close(struct inode *inode, struct file *file)
++{
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++
++static int ctl_ioctl(struct inode *inode, struct file *file,
++		     uint command, ulong a)
++{
++	int r = -EINVAL;
++	struct dm_ioctl *p;
++
++	if ((r = copy_params((struct dm_ioctl *) a, &p)))
++		return r;
++
++	switch (command) {
++	case DM_CREATE:
++		r = create(p);
++		break;
++
++	case DM_REMOVE:
++		r = remove(p);
++		break;
++
++	case DM_SUSPEND:
++		r = suspend(p);
++		break;
++
++	case DM_RELOAD:
++		r = reload(p);
++		break;
++
++	case DM_INFO:
++		r = info(p);
++		break;	/* was missing: fell through into the default WARN */
++	default:
++		WARN("dm_ctl_ioctl: unknown command 0x%x\n", command);
++	}
++
++	free_params(p);
++	return r;
++}
++
++
++static struct file_operations _ctl_fops = {
++ open: ctl_open,
++ release: ctl_close,
++ ioctl: ctl_ioctl,
++};
++
++static int dm_ioctl_init(void)
++{
++ int r;
++
++ if ((r = devfs_register_chrdev(DM_CHAR_MAJOR, "device-mapper",
++ &_ctl_fops)) < 0) {
++ WARN("devfs_register_chrdev failed for dm control dev");
++ return -EIO;
++ }
++
++ return r;
++}
++
++static void dm_ioctl_exit(void)
++{
++ if (devfs_unregister_chrdev(DM_CHAR_MAJOR, "device-mapper") < 0)
++ WARN("devfs_unregister_chrdev failed for dm control device");
++}
++
++/*
++ * module hooks
++ */
++module_init(dm_ioctl_init);
++module_exit(dm_ioctl_exit);
++
++MODULE_DESCRIPTION("device-mapper ioctl interface");
++MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
++
++#ifdef MODULE_LICENSE
++MODULE_LICENSE("GPL");
++#endif
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/dm-linear.c linux-dm/drivers/md/dm-linear.c
+--- linux/drivers/md/dm-linear.c Thu Jan 1 01:00:00 1970
++++ linux-dm/drivers/md/dm-linear.c Wed Oct 31 18:09:12 2001
+@@ -0,0 +1,113 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include <linux/config.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/fs.h>
++#include <linux/blkdev.h>
++#include <linux/device-mapper.h>
++
++#include "dm.h"
++
++/*
++ * linear: maps a linear range of a device.
++ */
++struct linear_c {
++ long delta; /* FIXME: we need a signed offset type */
++ struct dm_dev *dev;
++};
++
++/*
++ * construct a linear mapping.
++ * <dev_path> <offset>
++ */
++static int linear_ctr(struct dm_table *t, offset_t b, offset_t l,
++		      const char *args, void **context,
++		      dm_error_fn err, void *e_private)
++{
++	struct linear_c *lc;
++	unsigned int start;
++	char path[256];	/* FIXME: magic */
++	int r = -EINVAL;
++
++	if (!(lc = kmalloc(sizeof(*lc), GFP_KERNEL))) {	/* was sizeof(lc): size of pointer */
++		err("couldn't allocate memory for linear context", e_private);
++		return -ENOMEM;
++	}
++
++	if (sscanf(args, "%255s %u", path, &start) != 2) {	/* was missing the 'args' input */
++		err("target params should be of the form <dev_path> <sector>",
++		    e_private);
++		goto bad;
++	}
++
++	if ((r = dm_table_get_device(t, path, &lc->dev))) {
++		err("couldn't lookup device", e_private);
++		r = -ENXIO;
++		goto bad;
++	}
++
++	lc->delta = (int) start - (int) b;
++	*context = lc;
++	return 0;
++
++ bad:
++	kfree(lc);
++	return r;
++}
++
++static void linear_dtr(struct dm_table *t, void *c)
++{
++ struct linear_c *lc = (struct linear_c *) c;
++ dm_table_put_device(t, lc->dev);
++ kfree(c);
++}
++
++static int linear_map(struct buffer_head *bh, int rw, void *context)
++{
++ struct linear_c *lc = (struct linear_c *) context;
++
++ bh->b_rdev = lc->dev->dev;
++ bh->b_rsector = bh->b_rsector + lc->delta;
++ return 1;
++}
++
++static struct target_type linear_target = {
++ name: "linear",
++ module: THIS_MODULE,
++ ctr: linear_ctr,
++ dtr: linear_dtr,
++ map: linear_map,
++};
++
++static int __init linear_init(void)
++{
++ int r;
++
++ if ((r = dm_register_target(&linear_target)) < 0)
++ printk(KERN_ERR "Device mapper: Linear: register failed\n");
++
++ return r;
++}
++
++static void __exit linear_exit(void)
++{
++ if (dm_unregister_target(&linear_target) < 0)
++ printk(KERN_ERR "Device mapper: Linear: unregister failed\n");
++}
++
++module_init(linear_init);
++module_exit(linear_exit);
++
++MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
++MODULE_DESCRIPTION("Device Mapper: Linear mapping");
++
++#ifdef MODULE_LICENSE
++MODULE_LICENSE("GPL");
++#endif
++
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/dm-table.c linux-dm/drivers/md/dm-table.c
+--- linux/drivers/md/dm-table.c Thu Jan 1 01:00:00 1970
++++ linux-dm/drivers/md/dm-table.c Wed Oct 31 18:09:12 2001
+@@ -0,0 +1,333 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++/* ceiling(n / size) * size */
++static inline ulong round_up(ulong n, ulong size)
++{
++ ulong r = n % size;
++ return n + (r ? (size - r) : 0);
++}
++
++/* ceiling(n / size) */
++static inline ulong div_up(ulong n, ulong size)
++{
++ return round_up(n, size) / size;
++}
++
++/* similar to ceiling(log_size(n)) */
++static uint int_log(ulong n, ulong base)
++{
++ int result = 0;
++
++ while (n > 1) {
++ n = div_up(n, base);
++ result++;
++ }
++
++ return result;
++}
++
++/*
++ * return the highest key that you could lookup
++ * from the n'th node on level l of the btree.
++ */
++static offset_t high(struct dm_table *t, int l, int n)
++{
++ for (; l < t->depth - 1; l++)
++ n = get_child(n, CHILDREN_PER_NODE - 1);
++
++ if (n >= t->counts[l])
++ return (offset_t) -1;
++
++ return get_node(t, l, n)[KEYS_PER_NODE - 1];
++}
++
++/*
++ * fills in a level of the btree based on the
++ * highs of the level below it.
++ */
++static int setup_btree_index(int l, struct dm_table *t)
++{
++ int n, k;
++ offset_t *node;
++
++ for (n = 0; n < t->counts[l]; n++) {
++ node = get_node(t, l, n);
++
++ for (k = 0; k < KEYS_PER_NODE; k++)
++ node[k] = high(t, l + 1, get_child(n, k));
++ }
++
++ return 0;
++}
++
++/*
++ * highs, and targets are managed as dynamic
++ * arrays during a table load.
++ */
++static int alloc_targets(struct dm_table *t, int num)
++{
++ offset_t *n_highs;
++ struct target *n_targets;
++ int n = t->num_targets;
++ int size = (sizeof(struct target) + sizeof(offset_t)) * num;
++
++ n_highs = vmalloc(size);
++ if (!n_highs)
++ return -ENOMEM;
++
++ n_targets = (struct target *) (n_highs + num);
++
++ if (n) {
++ memcpy(n_highs, t->highs, sizeof(*n_highs) * n);
++ memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
++ }
++
++ vfree(t->highs);
++
++ t->num_allocated = num;
++ t->highs = n_highs;
++ t->targets = n_targets;
++
++ return 0;
++}
++
++int dm_table_create(struct dm_table **result)
++{
++	struct dm_table *t = kmalloc(sizeof(struct dm_table), GFP_NOIO);
++
++	if (!t)
++		return -ENOMEM;
++
++	memset(t, 0, sizeof(*t));
++	INIT_LIST_HEAD(&t->devices);
++
++	/* allocate a single nodes worth of targets to
++	   begin with */
++	if (alloc_targets(t, KEYS_PER_NODE)) {
++		kfree(t);
++		return -ENOMEM;	/* was returning 0 with *result unset */
++	}
++
++	*result = t;
++	return 0;
++}
++
++static void free_devices(struct list_head *devices)
++{
++ struct list_head *tmp, *next;
++
++ for (tmp = devices->next; tmp != devices; tmp = next) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ next = tmp->next;
++ kfree(dd);
++ }
++}
++
++void dm_table_destroy(struct dm_table *t)
++{
++ int i;
++
++ /* free the indexes (see dm_table_complete) */
++ if (t->depth >= 2)
++ vfree(t->index[t->depth - 2]);
++
++
++ /* free the targets */
++ for (i = 0; i < t->num_targets; i++) {
++ struct target *tgt = &t->targets[i];
++ if (tgt->type->dtr)
++ tgt->type->dtr(t, tgt->private);
++ }
++
++ vfree(t->highs);
++
++ /* free the device list */
++ if (t->devices.next != &t->devices) {
++ WARN("there are still devices present, someone isn't "
++ "calling dm_table_remove_device");
++
++ free_devices(&t->devices);
++ }
++
++ kfree(t);
++}
++
++/*
++ * Checks to see if we need to extend
++ * highs or targets.
++ */
++static inline int check_space(struct dm_table *t)
++{
++ if (t->num_targets >= t->num_allocated)
++ return alloc_targets(t, t->num_allocated * 2);
++
++ return 0;
++}
++
++
++/*
++ * convert a device path to a kdev_t.
++ */
++int lookup_device(const char *path, kdev_t *dev)
++{
++ int r;
++ struct nameidata nd;
++ struct inode *inode;
++
++ if (!path_init(path, LOOKUP_FOLLOW, &nd))
++ return 0;
++
++ if ((r = path_walk(path, &nd)))
++ goto bad;
++
++ inode = nd.dentry->d_inode;
++ if (!inode) {
++ r = -ENOENT;
++ goto bad;
++ }
++
++ if (!S_ISBLK(inode->i_mode)) {
++ r = -EINVAL;
++ goto bad;
++ }
++
++ *dev = inode->i_rdev;
++
++ bad:
++ path_release(&nd);
++ return r;
++}
++
++/*
++ * see if we've already got a device in the list.
++ */
++static struct dm_dev *find_device(struct list_head *l, kdev_t dev)
++{
++ struct list_head *tmp;
++
++ for (tmp = l->next; tmp != l; tmp = tmp->next) {
++
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ if (dd->dev == dev)
++ return dd;
++ }
++
++ return NULL;
++}
++
++/*
++ * add a device to the list, or just increment the
++ * usage count if it's already present.
++ */
++int dm_table_get_device(struct dm_table *t, const char *path,
++ struct dm_dev **result)
++{
++ int r;
++ kdev_t dev;
++ struct dm_dev *dd;
++
++ /* convert the path to a device */
++ if ((r = lookup_device(path, &dev)))
++ return r;
++
++ dd = find_device(&t->devices, dev);
++ if (!dd) {
++ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
++ if (!dd)
++ return -ENOMEM;
++
++ dd->dev = dev;
++ dd->bd = 0;
++ atomic_set(&dd->count, 0);
++ list_add(&dd->list, &t->devices);
++ }
++ atomic_inc(&dd->count);
++ *result = dd;
++
++ return 0;
++}
++/*
++ * decrement a devices use count and remove it if
++ * necessary.
++ */
++void dm_table_put_device(struct dm_table *t, struct dm_dev *dd)
++{
++ if (atomic_dec_and_test(&dd->count)) {
++ list_del(&dd->list);
++ kfree(dd);
++ }
++}
++
++/*
++ * adds a target to the map
++ */
++int dm_table_add_target(struct dm_table *t, offset_t high,
++ struct target_type *type, void *private)
++{
++ int r, n;
++
++ if ((r = check_space(t)))
++ return r;
++
++ n = t->num_targets++;
++ t->highs[n] = high;
++ t->targets[n].type = type;
++ t->targets[n].private = private;
++
++ return 0;
++}
++
++
++static int setup_indexes(struct dm_table *t)
++{
++ int i, total = 0;
++ offset_t *indexes;
++
++ /* allocate the space for *all* the indexes */
++ for (i = t->depth - 2; i >= 0; i--) {
++ t->counts[i] = div_up(t->counts[i + 1], CHILDREN_PER_NODE);
++ total += t->counts[i];
++ }
++
++ if (!(indexes = vmalloc(NODE_SIZE * total)))
++ return -ENOMEM;
++
++ /* set up internal nodes, bottom-up */
++ for (i = t->depth - 2, total = 0; i >= 0; i--) {
++ t->index[i] = indexes + (KEYS_PER_NODE * t->counts[i]);
++ setup_btree_index(i, t);
++ }
++
++ return 0;
++}
++
++
++/*
++ * builds the btree to index the map
++ */
++int dm_table_complete(struct dm_table *t)
++{
++ int leaf_nodes, r = 0;
++
++ /* how many indexes will the btree have ? */
++ leaf_nodes = div_up(t->num_targets, KEYS_PER_NODE);
++ t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);
++
++ /* leaf layer has already been set up */
++ t->counts[t->depth - 1] = leaf_nodes;
++ t->index[t->depth - 1] = t->highs;
++
++ if (t->depth >= 2)
++ r = setup_indexes(t);
++
++ return r;
++}
++
++EXPORT_SYMBOL(dm_table_get_device);
++EXPORT_SYMBOL(dm_table_put_device);
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/dm-target.c linux-dm/drivers/md/dm-target.c
+--- linux/drivers/md/dm-target.c Thu Jan 1 01:00:00 1970
++++ linux-dm/drivers/md/dm-target.c Wed Oct 31 18:09:12 2001
+@@ -0,0 +1,175 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++#include <linux/kmod.h>
++
++struct tt_internal {
++ struct target_type tt;
++
++ struct list_head list;
++ long use;
++};
++
++static LIST_HEAD(_targets);
++static rwlock_t _lock = RW_LOCK_UNLOCKED;
++
++#define DM_MOD_NAME_SIZE 32
++
++static inline struct tt_internal *__find_target_type(const char *name)
++{
++ struct list_head *tmp;
++ struct tt_internal *ti;
++
++ for(tmp = _targets.next; tmp != &_targets; tmp = tmp->next) {
++
++ ti = list_entry(tmp, struct tt_internal, list);
++ if (!strcmp(name, ti->tt.name))
++ return ti;
++ }
++
++ return NULL;
++}
++
++static struct tt_internal *get_target_type(const char *name)
++{
++ struct tt_internal *ti;
++
++ read_lock(&_lock);
++ ti = __find_target_type(name);
++
++ if (ti) {
++ if (ti->use == 0 && ti->tt.module)
++ __MOD_INC_USE_COUNT(ti->tt.module);
++ ti->use++;
++ }
++ read_unlock(&_lock);
++
++ return ti;
++}
++
++static void load_module(const char *name)
++{
++ char module_name[DM_MOD_NAME_SIZE] = "dm-";
++
++ /* Length check for strcat() below */
++ if (strlen(name) > (DM_MOD_NAME_SIZE - 4))
++ return;
++
++ strcat(module_name, name);
++ request_module(module_name);
++}
++
++struct target_type *dm_get_target_type(const char *name)
++{
++ struct tt_internal *ti = get_target_type(name);
++
++ if (!ti) {
++ load_module(name);
++ ti = get_target_type(name);
++ }
++
++ return ti ? &ti->tt : 0;
++}
++
++void dm_put_target_type(struct target_type *t)
++{
++ struct tt_internal *ti = (struct tt_internal *) t;
++
++ read_lock(&_lock);
++ if (--ti->use == 0 && ti->tt.module)
++ __MOD_DEC_USE_COUNT(ti->tt.module);
++
++ if (ti->use < 0)
++ BUG();
++ read_unlock(&_lock);
++}
++
++static struct tt_internal *alloc_target(struct target_type *t)
++{
++ struct tt_internal *ti = kmalloc(sizeof(*ti), GFP_KERNEL);
++
++ if (ti) {
++ memset(ti, 0, sizeof(*ti));
++ ti->tt = *t;
++ }
++
++ return ti;
++}
++
++int dm_register_target(struct target_type *t)
++{
++ int rv = 0;
++ struct tt_internal *ti = alloc_target(t);
++
++ if (!ti)
++ return -ENOMEM;
++
++ write_lock(&_lock);
++ if (__find_target_type(t->name))
++ rv = -EEXIST;
++ else
++ list_add(&ti->list, &_targets);
++
++ write_unlock(&_lock);
++ return rv;
++}
++
++int dm_unregister_target(struct target_type *t)
++{
++ struct tt_internal *ti = (struct tt_internal *) t;
++ int rv = -ETXTBSY;
++
++ write_lock(&_lock);
++ if (ti->use == 0) {
++ list_del(&ti->list);
++ kfree(ti);
++ rv = 0;
++ }
++ write_unlock(&_lock);
++
++ return rv;
++}
++
++/*
++ * io-err: always fails an io, useful for bringing
++ * up LV's that have holes in them.
++ */
++static int io_err_ctr(struct dm_table *t, offset_t b, offset_t l,
++ const char *args, void **context,
++ dm_error_fn err, void *e_private)
++{
++ *context = 0;
++ return 0;
++}
++
++static void io_err_dtr(struct dm_table *t, void *c)
++{
++ /* empty */
++}
++
++static int io_err_map(struct buffer_head *bh, int rw, void *context)
++{
++ buffer_IO_error(bh);
++ return 0;
++}
++
++static struct target_type error_target = {
++ name: "error",
++ ctr: io_err_ctr,
++ dtr: io_err_dtr,
++ map: io_err_map
++};
++
++
++int dm_target_init(void)
++{
++ return dm_register_target(&error_target);
++}
++
++EXPORT_SYMBOL(dm_register_target);
++EXPORT_SYMBOL(dm_unregister_target);
++
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/dm.c linux-dm/drivers/md/dm.c
+--- linux/drivers/md/dm.c Thu Jan 1 01:00:00 1970
++++ linux-dm/drivers/md/dm.c Wed Oct 31 18:09:12 2001
+@@ -0,0 +1,921 @@
++/*
++ * Copyright (C) 2001 Sistina Software
++ *
++ * This file is released under the GPL.
++ */
++
++#include "dm.h"
++
++/* defines for blk.h */
++#define MAJOR_NR DM_BLK_MAJOR
++#define DEVICE_NR(device) MINOR(device) /* has no partition bits */
++#define DEVICE_NAME "device-mapper" /* name for messaging */
++#define DEVICE_NO_RANDOM /* no entropy to contribute */
++#define DEVICE_OFF(d) /* do-nothing */
++
++#include <linux/blk.h>
++#include <linux/blkpg.h>
++
++/* we only need this for the lv_bmap struct definition, not happy */
++#include <linux/lvm.h>
++
++#define MAX_DEVICES 64
++#define DEFAULT_READ_AHEAD 64
++
++const char *_name = "device-mapper";
++int _version[3] = {0, 1, 0};
++
++struct io_hook {
++ struct mapped_device *md;
++ struct target *target;
++ int rw;
++
++ void (*end_io)(struct buffer_head * bh, int uptodate);
++ void *context;
++};
++
++kmem_cache_t *_io_hook_cache;
++
++#define rl down_read(&_dev_lock)
++#define ru up_read(&_dev_lock)
++#define wl down_write(&_dev_lock)
++#define wu up_write(&_dev_lock)
++
++struct rw_semaphore _dev_lock;
++static struct mapped_device *_devs[MAX_DEVICES];
++
++/* block device arrays */
++static int _block_size[MAX_DEVICES];
++static int _blksize_size[MAX_DEVICES];
++static int _hardsect_size[MAX_DEVICES];
++
++const char *_fs_dir = "device-mapper";
++static devfs_handle_t _dev_dir;
++
++static int request(request_queue_t *q, int rw, struct buffer_head *bh);
++static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb);
++
++/*
++ * setup and teardown the driver
++ */
++static int dm_init(void)
++{
++ int ret;
++
++ init_rwsem(&_dev_lock);
++
++ if (!_io_hook_cache)
++ _io_hook_cache = kmem_cache_create("dm io hooks",
++ sizeof(struct io_hook),
++ 0, 0, NULL, NULL);
++
++ if (!_io_hook_cache)
++ return -ENOMEM;
++
++ if ((ret = dm_target_init()))
++ return ret;
++
++ /* set up the arrays */
++ read_ahead[MAJOR_NR] = DEFAULT_READ_AHEAD;
++ blk_size[MAJOR_NR] = _block_size;
++ blksize_size[MAJOR_NR] = _blksize_size;
++ hardsect_size[MAJOR_NR] = _hardsect_size;
++
++ if (devfs_register_blkdev(MAJOR_NR, _name, &dm_blk_dops) < 0) {
++ printk(KERN_ERR "%s -- register_blkdev failed\n", _name);
++ return -EIO;
++ }
++
++ blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), request);
++
++ _dev_dir = devfs_mk_dir(0, _fs_dir, NULL);
++
++ printk(KERN_INFO "%s %d.%d.%d initialised\n", _name,
++ _version[0], _version[1], _version[2]);
++ return 0;
++}
++
++static void dm_exit(void)
++{
++ if (kmem_cache_destroy(_io_hook_cache))
++ WARN("it looks like there are still some io_hooks allocated");
++ _io_hook_cache = 0;
++
++ if (devfs_unregister_blkdev(MAJOR_NR, _name) < 0)
++ printk(KERN_ERR "%s -- unregister_blkdev failed\n", _name);
++
++ read_ahead[MAJOR_NR] = 0;
++ blk_size[MAJOR_NR] = 0;
++ blksize_size[MAJOR_NR] = 0;
++ hardsect_size[MAJOR_NR] = 0;
++
++ printk(KERN_INFO "%s %d.%d.%d cleaned up\n", _name,
++ _version[0], _version[1], _version[2]);
++}
++
++/*
++ * block device functions
++ */
++static int dm_blk_open(struct inode *inode, struct file *file)
++{
++ int minor = MINOR(inode->i_rdev);
++ struct mapped_device *md;
++
++ if (minor >= MAX_DEVICES)
++ return -ENXIO;
++
++ wl;
++ md = _devs[minor];
++
++ if (!md || !is_active(md)) {
++ wu;
++ return -ENXIO;
++ }
++
++ md->use_count++;
++ wu;
++
++ MOD_INC_USE_COUNT;
++ return 0;
++}
++
++static int dm_blk_close(struct inode *inode, struct file *file)
++{
++ int minor = MINOR(inode->i_rdev);
++ struct mapped_device *md;
++
++ if (minor >= MAX_DEVICES)
++ return -ENXIO;
++
++ wl;
++ md = _devs[minor];
++ if (!md || md->use_count < 1) {
++ WARN("reference count in mapped_device incorrect");
++ wu;
++ return -ENXIO;
++ }
++
++ md->use_count--;
++ wu;
++
++ MOD_DEC_USE_COUNT;
++ return 0;
++}
++
++/* In 512-byte units */
++#define VOLUME_SIZE(minor) (_block_size[(minor)] >> 1)
++
++static int dm_blk_ioctl(struct inode *inode, struct file *file,
++ uint command, ulong a)
++{
++ int minor = MINOR(inode->i_rdev);
++ long size;
++
++ if (minor >= MAX_DEVICES)
++ return -ENXIO;
++
++ switch (command) {
++ case BLKSSZGET:
++ case BLKROGET:
++ case BLKROSET:
++#if 0
++ case BLKELVSET:
++ case BLKELVGET:
++#endif
++ return blk_ioctl(inode->i_dev, command, a);
++ break;
++
++ case BLKGETSIZE:
++ size = VOLUME_SIZE(minor);
++ if (copy_to_user((void *) a, &size, sizeof (long)))
++ return -EFAULT;
++ break;
++
++ case BLKFLSBUF:
++ if (!capable(CAP_SYS_ADMIN))
++ return -EACCES;
++ fsync_dev(inode->i_rdev);
++ invalidate_buffers(inode->i_rdev);
++ return 0;
++
++ case BLKRAGET:
++ if (copy_to_user
++ ((void *) a, &read_ahead[MAJOR(inode->i_rdev)],
++ sizeof (long)))
++ return -EFAULT;
++ return 0;
++
++ case BLKRASET:
++ if (!capable(CAP_SYS_ADMIN))
++ return -EACCES;
++ read_ahead[MAJOR(inode->i_rdev)] = a;
++ return 0;
++
++ case BLKRRPART:
++ return -EINVAL;
++
++ case LV_BMAP:
++ return dm_user_bmap(inode, (struct lv_bmap *) a);
++
++ default:
++ WARN("unknown block ioctl %d", command);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static inline struct io_hook *alloc_io_hook(void)
++{
++ return kmem_cache_alloc(_io_hook_cache, GFP_NOIO);
++}
++
++static inline void free_io_hook(struct io_hook *ih)
++{
++ kmem_cache_free(_io_hook_cache, ih);
++}
++
++/*
++ * FIXME: need to decide if deferred_io's need
++ * their own slab, I say no for now since they are
++ * only used when the device is suspended.
++ */
++static inline struct deferred_io *alloc_deferred(void)
++{
++ return kmalloc(sizeof(struct deferred_io), GFP_NOIO);
++}
++
++static inline void free_deferred(struct deferred_io *di)
++{
++ kfree(di);
++}
++
++/*
++ * call a targets optional error function if
++ * an io failed.
++ */
++static inline int call_err_fn(struct io_hook *ih, struct buffer_head *bh)
++{
++ dm_err_fn err = ih->target->type->err;
++ if (err)
++ return err(bh, ih->rw, ih->target->private);
++
++ return 0;
++}
++
++/*
++ * bh->b_end_io routine that decrements the
++ * pending count and then calls the original
++ * bh->b_end_io fn.
++ */
++static void dec_pending(struct buffer_head *bh, int uptodate)
++{
++ struct io_hook *ih = bh->b_private;
++
++ if (!uptodate && call_err_fn(ih, bh))
++ return;
++
++ if (atomic_dec_and_test(&ih->md->pending))
++ /* nudge anyone waiting on suspend queue */
++ wake_up(&ih->md->wait);
++
++ bh->b_end_io = ih->end_io;
++ bh->b_private = ih->context;
++ free_io_hook(ih);
++
++ bh->b_end_io(bh, uptodate);
++}
++
++/*
++ * add the bh to the list of deferred io.
++ */
++static int queue_io(struct mapped_device *md, struct buffer_head *bh, int rw)
++{
++ struct deferred_io *di = alloc_deferred();
++
++ if (!di)
++ return -ENOMEM;
++
++ wl;
++ if (test_bit(DM_ACTIVE, &md->state)) {
++ wu;
++ return 0;
++ }
++
++ di->bh = bh;
++ di->rw = rw;
++ di->next = md->deferred;
++ md->deferred = di;
++ wu;
++
++ return 1;
++}
++
++/*
++ * do the bh mapping for a given leaf
++ */
++static inline int __map_buffer(struct mapped_device *md,
++ struct buffer_head *bh, int rw, int leaf)
++{
++ int r;
++ dm_map_fn fn;
++ void *context;
++ struct io_hook *ih = NULL;
++ struct target *ti = md->map->targets + leaf;
++
++ fn = ti->type->map;
++ context = ti->private;
++
++ ih = alloc_io_hook();
++
++ if (!ih)
++ return 0;
++
++ ih->md = md;
++ ih->rw = rw;
++ ih->target = ti;
++ ih->end_io = bh->b_end_io;
++ ih->context = bh->b_private;
++
++ r = fn(bh, rw, context);
++
++ if (r > 0) {
++ /* hook the end io request fn */
++ atomic_inc(&md->pending);
++ bh->b_end_io = dec_pending;
++ bh->b_private = ih;
++
++ } else if (r == 0)
++ /* we don't need to hook */
++ free_io_hook(ih);
++
++ else if (r < 0) {
++ free_io_hook(ih);
++ return 0;
++ }
++
++ return 1;
++}
++
++/*
++ * search the btree for the correct target.
++ */
++static inline int __find_node(struct dm_table *t, struct buffer_head *bh)
++{
++ int l, n = 0, k = 0;
++ offset_t *node;
++
++ for (l = 0; l < t->depth; l++) {
++ n = get_child(n, k);
++ node = get_node(t, l, n);
++
++ for (k = 0; k < KEYS_PER_NODE; k++)
++ if (node[k] >= bh->b_rsector)
++ break;
++ }
++
++ return (KEYS_PER_NODE * n) + k;
++}
++
++static int request(request_queue_t *q, int rw, struct buffer_head *bh)
++{
++ struct mapped_device *md;
++ int r, minor = MINOR(bh->b_rdev);
++
++ if (minor >= MAX_DEVICES)
++ goto bad_no_lock;
++
++ rl;
++ md = _devs[minor];
++
++ if (!md || !md->map)
++ goto bad;
++
++ /* if we're suspended we have to queue this io for later */
++ if (!test_bit(DM_ACTIVE, &md->state)) {
++ ru;
++ r = queue_io(md, bh, rw);
++
++ if (r < 0)
++ goto bad_no_lock;
++
++ else if (r > 0)
++ return 0; /* deferred successfully */
++
++ rl; /* FIXME: there's still a race here */
++ }
++
++ if (!__map_buffer(md, bh, rw, __find_node(md->map, bh)))
++ goto bad;
++
++ ru;
++ return 1;
++
++ bad:
++ ru;
++
++ bad_no_lock:
++ buffer_IO_error(bh);
++ return 0;
++}
++
++static int check_dev_size(int minor, unsigned long block)
++{
++ /* FIXME: check this */
++ unsigned long max_sector = (_block_size[minor] << 1) + 1;
++ unsigned long sector = (block + 1) * (_blksize_size[minor] >> 9);
++
++ return (sector > max_sector) ? 0 : 1;
++}
++
++/*
++ * creates a dummy buffer head and maps it (for lilo).
++ */
++static int do_bmap(kdev_t dev, unsigned long block,
++ kdev_t *r_dev, unsigned long *r_block)
++{
++ struct mapped_device *md;
++ struct buffer_head bh;
++ int minor = MINOR(dev), r;
++ struct target *t;
++
++ rl;
++ if ((minor >= MAX_DEVICES) || !(md = _devs[minor]) ||
++ !test_bit(DM_ACTIVE, &md->state)) {
++ r = -ENXIO;
++ goto out;
++ }
++
++ if (!check_dev_size(minor, block)) {
++ r = -EINVAL;
++ goto out;
++ }
++
++ /* setup dummy bh */
++ memset(&bh, 0, sizeof(bh));
++ bh.b_blocknr = block;
++ bh.b_dev = bh.b_rdev = dev;
++ bh.b_size = _blksize_size[minor];
++ bh.b_rsector = block * (bh.b_size >> 9);
++
++ /* find target */
++ t = md->map->targets + __find_node(md->map, &bh);
++
++ /* do the mapping */
++ r = t->type->map(&bh, READ, t->private);
++
++ *r_dev = bh.b_rdev;
++ *r_block = bh.b_rsector / (bh.b_size >> 9);
++
++ out:
++ ru;
++ return r;
++}
++
++/*
++ * marshals arguments and results between user and
++ * kernel space.
++ */
++static int dm_user_bmap(struct inode *inode, struct lv_bmap *lvb)
++{
++ unsigned long block, r_block;
++ kdev_t r_dev;
++ int r;
++
++ if (get_user(block, &lvb->lv_block))
++ return -EFAULT;
++
++ if ((r = do_bmap(inode->i_rdev, block, &r_dev, &r_block)))
++ return r;
++
++ if (put_user(kdev_t_to_nr(r_dev), &lvb->lv_dev) ||
++ put_user(r_block, &lvb->lv_block))
++ return -EFAULT;
++
++ return 0;
++}
++
++/*
++ * see if the device with a specific minor # is
++ * free.
++ */
++static inline int __specific_dev(int minor)
++{
++ if (minor > MAX_DEVICES) {
++ WARN("request for a mapped_device > than MAX_DEVICES");
++ return 0;
++ }
++
++ if (!_devs[minor])
++ return minor;
++
++ return -1;
++}
++
++/*
++ * find the first free device.
++ */
++static inline int __any_old_dev(void)
++{
++ int i;
++
++ for (i = 0; i < MAX_DEVICES; i++)
++ if (!_devs[i])
++ return i;
++
++ return -1;
++}
++
++/*
++ * allocate and initialise a blank device.
++ */
++static struct mapped_device *alloc_dev(int minor)
++{
++ struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
++
++ if (!md)
++ return 0;
++
++ memset(md, 0, sizeof (*md));
++
++ wl;
++ minor = (minor < 0) ? __any_old_dev() : __specific_dev(minor);
++
++ if (minor < 0) {
++ WARN("no free devices available");
++ wu;
++ kfree(md);
++ return 0;
++ }
++
++ md->dev = MKDEV(DM_BLK_MAJOR, minor);
++ md->name[0] = '\0';
++ md->state = 0;
++
++ init_waitqueue_head(&md->wait);
++
++ _devs[minor] = md;
++ wu;
++
++ return md;
++}
++
++static void free_dev(struct mapped_device *md)
++{
++ kfree(md);
++}
++
++/*
++ * open a device so we can use it as a map
++ * destination.
++ */
++static int open_dev(struct dm_dev *d)
++{
++ int err;
++
++ if (d->bd)
++ BUG();
++
++ if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
++ return -ENOMEM;
++
++ if ((err = blkdev_get(d->bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE))) {
++ bdput(d->bd);
++ return err;
++ }
++
++ return 0;
++}
++
++/*
++ * close a device that we've been using.
++ */
++static void close_dev(struct dm_dev *d)
++{
++ if (!d->bd)
++ return;
++
++ blkdev_put(d->bd, BDEV_FILE);
++ bdput(d->bd);
++ d->bd = 0;
++}
++
++/*
++ * Close a list of devices.
++ */
++static void close_devices(struct list_head *devices)
++{
++ struct list_head *tmp;
++
++ for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ close_dev(dd);
++ }
++}
++
++/*
++ * Open a list of devices.
++ */
++static int open_devices(struct list_head *devices)
++{
++ int r = 0;
++ struct list_head *tmp;
++
++ for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ if ((r = open_dev(dd)))
++ goto bad;
++ }
++ return 0;
++
++ bad:
++ close_devices(devices);
++ return r;
++}
++
++
++struct mapped_device *dm_find_by_minor(int minor)
++{
++ struct mapped_device *md;
++
++ rl;
++ md = _devs[minor];
++ ru;
++
++ return md;
++}
++
++static int register_device(struct mapped_device *md)
++{
++ md->devfs_entry =
++ devfs_register(_dev_dir, md->name, DEVFS_FL_CURRENT_OWNER,
++ MAJOR(md->dev), MINOR(md->dev),
++ S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP,
++ &dm_blk_dops, NULL);
++
++ if (!md->devfs_entry)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static int unregister_device(struct mapped_device *md)
++{
++ devfs_unregister(md->devfs_entry);
++ return 0;
++}
++
++/*
++ * constructor for a new device
++ */
++int dm_create(const char *name, int minor, struct mapped_device **result)
++{
++ int r;
++ struct mapped_device *md;
++
++ if (minor >= MAX_DEVICES)
++ return -ENXIO;
++
++ if (!(md = alloc_dev(minor)))
++ return -ENXIO;
++
++ wl;
++ strcpy(md->name, name);
++ _devs[minor] = md;
++ if ((r = register_device(md))) {
++ wu;
++ free_dev(md);
++ return r;
++ }
++ wu;
++
++ *result = md;
++ return 0;
++}
++
++/*
++ * destructor for the device. md->map is
++ * deliberately not destroyed, dm-fs/dm-ioctl
++ * should manage table objects.
++ */
++int dm_destroy(struct mapped_device *md)
++{
++ int minor, r;
++
++ wl;
++ if (md->use_count) {
++ wu;
++ return -EPERM;
++ }
++
++ if ((r = unregister_device(md))) {
++ wu;
++ return r;
++ }
++
++ minor = MINOR(md->dev);
++ _devs[minor] = 0;
++ wu;
++
++ kfree(md);
++
++ return 0;
++}
++
++/*
++ * the hardsect size for a mapped device is the
++ * smallest hard sect size from the devices it
++ * maps onto.
++ */
++static int __find_hardsect_size(struct list_head *devices)
++{
++ int result = INT_MAX, size;
++ struct list_head *tmp;
++
++ for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ size = get_hardsect_size(dd->dev);
++ if (size < result)
++ result = size;
++ }
++ return result;
++}
++
++/*
++ * Bind a table to the device.
++ */
++void __bind(struct mapped_device *md, struct dm_table *t)
++{
++ int minor = MINOR(md->dev);
++
++ md->map = t;
++
++ /* in k */
++ _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1;
++
++ _blksize_size[minor] = BLOCK_SIZE;
++ _hardsect_size[minor] = __find_hardsect_size(&t->devices);
++ register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
++}
++
++/*
++ * requeue the deferred buffer_heads by calling
++ * generic_make_request.
++ */
++static void __flush_deferred_io(struct mapped_device *md)
++{
++ struct deferred_io *c, *n;
++
++ for (c = md->deferred, md->deferred = 0; c; c = n) {
++ n = c->next;
++ generic_make_request(c->rw, c->bh);
++ free_deferred(c);
++ }
++}
++
++/*
++ * make the device available for use, if it was
++ * previously suspended rather than newly created
++ * then all queued io is flushed
++ */
++int dm_activate(struct mapped_device *md, struct dm_table *table)
++{
++ int r;
++
++ /* check that the mapping has at least been loaded. */
++ if (!table->num_targets)
++ return -EINVAL;
++
++ wl;
++
++ /* you must be deactivated first */
++ if (is_active(md)) {
++ wu;
++ return -EPERM;
++ }
++
++ __bind(md, table);
++
++ if ((r = open_devices(&md->map->devices))) {
++ wu;
++ return r;
++ }
++
++ set_bit(DM_ACTIVE, &md->state);
++ __flush_deferred_io(md);
++ wu;
++
++ return 0;
++}
++
++/*
++ * Deactivate the device, the device must not be
++ * opened by anyone.
++ */
++int dm_deactivate(struct mapped_device *md)
++{
++ rl;
++ if (md->use_count) {
++ ru;
++ return -EPERM;
++ }
++
++ fsync_dev(md->dev);
++
++ ru;
++
++ wl;
++ if (md->use_count) {
++ /* drat, somebody got in quick ... */
++ wu;
++ return -EPERM;
++ }
++
++ close_devices(&md->map->devices);
++ md->map = 0;
++ clear_bit(DM_ACTIVE, &md->state);
++ wu;
++
++ return 0;
++}
++
++/*
++ * We need to be able to change a mapping table
++ * under a mounted filesystem. for example we
++ * might want to move some data in the background.
++ * Before the table can be swapped with
++ * dm_bind_table, dm_suspend must be called to
++ * flush any in flight buffer_heads and ensure
++ * that any further io gets deferred.
++ */
++void dm_suspend(struct mapped_device *md)
++{
++ DECLARE_WAITQUEUE(wait, current);
++
++ wl;
++ if (!is_active(md)) {
++ wu;
++ return;
++ }
++
++ clear_bit(DM_ACTIVE, &md->state);
++ wu;
++
++ /* wait for all the pending io to flush */
++ add_wait_queue(&md->wait, &wait);
++ current->state = TASK_UNINTERRUPTIBLE;
++ do {
++ wl;
++ if (!atomic_read(&md->pending))
++ break;
++
++ wu;
++ schedule();
++
++ } while (1);
++
++ current->state = TASK_RUNNING;
++ remove_wait_queue(&md->wait, &wait);
++ close_devices(&md->map->devices);
++
++ md->map = 0;
++ wu;
++}
++
++/*
++ * Search for a device with a particular name.
++ */
++struct mapped_device *dm_get(const char *name)
++{
++ int i;
++ struct mapped_device *md = NULL;
++
++ rl;
++ for (i = 0; i < MAX_DEVICES; i++)
++ if (_devs[i] && !strcmp(_devs[i]->name, name)) {
++ md = _devs[i];
++ break;
++ }
++ ru;
++
++ return md;
++}
++
++struct block_device_operations dm_blk_dops = {
++ open: dm_blk_open,
++ release: dm_blk_close,
++ ioctl: dm_blk_ioctl
++};
++
++/*
++ * module hooks
++ */
++module_init(dm_init);
++module_exit(dm_exit);
++
++MODULE_DESCRIPTION("device-mapper driver");
++MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
++
++#ifdef MODULE_LICENSE
++MODULE_LICENSE("GPL");
++#endif
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/drivers/md/dm.h linux-dm/drivers/md/dm.h
+--- linux/drivers/md/dm.h Thu Jan 1 01:00:00 1970
++++ linux-dm/drivers/md/dm.h Wed Oct 31 18:09:12 2001
+@@ -0,0 +1,165 @@
++/*
++ * Copyright (C) 2001 Sistina Software
++ *
++ * This file is released under the GPL.
++ */
++
++#ifndef DM_INTERNAL_H
++#define DM_INTERNAL_H
++
++#include <linux/version.h>
++#include <linux/major.h>
++#include <linux/iobuf.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/compatmac.h>
++#include <linux/cache.h>
++#include <linux/devfs_fs_kernel.h>
++#include <linux/ctype.h>
++#include <linux/device-mapper.h>
++#include <linux/list.h>
++
++#define MAX_DEPTH 16
++#define NODE_SIZE L1_CACHE_BYTES
++#define KEYS_PER_NODE (NODE_SIZE / sizeof(offset_t))
++#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
++
++enum {
++ DM_BOUND = 0, /* device has been bound to a table */
++ DM_ACTIVE, /* device is running */
++};
++
++
++/*
++ * list of devices that a metadevice uses
++ * and hence should open/close.
++ */
++struct dm_dev {
++ atomic_t count;
++ struct list_head list;
++
++ kdev_t dev;
++ struct block_device *bd;
++};
++
++/*
++ * io that had to be deferred while we were
++ * suspended
++ */
++struct deferred_io {
++ int rw;
++ struct buffer_head *bh;
++ struct deferred_io *next;
++};
++
++/*
++ * btree leaf, these do the actual mapping
++ */
++struct target {
++ struct target_type *type;
++ void *private;
++};
++
++/*
++ * the btree
++ */
++struct dm_table {
++ /* btree table */
++ int depth;
++ int counts[MAX_DEPTH]; /* in nodes */
++ offset_t *index[MAX_DEPTH];
++
++ int num_targets;
++ int num_allocated;
++ offset_t *highs;
++ struct target *targets;
++
++ /* a list of devices used by this table */
++ struct list_head devices;
++};
++
++/*
++ * the actual device struct
++ */
++struct mapped_device {
++ kdev_t dev;
++ char name[DM_NAME_LEN];
++
++ int use_count;
++ int state;
++
++ /* a list of io's that arrived while we were suspended */
++ atomic_t pending;
++ wait_queue_head_t wait;
++ struct deferred_io *deferred;
++
++ struct dm_table *map;
++
++ /* used by dm-fs.c */
++ devfs_handle_t devfs_entry;
++};
++
++extern struct block_device_operations dm_blk_dops;
++
++
++/* dm-target.c */
++int dm_target_init(void);
++struct target_type *dm_get_target_type(const char *name);
++void dm_put_target_type(struct target_type *t);
++
++/* dm.c */
++struct mapped_device *dm_find_by_minor(int minor);
++
++int dm_create(const char *name, int minor, struct mapped_device **result);
++int dm_destroy(struct mapped_device *md);
++
++int dm_activate(struct mapped_device *md, struct dm_table *t);
++int dm_deactivate(struct mapped_device *md);
++
++void dm_suspend(struct mapped_device *md);
++
++struct mapped_device *dm_get(const char *name);
++
++
++/* dm-table.c */
++int dm_table_create(struct dm_table **result);
++void dm_table_destroy(struct dm_table *t);
++
++int dm_table_add_target(struct dm_table *t, offset_t high,
++ struct target_type *type, void *private);
++int dm_table_complete(struct dm_table *t);
++
++
++/* dm-fs.c */
++int dm_fs_init(void);
++void dm_fs_exit(void);
++
++
++
++#define WARN(f, x...) printk(KERN_WARNING "device-mapper: " f "\n" , ## x)
++
++/*
++ * calculate the index of the child node of the
++ * n'th node k'th key.
++ */
++static inline int get_child(int n, int k)
++{
++ return (n * CHILDREN_PER_NODE) + k;
++}
++
++/*
++ * returns the n'th node of level l from table t.
++ */
++static inline offset_t *get_node(struct dm_table *t, int l, int n)
++{
++ return t->index[l] + (n * KEYS_PER_NODE);
++}
++
++static inline int is_active(struct mapped_device *md)
++{
++ return test_bit(DM_ACTIVE, &md->state);
++}
++
++#endif
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/include/linux/device-mapper.h linux-dm/include/linux/device-mapper.h
+--- linux/include/linux/device-mapper.h Thu Jan 1 01:00:00 1970
++++ linux-dm/include/linux/device-mapper.h Wed Oct 31 18:08:44 2001
+@@ -0,0 +1,60 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#ifndef DEVICE_MAPPER_H
++#define DEVICE_MAPPER_H
++
++#include <linux/major.h>
++
++/* FIXME: Use value from local range for now, for co-existence with LVM 1 */
++#define DM_BLK_MAJOR 124
++#define DM_NAME_LEN 64
++#define DM_MAX_TYPE_NAME 16
++
++
++struct dm_table;
++struct dm_dev;
++typedef unsigned int offset_t;
++
++typedef void (*dm_error_fn)(const char *message, void *private);
++
++/*
++ * constructor, destructor and map fn types
++ */
++typedef int (*dm_ctr_fn)(struct dm_table *t, offset_t b, offset_t l,
++ const char *args, void **context,
++ dm_error_fn err, void *e_private);
++
++typedef void (*dm_dtr_fn)(struct dm_table *t, void *c);
++typedef int (*dm_map_fn)(struct buffer_head *bh, int rw, void *context);
++typedef int (*dm_err_fn)(struct buffer_head *bh, int rw, void *context);
++
++
++/*
++ * Constructors should call this to make sure any
++ * destination devices are handled correctly
++ * (ie. opened/closed).
++ */
++int dm_table_get_device(struct dm_table *t, const char *path,
++ struct dm_dev **result);
++void dm_table_put_device(struct dm_table *table, struct dm_dev *d);
++
++/*
++ * information about a target type
++ */
++struct target_type {
++ const char *name;
++ struct module *module;
++ dm_ctr_fn ctr;
++ dm_dtr_fn dtr;
++ dm_map_fn map;
++ dm_err_fn err;
++};
++
++int dm_register_target(struct target_type *t);
++int dm_unregister_target(struct target_type *t);
++
++#endif /* DEVICE_MAPPER_H */
+diff -ruN -X /home/thornber/packages/2.4/dontdiff linux/include/linux/dm-ioctl.h linux-dm/include/linux/dm-ioctl.h
+--- linux/include/linux/dm-ioctl.h Thu Jan 1 01:00:00 1970
++++ linux-dm/include/linux/dm-ioctl.h Wed Oct 31 18:08:40 2001
+@@ -0,0 +1,57 @@
++/*
++ * Copyright (C) 2001 Sistina Software (UK) Limited.
++ *
++ * This file is released under the GPL.
++ */
++
++#ifndef _DM_IOCTL_H
++#define _DM_IOCTL_H
++
++// FIXME: just for now to steal LVM_CHR_MAJOR
++#include <linux/lvm.h>
++
++#include "device-mapper.h"
++
++/*
++ * Implements a traditional ioctl interface to the
++ * device mapper. Yuck.
++ */
++
++struct dm_target_spec {
++ int32_t status; /* used when reading from kernel only */
++ uint64_t sector_start;
++ uint64_t length;
++
++ char target_type[DM_MAX_TYPE_NAME];
++
++ uint32_t next; /* offset in bytes to next target_spec */
++
++ /*
++ * Parameter string starts immediately
++ * after this object. Be careful to add
++ * padding after string to ensure correct
++ * alignment of subsequent dm_target_spec.
++ */
++};
++
++struct dm_ioctl {
++ uint32_t data_size; /* the size of this structure */
++ char name[DM_NAME_LEN];
++ int suspend;
++ int open_count; /* out field */
++ int minor;
++
++ int target_count;
++};
++
++/* FIXME: find own # */
++#define DM_IOCTL 0xfd
++#define DM_CHAR_MAJOR LVM_CHAR_MAJOR
++
++#define DM_CREATE _IOW(DM_IOCTL, 0x00, struct dm_ioctl)
++#define DM_REMOVE _IOW(DM_IOCTL, 0x01, struct dm_ioctl)
++#define DM_SUSPEND _IOW(DM_IOCTL, 0x02, struct dm_ioctl)
++#define DM_RELOAD _IOWR(DM_IOCTL, 0x03, struct dm_ioctl)
++#define DM_INFO _IOWR(DM_IOCTL, 0x04, struct dm_ioctl)
++
++#endif
--- /dev/null
+--- linux/include/linux/device-mapper.h Thu Nov 1 12:25:55 2001
++++ linux-dm/include/linux/device-mapper.h Thu Nov 1 11:46:57 2001
+@@ -8,12 +8,14 @@
+ #define DEVICE_MAPPER_H
+
+ #include <linux/major.h>
++#include <linux/fs.h>
+
+ /* FIXME: Use value from local range for now, for co-existence with LVM 1 */
+ #define DM_BLK_MAJOR 124
+ #define DM_NAME_LEN 64
+ #define DM_MAX_TYPE_NAME 16
+
++#ifdef __KERNEL__
+
+ struct dm_table;
+ struct dm_dev;
+@@ -56,5 +58,7 @@
+
+ int dm_register_target(struct target_type *t);
+ int dm_unregister_target(struct target_type *t);
++
++#endif /* __KERNEL__ */
+
+ #endif /* DEVICE_MAPPER_H */
+--- linux/include/linux/dm-ioctl.h Thu Nov 1 12:25:55 2001
++++ linux-dm/include/linux/dm-ioctl.h Thu Nov 1 11:51:36 2001
+@@ -7,9 +7,6 @@
+ #ifndef _DM_IOCTL_H
+ #define _DM_IOCTL_H
+
+-// FIXME: just for now to steal LVM_CHR_MAJOR
+-#include <linux/lvm.h>
+-
+ #include "device-mapper.h"
+
+ /*
+@@ -19,12 +16,12 @@
+
+ struct dm_target_spec {
+ int32_t status; /* used when reading from kernel only */
+- uint64_t sector_start;
+- uint64_t length;
++ unsigned long long sector_start;
++ unsigned long long length;
+
+ char target_type[DM_MAX_TYPE_NAME];
+
+- uint32_t next; /* offset in bytes to next target_spec */
++ unsigned long next; /* offset in bytes to next target_spec */
+
+ /*
+ * Parameter string starts immediately
+@@ -35,7 +32,7 @@
+ };
+
+ struct dm_ioctl {
+- uint32_t data_size; /* the size of this structure */
++ unsigned long data_size; /* the size of this structure */
+ char name[DM_NAME_LEN];
+ int suspend;
+ int open_count; /* out field */
+@@ -44,9 +41,9 @@
+ int target_count;
+ };
+
+-/* FIXME: find own # */
++/* FIXME: find own numbers, 109 is pinched from LVM */
+ #define DM_IOCTL 0xfd
+-#define DM_CHAR_MAJOR LVM_CHAR_MAJOR
++#define DM_CHAR_MAJOR 109
+
+ #define DM_CREATE _IOW(DM_IOCTL, 0x00, struct dm_ioctl)
+ #define DM_REMOVE _IOW(DM_IOCTL, 0x01, struct dm_ioctl)
--- /dev/null
+--- linux-last/drivers/md/dm-ioctl.c Thu Nov 1 13:20:44 2001
++++ linux/drivers/md/dm-ioctl.c Thu Nov 1 13:30:58 2001
+@@ -259,21 +259,32 @@
+ ioctl: ctl_ioctl,
+ };
+
++
++static devfs_handle_t _ctl_handle;
++
+ static int dm_ioctl_init(void)
+ {
+ int r;
+
++
+ if ((r = devfs_register_chrdev(DM_CHAR_MAJOR, "device-mapper",
+ &_ctl_fops)) < 0) {
+ WARN("devfs_register_chrdev failed for dm control dev");
+ return -EIO;
+ }
+
++ _ctl_handle = devfs_register(0 , "device-mapper/control", 0,
++ DM_CHAR_MAJOR, 0,
++ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
++ &_ctl_fops, NULL);
++
+ return r;
+ }
+
+ static void dm_ioctl_exit(void)
+ {
++ // FIXME: remove control device
++
+ if (devfs_unregister_chrdev(DM_CHAR_MAJOR, "device-mapper") < 0)
+ WARN("devfs_unregister_chrdev failed for dm control device");
+ }
--- /dev/null
+--- linux-last/drivers/md/dm-ioctl.c Thu Nov 1 13:34:07 2001
++++ linux/drivers/md/dm-ioctl.c Thu Nov 1 13:47:08 2001
+@@ -37,7 +37,7 @@
+ */
+ static int valid_str(char *str, void *end)
+ {
+- while ((str != end) && *str)
++ while ((str < end) && *str)
+ str++;
+
+ return *str ? 0 : 1;
+@@ -46,7 +46,7 @@
+ static int first_target(struct dm_ioctl *a, void *end,
+ struct dm_target_spec **spec, char **params)
+ {
+- *spec = (struct dm_target_spec *) ((unsigned char *) a) + a->data_size;
++ *spec = (struct dm_target_spec *) (a + 1);
+ *params = (char *) (*spec + 1);
+
+ return valid_str(*params, end);
--- /dev/null
+--- linux-last/drivers/md/dm-linear.c Thu Nov 1 13:20:44 2001
++++ linux/drivers/md/dm-linear.c Thu Nov 1 15:01:28 2001
+@@ -31,7 +31,9 @@
+ dm_error_fn err, void *e_private)
+ {
+ struct linear_c *lc;
+- unsigned int start;
++ unsigned long start; /* FIXME: should be unsigned long long,
++ need to fix sscanf */
++
+ char path[256]; /* FIXME: magic */
+ int r = -EINVAL;
+
+@@ -40,7 +42,7 @@
+ return -ENOMEM;
+ }
+
+- if (sscanf("%s %u", path, &start) != 2) {
++ if (sscanf(args, "%s %lu", path, &start) != 2) {
+ err("target params should be of the form <dev_path> <sector>",
+ e_private);
+ goto bad;
--- /dev/null
+--- linux-last/drivers/md/dm-ioctl.c Thu Nov 1 14:47:50 2001
++++ linux/drivers/md/dm-ioctl.c Thu Nov 1 15:14:02 2001
+@@ -64,7 +64,7 @@
+
+ void err_fn(const char *message, void *private)
+ {
+- printk(KERN_ERR "%s", message);
++ printk(KERN_WARNING "%s\n", message);
+ }
+
+ /*
--- /dev/null
+--- linux-last/drivers/md/dm.c Thu Nov 1 13:20:46 2001
++++ linux/drivers/md/dm.c Thu Nov 1 18:09:18 2001
+@@ -127,7 +127,7 @@
+ wl;
+ md = _devs[minor];
+
+- if (!md || !is_active(md)) {
++ if (!md) {
+ wu;
+ return -ENXIO;
+ }
+@@ -297,7 +297,7 @@
+ return -ENOMEM;
+
+ wl;
+- if (test_bit(DM_ACTIVE, &md->state)) {
++ if (!md->suspended) {
+ wu;
+ return 0;
+ }
+@@ -388,11 +388,14 @@
+ rl;
+ md = _devs[minor];
+
+- if (!md || !md->map)
++ if (!md)
+ goto bad;
+
+- /* if we're suspended we have to queue this io for later */
+- if (!test_bit(DM_ACTIVE, &md->state)) {
++ /*
++ * If we're suspended we have to queue
++ * this io for later.
++ */
++ if (md->suspended) {
+ ru;
+ r = queue_io(md, bh, rw);
+
+@@ -440,8 +443,7 @@
+ struct target *t;
+
+ rl;
+- if ((minor >= MAX_DEVICES) || !(md = _devs[minor]) ||
+- !test_bit(DM_ACTIVE, &md->state)) {
++ if ((minor >= MAX_DEVICES) || !(md = _devs[minor]) || md->suspended) {
+ r = -ENXIO;
+ goto out;
+ }
+@@ -550,7 +552,7 @@
+
+ md->dev = MKDEV(DM_BLK_MAJOR, minor);
+ md->name[0] = '\0';
+- md->state = 0;
++ md->suspended = 0;
+
+ init_waitqueue_head(&md->wait);
+
+@@ -633,18 +635,6 @@
+ return r;
+ }
+
+-
+-struct mapped_device *dm_find_by_minor(int minor)
+-{
+- struct mapped_device *md;
+-
+- rl;
+- md = _devs[minor];
+- ru;
+-
+- return md;
+-}
+-
+ static int register_device(struct mapped_device *md)
+ {
+ md->devfs_entry =
+@@ -666,9 +656,58 @@
+ }
+
+ /*
++ * the hardsect size for a mapped device is the
++ * smallest hard sect size from the devices it
++ * maps onto.
++ */
++static int __find_hardsect_size(struct list_head *devices)
++{
++ int result = INT_MAX, size;
++ struct list_head *tmp;
++
++ for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
++ size = get_hardsect_size(dd->dev);
++ if (size < result)
++ result = size;
++ }
++ return result;
++}
++
++/*
++ * Bind a table to the device.
++ */
++int __bind(struct mapped_device *md, struct dm_table *t)
++{
++ int minor = MINOR(md->dev);
++
++ if (!t->num_targets)
++ return -EINVAL;
++
++ md->map = t;
++
++ /* in k */
++ _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1;
++
++ _blksize_size[minor] = BLOCK_SIZE;
++ _hardsect_size[minor] = __find_hardsect_size(&t->devices);
++ register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
++
++ return open_devices(&md->map->devices);
++}
++
++void __unbind(struct mapped_device *md)
++{
++ close_devices(&md->map->devices);
++ md->map = NULL;
++}
++
++/*
+ * constructor for a new device
+ */
+-int dm_create(const char *name, int minor, struct mapped_device **result)
++int dm_create(const char *name, int minor,
++ struct dm_table *table,
++ struct mapped_device **result)
+ {
+ int r;
+ struct mapped_device *md;
+@@ -687,6 +726,12 @@
+ free_dev(md);
+ return r;
+ }
++
++ if ((r = __bind(md, table))) {
++ wu;
++ free_dev(md);
++ return r;
++ }
+ wu;
+
+ *result = md;
+@@ -696,12 +741,22 @@
+ /*
+ * destructor for the device. md->map is
+ * deliberately not destroyed, dm-fs/dm-ioctl
+- * should manage table objects.
++ * should manage table objects. You cannot
++ * destroy a suspended device.
+ */
+ int dm_destroy(struct mapped_device *md)
+ {
+ int minor, r;
+
++ rl;
++ if (md->suspended || md->use_count) {
++ ru;
++ return -EPERM;
++ }
++
++ fsync_dev(md->dev);
++ ru;
++
+ wl;
+ if (md->use_count) {
+ wu;
+@@ -715,48 +770,15 @@
+
+ minor = MINOR(md->dev);
+ _devs[minor] = 0;
++ __unbind(md);
++
+ wu;
+
+- kfree(md);
++ free_dev(md);
+
+ return 0;
+ }
+
+-/*
+- * the hardsect size for a mapped device is the
+- * smallest hard sect size from the devices it
+- * maps onto.
+- */
+-static int __find_hardsect_size(struct list_head *devices)
+-{
+- int result = INT_MAX, size;
+- struct list_head *tmp;
+-
+- for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
+- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+- size = get_hardsect_size(dd->dev);
+- if (size < result)
+- result = size;
+- }
+- return result;
+-}
+-
+-/*
+- * Bind a table to the device.
+- */
+-void __bind(struct mapped_device *md, struct dm_table *t)
+-{
+- int minor = MINOR(md->dev);
+-
+- md->map = t;
+-
+- /* in k */
+- _block_size[minor] = (t->highs[t->num_targets - 1] + 1) >> 1;
+-
+- _blksize_size[minor] = BLOCK_SIZE;
+- _hardsect_size[minor] = __find_hardsect_size(&t->devices);
+- register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
+-}
+
+ /*
+ * requeue the deferred buffer_heads by calling
+@@ -774,70 +796,32 @@
+ }
+
+ /*
+- * make the device available for use, if was
+- * previously suspended rather than newly created
+- * then all queued io is flushed
++ * Swap in a new table.
+ */
+-int dm_activate(struct mapped_device *md, struct dm_table *table)
++int dm_swap_table(struct mapped_device *md, struct dm_table *table)
+ {
+ int r;
+
+- /* check that the mapping has at least been loaded. */
+- if (!table->num_targets)
+- return -EINVAL;
+-
+ wl;
+
+- /* you must be deactivated first */
+- if (is_active(md)) {
++ /* device must be suspended */
++ if (!md->suspended) {
+ wu;
+ return -EPERM;
+ }
+
+- __bind(md, table);
++ __unbind(md);
+
+- if ((r = open_devices(&md->map->devices))) {
++ if ((r = __bind(md, table))) {
+ wu;
+ return r;
+ }
+
+- set_bit(DM_ACTIVE, &md->state);
+- __flush_deferred_io(md);
+ wu;
+
+ return 0;
+ }
+
+-/*
+- * Deactivate the device, the device must not be
+- * opened by anyone.
+- */
+-int dm_deactivate(struct mapped_device *md)
+-{
+- rl;
+- if (md->use_count) {
+- ru;
+- return -EPERM;
+- }
+-
+- fsync_dev(md->dev);
+-
+- ru;
+-
+- wl;
+- if (md->use_count) {
+- /* drat, somebody got in quick ... */
+- wu;
+- return -EPERM;
+- }
+-
+- close_devices(&md->map->devices);
+- md->map = 0;
+- clear_bit(DM_ACTIVE, &md->state);
+- wu;
+-
+- return 0;
+-}
+
+ /*
+ * We need to be able to change a mapping table
+@@ -848,17 +832,17 @@
+ * flush any in flight buffer_heads and ensure
+ * that any further io gets deferred.
+ */
+-void dm_suspend(struct mapped_device *md)
++int dm_suspend(struct mapped_device *md)
+ {
+ DECLARE_WAITQUEUE(wait, current);
+
+ wl;
+- if (!is_active(md)) {
++ if (md->suspended) {
+ wu;
+- return;
++ return -EINVAL;
+ }
+
+- clear_bit(DM_ACTIVE, &md->state);
++ md->suspended = 1;
+ wu;
+
+ /* wait for all the pending io to flush */
+@@ -876,10 +860,24 @@
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(&md->wait, &wait);
+- close_devices(&md->map->devices);
++ wu;
++
++ return 0;
++}
+
+- md->map = 0;
++int dm_resume(struct mapped_device *md)
++{
++ wl;
++ if (!md->suspended) {
++ wu;
++ return -EINVAL;
++ }
++
++ md->suspended = 0;
++ __flush_deferred_io(md);
+ wu;
++
++ return 0;
+ }
+
+ /*
+--- linux-last/drivers/md/dm.h Thu Nov 1 13:20:46 2001
++++ linux/drivers/md/dm.h Thu Nov 1 17:45:14 2001
+@@ -26,11 +26,6 @@
+ #define KEYS_PER_NODE (NODE_SIZE / sizeof(offset_t))
+ #define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)
+
+-enum {
+- DM_BOUND = 0, /* device has been bound to a table */
+- DM_ACTIVE, /* device is running */
+-};
+-
+
+ /*
+ * list of devices that a metadevice uses
+@@ -88,7 +83,7 @@
+ char name[DM_NAME_LEN];
+
+ int use_count;
+- int state;
++ int suspended;
+
+ /* a list of io's that arrived while we were suspended */
+ atomic_t pending;
+@@ -110,17 +105,27 @@
+ void dm_put_target_type(struct target_type *t);
+
+ /* dm.c */
+-struct mapped_device *dm_find_by_minor(int minor);
++struct mapped_device *dm_get(const char *name);
++
++int dm_create(const char *name, int minor,
++ struct dm_table *table,
++ struct mapped_device **result);
+
+-int dm_create(const char *name, int minor, struct mapped_device **result);
+ int dm_destroy(struct mapped_device *md);
+
+-int dm_activate(struct mapped_device *md, struct dm_table *t);
+-int dm_deactivate(struct mapped_device *md);
+
+-void dm_suspend(struct mapped_device *md);
++/*
++ * The device must be suspended before calling
++ * this method.
++ */
++int dm_swap_table(struct mapped_device *md, struct dm_table *t);
++
++/*
++ * People can still use a suspended device.
++ */
++int dm_suspend(struct mapped_device *md);
++int dm_resume(struct mapped_device *md);
+
+-struct mapped_device *dm_get(const char *name);
+
+
+ /* dm-table.c */
+@@ -155,11 +160,6 @@
+ static inline offset_t *get_node(struct dm_table *t, int l, int n)
+ {
+ return t->index[l] + (n * KEYS_PER_NODE);
+-}
+-
+-static inline int is_active(struct mapped_device *md)
+-{
+- return test_bit(DM_ACTIVE, &md->state);
+ }
+
+ #endif
+--- linux-last/drivers/md/dm-ioctl.c Thu Nov 1 14:47:50 2001
++++ linux/drivers/md/dm-ioctl.c Thu Nov 1 17:53:16 2001
+@@ -37,7 +37,7 @@
+ */
+ static int valid_str(char *str, void *end)
+ {
+- while ((str < end) && *str)
++ while (((void *) str < end) && *str)
+ str++;
+
+ return *str ? 0 : 1;
+@@ -140,52 +140,41 @@
+ struct mapped_device *md;
+ struct dm_table *t;
+
+- if ((r = dm_create(param->name, param->minor, &md)))
++ if ((r = dm_table_create(&t)))
+ return r;
+
+- if ((r = dm_table_create(&t))) {
+- dm_destroy(md);
+- return r;
+- }
+-
+ if ((r = populate_table(t, param))) {
+- dm_destroy(md);
+ dm_table_destroy(t);
+ return r;
+ }
+
+- if ((r = dm_activate(md, t))) {
+- dm_destroy(md);
+- dm_table_destroy(t);
++ if ((r = dm_create(param->name, param->minor, t, &md)))
+ return r;
+- }
+
+ return 0;
+ }
+
+ static int remove(struct dm_ioctl *param)
+ {
+- int r;
+ struct mapped_device *md = dm_get(param->name);
+
+ if (!md)
+- return -ENODEV;
+-
+- if ((r = dm_deactivate(md)))
+- return r;
++ return -ENXIO;
+
+- if (md->map)
+- dm_table_destroy(md->map);
+-
+- if (!dm_destroy(md))
+- WARN("dm_ctl_ioctl: unable to remove device");
+-
+- return 0;
++ return dm_destroy(md);
+ }
+
+ static int suspend(struct dm_ioctl *param)
+ {
+- return -EINVAL;
++ struct mapped_device *md = dm_get(param->name);
++
++ if (!md)
++ return -ENXIO;
++
++ if (param->suspend)
++ return dm_suspend(md);
++
++ return dm_resume(md);
+ }
+
+ static int reload(struct dm_ioctl *param)
--- /dev/null
+--- linux-last/drivers/md/dm.h Thu Nov 1 18:13:33 2001
++++ linux/drivers/md/dm.h Thu Nov 1 18:19:22 2001
+@@ -137,11 +137,6 @@
+ int dm_table_complete(struct dm_table *t);
+
+
+-/* dm-fs.c */
+-int dm_fs_init(void);
+-void dm_fs_exit(void);
+-
+-
+
+ #define WARN(f, x...) printk(KERN_WARNING "device-mapper: " f "\n" , ## x)
+
--- /dev/null
+--- linux-last/drivers/md/dm.c Fri Nov 2 13:07:57 2001
++++ linux/drivers/md/dm.c Fri Nov 2 13:05:39 2001
+@@ -702,6 +702,21 @@
+ md->map = NULL;
+ }
+
++static int check_name(const char *name)
++{
++ if (strchr(name, '/')) {
++ WARN("invalid device name");
++ return 0;
++ }
++
++ if (dm_get(name)) {
++ WARN("device name already in use");
++ return 0;
++ }
++
++ return 1;
++}
++
+ /*
+ * constructor for a new device
+ */
+@@ -719,6 +734,12 @@
+ return -ENXIO;
+
+ wl;
++ if (!check_name(name)) {
++ wu;
++ free_dev(md);
++ return -EINVAL;
++ }
++
+ strcpy(md->name, name);
+ _devs[minor] = md;
+ if ((r = register_device(md))) {
+--- linux-last/drivers/md/dm.c Fri Nov 2 14:14:27 2001
++++ linux/drivers/md/dm.c Fri Nov 2 15:36:37 2001
+@@ -702,6 +702,18 @@
+ md->map = NULL;
+ }
+
++
++static struct mapped_device *__get_by_name(const char *name)
++{
++ int i;
++
++ for (i = 0; i < MAX_DEVICES; i++)
++ if (_devs[i] && !strcmp(_devs[i]->name, name))
++ return _devs[i];
++
++ return NULL;
++}
++
+ static int check_name(const char *name)
+ {
+ if (strchr(name, '/')) {
+@@ -709,7 +721,7 @@
+ return 0;
+ }
+
+- if (dm_get(name)) {
++ if (__get_by_name(name)) {
+ WARN("device name already in use");
+ return 0;
+ }
+@@ -906,15 +918,10 @@
+ */
+ struct mapped_device *dm_get(const char *name)
+ {
+- int i;
+- struct mapped_device *md = NULL;
++ struct mapped_device *md;
+
+ rl;
+- for (i = 0; i < MAX_DEVICES; i++)
+- if (_devs[i] && !strcmp(_devs[i]->name, name)) {
+- md = _devs[i];
+- break;
+- }
++ md = __get_by_name(name);
+ ru;
+
+ return md;
--- /dev/null
+--- linux-last/drivers/md/dm.c Fri Nov 2 13:20:42 2001
++++ linux/drivers/md/dm.c Fri Nov 2 14:03:15 2001
+@@ -609,7 +609,7 @@
+ {
+ struct list_head *tmp;
+
+- for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ list_for_each(tmp, devices) {
+ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ close_dev(dd);
+ }
+@@ -623,7 +623,7 @@
+ int r = 0;
+ struct list_head *tmp;
+
+- for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ list_for_each(tmp, devices) {
+ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ if ((r = open_dev(dd)))
+ goto bad;
+@@ -665,7 +665,7 @@
+ int result = INT_MAX, size;
+ struct list_head *tmp;
+
+- for (tmp = devices->next; tmp != devices; tmp = tmp->next) {
++ list_for_each(tmp, devices) {
+ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ size = get_hardsect_size(dd->dev);
+ if (size < result)
+--- linux-last/drivers/md/dm-table.c Fri Nov 2 13:07:57 2001
++++ linux/drivers/md/dm-table.c Fri Nov 2 14:01:36 2001
+@@ -137,10 +137,12 @@
+ if (t->depth >= 2)
+ vfree(t->index[t->depth - 2]);
+
+-
+ /* free the targets */
+ for (i = 0; i < t->num_targets; i++) {
+ struct target *tgt = &t->targets[i];
++
++ dm_put_target_type(t->targets[i].type);
++
+ if (tgt->type->dtr)
+ tgt->type->dtr(t, tgt->private);
+ }
+@@ -211,8 +213,7 @@
+ {
+ struct list_head *tmp;
+
+- for (tmp = l->next; tmp != l; tmp = tmp->next) {
+-
++ list_for_each(tmp, l) {
+ struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+ if (dd->dev == dev)
+ return dd;
+--- linux-last/drivers/md/dm-target.c Fri Nov 2 13:07:57 2001
++++ linux/drivers/md/dm-target.c Fri Nov 2 13:52:32 2001
+@@ -24,9 +24,9 @@
+ struct list_head *tmp;
+ struct tt_internal *ti;
+
+- for(tmp = _targets.next; tmp != &_targets; tmp = tmp->next) {
+-
++ list_for_each(tmp, &_targets) {
+ ti = list_entry(tmp, struct tt_internal, list);
++
+ if (!strcmp(name, ti->tt.name))
+ return ti;
+ }
+@@ -72,7 +72,7 @@
+ ti = get_target_type(name);
+ }
+
+- return ti ? &ti->tt : 0;
++ return ti ? &ti->tt : NULL;
+ }
+
+ void dm_put_target_type(struct target_type *t)
+--- linux-last/drivers/md/dm-linear.c Fri Nov 2 13:07:57 2001
++++ linux/drivers/md/dm-linear.c Fri Nov 2 14:11:26 2001
+@@ -92,15 +92,15 @@
+ int r;
+
+ if ((r = dm_register_target(&linear_target)) < 0)
+- printk(KERN_ERR "Device mapper: Linear: register failed\n");
++ WARN("linear target register failed");
+
+ return r;
+ }
+
+ static void __exit linear_exit(void)
+ {
+- if (dm_unregister_target(&linear_target) < 0)
+- printk(KERN_ERR "Device mapper: Linear: unregister failed\n");
++ if (dm_unregister_target(&linear_target))
++ WARN("linear target unregister failed");
+ }
+
+ module_init(linear_init);
--- /dev/null
+--- linux-last/include/linux/device-mapper.h Fri Nov 2 13:07:57 2001
++++ linux/include/linux/device-mapper.h Fri Nov 2 14:20:50 2001
+@@ -57,7 +57,7 @@
+ };
+
+ int dm_register_target(struct target_type *t);
+-int dm_unregister_target(struct target_type *t);
++int dm_unregister_target(const char *name);
+
+ #endif /* __KERNEL__ */
+
+--- linux-last/drivers/md/dm-target.c Fri Nov 2 14:14:27 2001
++++ linux/drivers/md/dm-target.c Fri Nov 2 14:20:11 2001
+@@ -118,20 +118,26 @@
+ return rv;
+ }
+
+-int dm_unregister_target(struct target_type *t)
++int dm_unregister_target(const char *name)
+ {
+- struct tt_internal *ti = (struct tt_internal *) t;
+- int rv = -ETXTBSY;
++ struct tt_internal *ti;
+
+ write_lock(&_lock);
+- if (ti->use == 0) {
+- list_del(&ti->list);
+- kfree(ti);
+- rv = 0;
++ if (!(ti = __find_target_type(name))) {
++ write_unlock(&_lock);
++ return -EINVAL;
++ }
++
++ if (ti->use) {
++ write_unlock(&_lock);
++ return -ETXTBSY;
+ }
+- write_unlock(&_lock);
+
+- return rv;
++ list_del(&ti->list);
++ kfree(ti);
++
++ write_unlock(&_lock);
++ return 0;
+ }
+
+ /*
+--- linux-last/drivers/md/dm-linear.c Fri Nov 2 14:14:27 2001
++++ linux/drivers/md/dm-linear.c Fri Nov 2 14:17:12 2001
+@@ -99,7 +99,7 @@
+
+ static void __exit linear_exit(void)
+ {
+- if (dm_unregister_target(&linear_target))
++ if (dm_unregister_target(linear_target.name))
+ WARN("linear target unregister failed");
+ }
+
--- /dev/null
+--- linux-last/drivers/md/dm-ioctl.c Tue Nov 6 14:44:22 2001
++++ linux/drivers/md/dm-ioctl.c Tue Nov 6 14:50:58 2001
+@@ -179,7 +179,30 @@
+
+ static int reload(struct dm_ioctl *param)
+ {
+- return -EINVAL;
++ int r;
++ struct mapped_device *md = dm_get(param->name);
++ struct dm_table *t, *old;
++
++ if (!md)
++ return -ENXIO;
++
++ if ((r = dm_table_create(&t)))
++ return r;
++
++ if ((r = populate_table(t, param))) {
++ dm_table_destroy(t);
++ return r;
++ }
++
++ old = md->map;
++
++ if ((r = dm_swap_table(md, t))) {
++ dm_table_destroy(t);
++ return r;
++ }
++
++ dm_table_destroy(old);
++ return 0;
+ }
+
+ static int info(struct dm_ioctl *param)
--- /dev/null
+--- linux-last//drivers/md/dm.c Tue Nov 6 14:44:22 2001
++++ linux/drivers/md/dm.c Tue Nov 6 16:22:10 2001
+@@ -395,7 +395,7 @@
+ * If we're suspended we have to queue
+ * this io for later.
+ */
+- if (md->suspended) {
++ while (md->suspended) {
+ ru;
+ r = queue_io(md, bh, rw);
+
+@@ -405,7 +405,13 @@
+ else if (r > 0)
+ return 0; /* deferred successfully */
+
+- rl; /* FIXME: there's still a race here */
++ /*
++ * We're in a while loop, because
++ * someone could suspend before we
++ * get to the following read
++ * lock
++ */
++ rl;
+ }
+
+ if (!__map_buffer(md, bh, rw, __find_node(md->map, bh)))
+@@ -817,14 +823,15 @@
+ * requeue the deferred buffer_heads by calling
+ * generic_make_request.
+ */
+-static void __flush_deferred_io(struct mapped_device *md)
++static void flush_deferred_io(struct deferred_io *c)
+ {
+- struct deferred_io *c, *n;
++ struct deferred_io *n;
+
+- for (c = md->deferred, md->deferred = 0; c; c = n) {
++ while (c) {
+ n = c->next;
+ generic_make_request(c->rw, c->bh);
+ free_deferred(c);
++ c = n;
+ }
+ }
+
+@@ -900,6 +907,8 @@
+
+ int dm_resume(struct mapped_device *md)
+ {
++ struct deferred_io *def;
++
+ wl;
+ if (!md->suspended) {
+ wu;
+@@ -907,8 +916,11 @@
+ }
+
+ md->suspended = 0;
+- __flush_deferred_io(md);
++ def = md->deferred;
++ md->deferred = NULL;
+ wu;
++
++ flush_deferred_io(def);
+
+ return 0;
+ }
+--- linux-last//drivers/md/dm-ioctl.c Tue Nov 6 15:00:39 2001
++++ linux/drivers/md/dm-ioctl.c Tue Nov 6 15:01:11 2001
+@@ -171,10 +171,7 @@
+ if (!md)
+ return -ENXIO;
+
+- if (param->suspend)
+- return dm_suspend(md);
+-
+- return dm_resume(md);
++ return param->suspend ? dm_suspend(md) : dm_resume(md);
+ }
+
+ static int reload(struct dm_ioctl *param)
--- /dev/null
+--- linux-last/drivers/md/dm.c Tue Nov 6 16:29:32 2001
++++ linux/drivers/md/dm.c Wed Nov 7 09:01:43 2001
+@@ -397,6 +397,10 @@
+ */
+ while (md->suspended) {
+ ru;
++
++ if (rw == READA)
++ goto bad_no_lock;
++
+ r = queue_io(md, bh, rw);
+
+ if (r < 0)
--- /dev/null
+--- linux-last/include/linux/dm-ioctl.h Tue Nov 6 14:44:22 2001
++++ linux/include/linux/dm-ioctl.h Wed Nov 7 09:36:30 2001
+@@ -34,16 +34,18 @@
+ struct dm_ioctl {
+ unsigned long data_size; /* the size of this structure */
+ char name[DM_NAME_LEN];
+- int suspend;
+- int open_count; /* out field */
+- int minor;
+
+- int target_count;
++ int exists; /* out */
++ int suspend; /* in/out */
++ int open_count; /* out */
++ int minor; /* in/out */
++
++ int target_count; /* in/out */
+ };
+
+ /* FIXME: find own numbers, 109 is pinched from LVM */
+ #define DM_IOCTL 0xfd
+-#define DM_CHAR_MAJOR 109
++#define DM_CHAR_MAJOR 124
+
+ #define DM_CREATE _IOW(DM_IOCTL, 0x00, struct dm_ioctl)
+ #define DM_REMOVE _IOW(DM_IOCTL, 0x01, struct dm_ioctl)
+--- linux-last/drivers/md/dm-ioctl.c Tue Nov 6 16:29:32 2001
++++ linux/drivers/md/dm-ioctl.c Wed Nov 7 10:13:23 2001
+@@ -202,9 +202,26 @@
+ return 0;
+ }
+
+-static int info(struct dm_ioctl *param)
++static int info(struct dm_ioctl *param, struct dm_ioctl *user)
+ {
+- return -EINVAL;
++ struct mapped_device *md = dm_get(param->name);
++
++ if (!md) {
++ param->exists = 0;
++ goto out;
++ }
++
++ param->exists = 1;
++ param->suspend = md->suspended;
++ param->open_count = md->use_count;
++ param->minor = MINOR(md->dev);
++ param->target_count = md->map->num_targets;
++
++ out:
++ if (copy_to_user(user, param, sizeof(*param)))
++ return -EFAULT;
++
++ return 0;
+ }
+
+ static int ctl_open(struct inode *inode, struct file *file)
+@@ -251,7 +268,8 @@
+ break;
+
+ case DM_INFO:
+- r = info(p);
++ r = info(p, (struct dm_ioctl *) a);
++ break;
+
+ default:
+ WARN("dm_ctl_ioctl: unknown command 0x%x\n", command);
--- /dev/null
+--- linux-last/include/linux/device-mapper.h Tue Nov 13 14:33:39 2001
++++ linux/include/linux/device-mapper.h Tue Nov 13 14:31:12 2001
+@@ -14,6 +14,7 @@
+ #define DM_BLK_MAJOR 124
+ #define DM_NAME_LEN 64
+ #define DM_MAX_TYPE_NAME 16
++#define DM_DIR "device-mapper"
+
+ #ifdef __KERNEL__
+
+--- linux-last/drivers/md/dm.c Tue Nov 13 14:33:39 2001
++++ linux/drivers/md/dm.c Tue Nov 13 14:32:12 2001
+@@ -22,7 +22,7 @@
+ #define MAX_DEVICES 64
+ #define DEFAULT_READ_AHEAD 64
+
+-const char *_name = "device-mapper";
++const char *_name = DEVICE_NAME;
+ int _version[3] = {0, 1, 0};
+
+ struct io_hook {
+@@ -49,7 +49,6 @@
+ static int _blksize_size[MAX_DEVICES];
+ static int _hardsect_size[MAX_DEVICES];
+
+-const char *_fs_dir = "device-mapper";
+ static devfs_handle_t _dev_dir;
+
+ static int request(request_queue_t *q, int rw, struct buffer_head *bh);
+@@ -88,7 +87,7 @@
+
+ blk_queue_make_request(BLK_DEFAULT_QUEUE(MAJOR_NR), request);
+
+- _dev_dir = devfs_mk_dir(0, _fs_dir, NULL);
++ _dev_dir = devfs_mk_dir(0, DM_DIR, NULL);
+
+ printk(KERN_INFO "%s %d.%d.%d initialised\n", _name,
+ _version[0], _version[1], _version[2]);
+--- linux-last/drivers/md/dm-ioctl.c Tue Nov 13 14:33:39 2001
++++ linux/drivers/md/dm-ioctl.c Tue Nov 13 14:31:12 2001
+@@ -293,14 +293,13 @@
+ {
+ int r;
+
+-
+- if ((r = devfs_register_chrdev(DM_CHAR_MAJOR, "device-mapper",
++ if ((r = devfs_register_chrdev(DM_CHAR_MAJOR, DM_DIR,
+ &_ctl_fops)) < 0) {
+ WARN("devfs_register_chrdev failed for dm control dev");
+ return -EIO;
+ }
+
+- _ctl_handle = devfs_register(0 , "device-mapper/control", 0,
++ _ctl_handle = devfs_register(0 , DM_DIR "/control", 0,
+ DM_CHAR_MAJOR, 0,
+ S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
+ &_ctl_fops, NULL);
+@@ -312,7 +311,7 @@
+ {
+ // FIXME: remove control device
+
+- if (devfs_unregister_chrdev(DM_CHAR_MAJOR, "device-mapper") < 0)
++ if (devfs_unregister_chrdev(DM_CHAR_MAJOR, DM_DIR) < 0)
+ WARN("devfs_unregister_chrdev failed for dm control device");
+ }
+
--- /dev/null
+--- linux-last/drivers/md/dm.c Tue Nov 13 14:38:11 2001
++++ linux/drivers/md/dm.c Tue Nov 13 14:38:30 2001
+@@ -162,7 +162,7 @@
+ }
+
+ /* In 512-byte units */
+-#define VOLUME_SIZE(minor) (_block_size[(minor)] >> 1)
++#define VOLUME_SIZE(minor) (_block_size[(minor)] << 1)
+
+ static int dm_blk_ioctl(struct inode *inode, struct file *file,
+ uint command, ulong a)
--- /dev/null
+--- linux-last/drivers/md/dm.c Tue Nov 13 14:39:36 2001
++++ linux/drivers/md/dm.c Tue Nov 13 15:46:58 2001
+@@ -576,74 +576,6 @@
+ kfree(md);
+ }
+
+-/*
+- * open a device so we can use it as a map
+- * destination.
+- */
+-static int open_dev(struct dm_dev *d)
+-{
+- int err;
+-
+- if (d->bd)
+- BUG();
+-
+- if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
+- return -ENOMEM;
+-
+- if ((err = blkdev_get(d->bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE))) {
+- bdput(d->bd);
+- return err;
+- }
+-
+- return 0;
+-}
+-
+-/*
+- * close a device that we've been using.
+- */
+-static void close_dev(struct dm_dev *d)
+-{
+- if (!d->bd)
+- return;
+-
+- blkdev_put(d->bd, BDEV_FILE);
+- bdput(d->bd);
+- d->bd = 0;
+-}
+-
+-/*
+- * Close a list of devices.
+- */
+-static void close_devices(struct list_head *devices)
+-{
+- struct list_head *tmp;
+-
+- list_for_each(tmp, devices) {
+- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+- close_dev(dd);
+- }
+-}
+-
+-/*
+- * Open a list of devices.
+- */
+-static int open_devices(struct list_head *devices)
+-{
+- int r = 0;
+- struct list_head *tmp;
+-
+- list_for_each(tmp, devices) {
+- struct dm_dev *dd = list_entry(tmp, struct dm_dev, list);
+- if ((r = open_dev(dd)))
+- goto bad;
+- }
+- return 0;
+-
+- bad:
+- close_devices(devices);
+- return r;
+-}
+-
+ static int register_device(struct mapped_device *md)
+ {
+ md->devfs_entry =
+@@ -686,7 +618,7 @@
+ /*
+ * Bind a table to the device.
+ */
+-int __bind(struct mapped_device *md, struct dm_table *t)
++static int __bind(struct mapped_device *md, struct dm_table *t)
+ {
+ int minor = MINOR(md->dev);
+
+@@ -702,12 +634,11 @@
+ _hardsect_size[minor] = __find_hardsect_size(&t->devices);
+ register_disk(NULL, md->dev, 1, &dm_blk_dops, _block_size[minor]);
+
+- return open_devices(&md->map->devices);
++ return 0;
+ }
+
+-void __unbind(struct mapped_device *md)
++static void __unbind(struct mapped_device *md)
+ {
+- close_devices(&md->map->devices);
+ md->map = NULL;
+ }
+
+--- linux-last/drivers/md/dm-table.c Tue Nov 13 14:24:30 2001
++++ linux/drivers/md/dm-table.c Tue Nov 13 15:50:20 2001
+@@ -223,6 +223,41 @@
+ }
+
+ /*
++ * open a device so we can use it as a map
++ * destination.
++ */
++static int open_dev(struct dm_dev *d)
++{
++ int err;
++
++ if (d->bd)
++ BUG();
++
++ if (!(d->bd = bdget(kdev_t_to_nr(d->dev))))
++ return -ENOMEM;
++
++ if ((err = blkdev_get(d->bd, FMODE_READ|FMODE_WRITE, 0, BDEV_FILE))) {
++ bdput(d->bd);
++ return err;
++ }
++
++ return 0;
++}
++
++/*
++ * close a device that we've been using.
++ */
++static void close_dev(struct dm_dev *d)
++{
++ if (!d->bd)
++ return;
++
++ blkdev_put(d->bd, BDEV_FILE);
++ bdput(d->bd);
++ d->bd = 0;
++}
++
++/*
+ * add a device to the list, or just increment the
+ * usage count if it's already present.
+ */
+@@ -245,6 +280,12 @@
+
+ dd->dev = dev;
+ dd->bd = 0;
++
++ if ((r = open_dev(dd))) {
++ kfree(dd);
++ return r;
++ }
++
+ atomic_set(&dd->count, 0);
+ list_add(&dd->list, &t->devices);
+ }
+@@ -253,6 +294,7 @@
+
+ return 0;
+ }
++
+ /*
+ * decrement a devices use count and remove it if
+ * neccessary.
+@@ -260,6 +302,7 @@
+ void dm_table_put_device(struct dm_table *t, struct dm_dev *dd)
+ {
+ if (atomic_dec_and_test(&dd->count)) {
++ close_dev(dd);
+ list_del(&dd->list);
+ kfree(dd);
+ }
--- /dev/null
+--- linux-last/include/linux/device-mapper.h Tue Nov 13 14:38:11 2001
++++ linux/include/linux/device-mapper.h Tue Nov 13 16:01:12 2001
+@@ -42,6 +42,7 @@
+ * (ie. opened/closed).
+ */
+ int dm_table_get_device(struct dm_table *t, const char *path,
++ offset_t start, offset_t len,
+ struct dm_dev **result);
+ void dm_table_put_device(struct dm_table *table, struct dm_dev *d);
+
+--- linux-last/drivers/md/dm-table.c Tue Nov 13 15:56:39 2001
++++ linux/drivers/md/dm-table.c Tue Nov 13 16:04:55 2001
+@@ -6,6 +6,9 @@
+
+ #include "dm.h"
+
++#include <linux/blkdev.h>
++
++
+ /* ceiling(n / size) * size */
+ static inline ulong round_up(ulong n, ulong size)
+ {
+@@ -258,10 +261,32 @@
+ }
+
+ /*
++ * If possible (ie. blk_size[major] is set), this
++ * checks an area of a destination device is
++ * valid.
++ */
++static int check_device_area(kdev_t dev, offset_t start, offset_t len)
++{
++ int *sizes;
++ offset_t dev_size;
++
++ if (!(sizes = blk_size[MAJOR(dev)]) || !(dev_size = sizes[MINOR(dev)]))
++ /* we don't know the device details,
++ * so give the benefit of the doubt */
++ return 1;
++
++ /* convert to 512-byte sectors */
++ dev_size <<= 1;
++
++ return ((start < dev_size) && (len < (dev_size - start)));
++}
++
++/*
+ * add a device to the list, or just increment the
+ * usage count if it's already present.
+ */
+ int dm_table_get_device(struct dm_table *t, const char *path,
++ offset_t start, offset_t len,
+ struct dm_dev **result)
+ {
+ int r;
+@@ -290,6 +315,13 @@
+ list_add(&dd->list, &t->devices);
+ }
+ atomic_inc(&dd->count);
++
++ if (!check_device_area(dd->dev, start, len)) {
++ WARN("device '%s' not large enough for target", path);
++ dm_table_put_device(t, dd);
++ return -EINVAL;
++ }
++
+ *result = dd;
+
+ return 0;
+--- linux-last/drivers/md/dm-linear.c Tue Nov 13 14:24:30 2001
++++ linux/drivers/md/dm-linear.c Tue Nov 13 15:59:21 2001
+@@ -48,7 +48,7 @@
+ goto bad;
+ }
+
+- if ((r = dm_table_get_device(t, path, &lc->dev))) {
++ if ((r = dm_table_get_device(t, path, start, l, &lc->dev))) {
+ err("couldn't lookup device", e_private);
+ r = -ENXIO;
+ goto bad;
--- /dev/null
+--- linux-last/drivers/md/dm-table.c Wed Nov 14 14:42:24 2001
++++ linux/drivers/md/dm-table.c Wed Nov 14 17:48:28 2001
+@@ -91,6 +91,7 @@
+ memcpy(n_targets, t->targets, sizeof(*n_targets) * n);
+ }
+
++ memset(n_highs + n , -1, sizeof(*n_highs) * (num - n));
+ vfree(t->highs);
+
+ t->num_allocated = num;
+@@ -376,7 +377,8 @@
+
+ /* set up internal nodes, bottom-up */
+ for (i = t->depth - 2, total = 0; i >= 0; i--) {
+- t->index[i] = indexes + (KEYS_PER_NODE * t->counts[i]);
++ t->index[i] = indexes;
++ indexes += (KEYS_PER_NODE * t->counts[i]);
+ setup_btree_index(i, t);
+ }
+
--- /dev/null
+--- linux-last/include/linux/dm-ioctl.h Wed Nov 14 14:42:24 2001
++++ linux/include/linux/dm-ioctl.h Tue Nov 20 10:02:43 2001
+@@ -47,7 +47,7 @@
+ #define DM_IOCTL 0xfd
+ #define DM_CHAR_MAJOR 124
+
+-#define DM_CREATE _IOW(DM_IOCTL, 0x00, struct dm_ioctl)
++#define DM_CREATE _IOWR(DM_IOCTL, 0x00, struct dm_ioctl)
+ #define DM_REMOVE _IOW(DM_IOCTL, 0x01, struct dm_ioctl)
+ #define DM_SUSPEND _IOW(DM_IOCTL, 0x02, struct dm_ioctl)
+ #define DM_RELOAD _IOWR(DM_IOCTL, 0x03, struct dm_ioctl)
+--- linux-last/drivers/md/dm-linear.c Wed Nov 14 14:42:24 2001
++++ linux/drivers/md/dm-linear.c Wed Nov 14 20:13:15 2001
+@@ -112,4 +112,3 @@
+ #ifdef MODULE_LICENSE
+ MODULE_LICENSE("GPL");
+ #endif
+-
+--- linux-last/drivers/md/dm-ioctl.c Wed Nov 14 14:42:24 2001
++++ linux/drivers/md/dm-ioctl.c Tue Nov 20 10:32:47 2001
+@@ -134,7 +134,33 @@
+ return r;
+ }
+
+-static int create(struct dm_ioctl *param)
++/*
++ * Copies device info back to user space, used by
++ * the create and info ioctls.
++ */
++static int info(const char *name, struct dm_ioctl *user)
++{
++ struct dm_ioctl param;
++ struct mapped_device *md = dm_get(name);
++
++ if (!md) {
++ param.exists = 0;
++ goto out;
++ }
++
++ param.data_size = 0;
++ strncpy(param.name, md->name, sizeof(param.name));
++ param.exists = 1;
++ param.suspend = md->suspended;
++ param.open_count = md->use_count;
++ param.minor = MINOR(md->dev);
++ param.target_count = md->map->num_targets;
++
++ out:
++ return copy_to_user(user, &param, sizeof(param));
++}
++
++static int create(struct dm_ioctl *param, struct dm_ioctl *user)
+ {
+ int r;
+ struct mapped_device *md;
+@@ -143,15 +169,22 @@
+ if ((r = dm_table_create(&t)))
+ return r;
+
+- if ((r = populate_table(t, param))) {
+- dm_table_destroy(t);
+- return r;
+- }
++ if ((r = populate_table(t, param)))
++ goto bad;
+
+ if ((r = dm_create(param->name, param->minor, t, &md)))
+- return r;
++ goto bad;
++
++ if ((r = info(param->name, user))) {
++ dm_destroy(md);
++ goto bad;
++ }
+
+ return 0;
++
++ bad:
++ dm_table_destroy(t);
++ return r;
+ }
+
+ static int remove(struct dm_ioctl *param)
+@@ -202,28 +235,6 @@
+ return 0;
+ }
+
+-static int info(struct dm_ioctl *param, struct dm_ioctl *user)
+-{
+- struct mapped_device *md = dm_get(param->name);
+-
+- if (!md) {
+- param->exists = 0;
+- goto out;
+- }
+-
+- param->exists = 1;
+- param->suspend = md->suspended;
+- param->open_count = md->use_count;
+- param->minor = MINOR(md->dev);
+- param->target_count = md->map->num_targets;
+-
+- out:
+- if (copy_to_user(user, param, sizeof(*param)))
+- return -EFAULT;
+-
+- return 0;
+-}
+-
+ static int ctl_open(struct inode *inode, struct file *file)
+ {
+ /* only root can open this */
+@@ -244,7 +255,7 @@
+ static int ctl_ioctl(struct inode *inode, struct file *file,
+ uint command, ulong a)
+ {
+- int r = -EINVAL;
++ int r;
+ struct dm_ioctl *p;
+
+ if ((r = copy_params((struct dm_ioctl *) a, &p)))
+@@ -252,7 +263,7 @@
+
+ switch (command) {
+ case DM_CREATE:
+- r = create(p);
++ r = create(p, (struct dm_ioctl *) a);
+ break;
+
+ case DM_REMOVE:
+@@ -268,11 +279,12 @@
+ break;
+
+ case DM_INFO:
+- r = info(p, (struct dm_ioctl *) a);
++ r = info(p->name, (struct dm_ioctl *) a);
+ break;
+
+ default:
+ WARN("dm_ctl_ioctl: unknown command 0x%x\n", command);
++ r = -EINVAL;
+ }
+
+ free_params(p);