--- /dev/null
+diff -ru linux-2.4.21-rc6+/arch/mips64/kernel/ioctl32.c linux/arch/mips64/kernel/ioctl32.c
+--- linux-2.4.21-rc6+/arch/mips64/kernel/ioctl32.c Mon Jun 2 14:01:54 2003
++++ linux/arch/mips64/kernel/ioctl32.c Mon Jun 2 19:45:58 2003
+@@ -33,6 +33,7 @@
+ #include <linux/auto_fs.h>
+ #include <linux/ext2_fs.h>
+ #include <linux/raid/md_u.h>
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ #undef __KERNEL__ /* This file was born to be ugly ... */
+@@ -914,6 +915,21 @@
+ IOCTL32_DEFAULT(STOP_ARRAY_RO),
+ IOCTL32_DEFAULT(RESTART_ARRAY_RW),
+ #endif /* CONFIG_MD */
++
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++ IOCTL32_DEFAULT(DM_VERSION),
++ IOCTL32_DEFAULT(DM_REMOVE_ALL),
++ IOCTL32_DEFAULT(DM_DEV_CREATE),
++ IOCTL32_DEFAULT(DM_DEV_REMOVE),
++ IOCTL32_DEFAULT(DM_DEV_RELOAD),
++ IOCTL32_DEFAULT(DM_DEV_SUSPEND),
++ IOCTL32_DEFAULT(DM_DEV_RENAME),
++ IOCTL32_DEFAULT(DM_DEV_DEPS),
++ IOCTL32_DEFAULT(DM_DEV_STATUS),
++ IOCTL32_DEFAULT(DM_TARGET_STATUS),
++ IOCTL32_DEFAULT(DM_TARGET_WAIT),
++ IOCTL32_DEFAULT(DM_LIST_DEVICES),
++#endif /* CONFIG_BLK_DEV_DM */
+
+ IOCTL32_DEFAULT(MTIOCTOP), /* mtio.h ioctls */
+ IOCTL32_HANDLER(MTIOCGET32, mt_ioctl_trans),
+diff -ru linux-2.4.21-rc6+/arch/parisc/kernel/ioctl32.c linux/arch/parisc/kernel/ioctl32.c
+--- linux-2.4.21-rc6+/arch/parisc/kernel/ioctl32.c Mon Jun 2 14:01:50 2003
++++ linux/arch/parisc/kernel/ioctl32.c Mon Jun 2 19:45:58 2003
+@@ -55,6 +55,7 @@
+ #define max max */
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -3418,6 +3419,21 @@
+ COMPATIBLE_IOCTL(LV_BMAP)
+ COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE)
+ #endif /* LVM */
++/* Device-Mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION)
++COMPATIBLE_IOCTL(DM_REMOVE_ALL)
++COMPATIBLE_IOCTL(DM_DEV_CREATE)
++COMPATIBLE_IOCTL(DM_DEV_REMOVE)
++COMPATIBLE_IOCTL(DM_DEV_RELOAD)
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
++COMPATIBLE_IOCTL(DM_DEV_RENAME)
++COMPATIBLE_IOCTL(DM_DEV_DEPS)
++COMPATIBLE_IOCTL(DM_DEV_STATUS)
++COMPATIBLE_IOCTL(DM_TARGET_STATUS)
++COMPATIBLE_IOCTL(DM_TARGET_WAIT)
++COMPATIBLE_IOCTL(DM_LIST_DEVICES)
++#endif /* CONFIG_BLK_DEV_DM */
+ #if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
+ COMPATIBLE_IOCTL(DRM_IOCTL_GET_MAGIC)
+ COMPATIBLE_IOCTL(DRM_IOCTL_IRQ_BUSID)
+diff -ru linux-2.4.21-rc6+/arch/ppc64/kernel/ioctl32.c linux/arch/ppc64/kernel/ioctl32.c
+--- linux-2.4.21-rc6+/arch/ppc64/kernel/ioctl32.c Mon Jun 2 14:02:13 2003
++++ linux/arch/ppc64/kernel/ioctl32.c Mon Jun 2 19:45:58 2003
+@@ -66,6 +66,7 @@
+ #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -4423,6 +4424,21 @@
+ COMPATIBLE_IOCTL(NBD_PRINT_DEBUG),
+ COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS),
+ COMPATIBLE_IOCTL(NBD_DISCONNECT),
++/* device-mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION),
++COMPATIBLE_IOCTL(DM_REMOVE_ALL),
++COMPATIBLE_IOCTL(DM_DEV_CREATE),
++COMPATIBLE_IOCTL(DM_DEV_REMOVE),
++COMPATIBLE_IOCTL(DM_DEV_RELOAD),
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND),
++COMPATIBLE_IOCTL(DM_DEV_RENAME),
++COMPATIBLE_IOCTL(DM_DEV_DEPS),
++COMPATIBLE_IOCTL(DM_DEV_STATUS),
++COMPATIBLE_IOCTL(DM_TARGET_STATUS),
++COMPATIBLE_IOCTL(DM_TARGET_WAIT),
++COMPATIBLE_IOCTL(DM_LIST_DEVICES),
++#endif /* CONFIG_BLK_DEV_DM */
+ /* Remove *PRIVATE in 2.5 */
+ COMPATIBLE_IOCTL(SIOCDEVPRIVATE),
+ COMPATIBLE_IOCTL(SIOCDEVPRIVATE+1),
+diff -ru linux-2.4.21-rc6+/arch/s390x/kernel/ioctl32.c linux/arch/s390x/kernel/ioctl32.c
+--- linux-2.4.21-rc6+/arch/s390x/kernel/ioctl32.c Mon Jun 2 14:01:16 2003
++++ linux/arch/s390x/kernel/ioctl32.c Mon Jun 2 19:45:58 2003
+@@ -25,6 +25,7 @@
+ #include <linux/ext2_fs.h>
+ #include <linux/hdreg.h>
+ #include <linux/if_bonding.h>
++#include <linux/dm-ioctl.h>
+ #include <asm/types.h>
+ #include <asm/uaccess.h>
+ #include <asm/dasd.h>
+@@ -507,6 +508,19 @@
+ IOCTL32_DEFAULT(VT_UNLOCKSWITCH),
+
+ IOCTL32_DEFAULT(SIOCGSTAMP),
++
++ IOCTL32_DEFAULT(DM_VERSION),
++ IOCTL32_DEFAULT(DM_REMOVE_ALL),
++ IOCTL32_DEFAULT(DM_DEV_CREATE),
++ IOCTL32_DEFAULT(DM_DEV_REMOVE),
++ IOCTL32_DEFAULT(DM_DEV_RELOAD),
++ IOCTL32_DEFAULT(DM_DEV_SUSPEND),
++ IOCTL32_DEFAULT(DM_DEV_RENAME),
++ IOCTL32_DEFAULT(DM_DEV_DEPS),
++ IOCTL32_DEFAULT(DM_DEV_STATUS),
++ IOCTL32_DEFAULT(DM_TARGET_STATUS),
++ IOCTL32_DEFAULT(DM_TARGET_WAIT),
++ IOCTL32_DEFAULT(DM_LIST_DEVICES),
+
+ IOCTL32_HANDLER(SIOCGIFNAME, dev_ifname32),
+ IOCTL32_HANDLER(SIOCGIFCONF, dev_ifconf),
+diff -ru linux-2.4.21-rc6+/arch/sparc64/kernel/ioctl32.c linux/arch/sparc64/kernel/ioctl32.c
+--- linux-2.4.21-rc6+/arch/sparc64/kernel/ioctl32.c Mon Jun 2 14:02:58 2003
++++ linux/arch/sparc64/kernel/ioctl32.c Mon Jun 2 19:52:45 2003
+@@ -56,6 +56,7 @@
+ #if defined(CONFIG_BLK_DEV_LVM) || defined(CONFIG_BLK_DEV_LVM_MODULE)
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -5076,6 +5077,21 @@
+ COMPATIBLE_IOCTL(NBD_PRINT_DEBUG)
+ COMPATIBLE_IOCTL(NBD_SET_SIZE_BLOCKS)
+ COMPATIBLE_IOCTL(NBD_DISCONNECT)
++/* device-mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION)
++COMPATIBLE_IOCTL(DM_REMOVE_ALL)
++COMPATIBLE_IOCTL(DM_DEV_CREATE)
++COMPATIBLE_IOCTL(DM_DEV_REMOVE)
++COMPATIBLE_IOCTL(DM_DEV_RELOAD)
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
++COMPATIBLE_IOCTL(DM_DEV_RENAME)
++COMPATIBLE_IOCTL(DM_DEV_DEPS)
++COMPATIBLE_IOCTL(DM_DEV_STATUS)
++COMPATIBLE_IOCTL(DM_TARGET_STATUS)
++COMPATIBLE_IOCTL(DM_TARGET_WAIT)
++COMPATIBLE_IOCTL(DM_LIST_DEVICES)
++#endif /* CONFIG_BLK_DEV_DM */
+ /* Linux-1394 */
+ #if defined(CONFIG_IEEE1394) || defined(CONFIG_IEEE1394_MODULE)
+ COMPATIBLE_IOCTL(AMDTP_IOC_CHANNEL)
+diff -ru linux-2.4.21-rc6+/arch/x86_64/ia32/ia32_ioctl.c linux/arch/x86_64/ia32/ia32_ioctl.c
+--- linux-2.4.21-rc6+/arch/x86_64/ia32/ia32_ioctl.c Mon Jun 2 14:00:49 2003
++++ linux/arch/x86_64/ia32/ia32_ioctl.c Mon Jun 2 19:50:57 2003
+@@ -67,6 +67,7 @@
+ #define max max
+ #include <linux/lvm.h>
+ #endif /* LVM */
++#include <linux/dm-ioctl.h>
+
+ #include <scsi/scsi.h>
+ /* Ugly hack. */
+@@ -4047,6 +4048,21 @@
+ COMPATIBLE_IOCTL(LV_BMAP)
+ COMPATIBLE_IOCTL(LV_SNAPSHOT_USE_RATE)
+ #endif /* LVM */
++/* Device-Mapper */
++#if defined(CONFIG_BLK_DEV_DM) || defined(CONFIG_BLK_DEV_DM_MODULE)
++COMPATIBLE_IOCTL(DM_VERSION)
++COMPATIBLE_IOCTL(DM_REMOVE_ALL)
++COMPATIBLE_IOCTL(DM_DEV_CREATE)
++COMPATIBLE_IOCTL(DM_DEV_REMOVE)
++COMPATIBLE_IOCTL(DM_DEV_RELOAD)
++COMPATIBLE_IOCTL(DM_DEV_SUSPEND)
++COMPATIBLE_IOCTL(DM_DEV_RENAME)
++COMPATIBLE_IOCTL(DM_DEV_DEPS)
++COMPATIBLE_IOCTL(DM_DEV_STATUS)
++COMPATIBLE_IOCTL(DM_TARGET_STATUS)
++COMPATIBLE_IOCTL(DM_TARGET_WAIT)
++COMPATIBLE_IOCTL(DM_LIST_DEVICES)
++#endif /* CONFIG_BLK_DEV_DM */
+ #ifdef CONFIG_AUTOFS_FS
+ COMPATIBLE_IOCTL(AUTOFS_IOC_READY)
+ COMPATIBLE_IOCTL(AUTOFS_IOC_FAIL)
--- /dev/null
+--- diff/fs/buffer.c 2003-06-02 16:42:26.000000000 +0100
++++ source/fs/buffer.c 2003-06-02 17:07:27.000000000 +0100
+@@ -735,6 +735,7 @@
+ bh->b_list = BUF_CLEAN;
+ bh->b_end_io = handler;
+ bh->b_private = private;
++ bh->b_journal_head = NULL;
+ }
+
+ static void end_buffer_io_async(struct buffer_head * bh, int uptodate)
+--- diff/fs/jbd/journal.c 2003-06-02 16:42:26.000000000 +0100
++++ source/fs/jbd/journal.c 2003-06-02 17:34:44.000000000 +0100
+@@ -1802,9 +1802,9 @@
+
+ if (buffer_jbd(bh)) {
+ /* Someone did it for us! */
+- J_ASSERT_BH(bh, bh->b_private != NULL);
++ J_ASSERT_BH(bh, bh->b_journal_head != NULL);
+ journal_free_journal_head(jh);
+- jh = bh->b_private;
++ jh = bh->b_journal_head;
+ } else {
+ /*
+ * We actually don't need jh_splice_lock when
+@@ -1812,7 +1812,7 @@
+ */
+ spin_lock(&jh_splice_lock);
+ set_bit(BH_JBD, &bh->b_state);
+- bh->b_private = jh;
++ bh->b_journal_head = jh;
+ jh->b_bh = bh;
+ atomic_inc(&bh->b_count);
+ spin_unlock(&jh_splice_lock);
+@@ -1821,7 +1821,7 @@
+ }
+ jh->b_jcount++;
+ spin_unlock(&journal_datalist_lock);
+- return bh->b_private;
++ return bh->b_journal_head;
+ }
+
+ /*
+@@ -1854,7 +1854,7 @@
+ J_ASSERT_BH(bh, jh2bh(jh) == bh);
+ BUFFER_TRACE(bh, "remove journal_head");
+ spin_lock(&jh_splice_lock);
+- bh->b_private = NULL;
++ bh->b_journal_head = NULL;
+ jh->b_bh = NULL; /* debug, really */
+ clear_bit(BH_JBD, &bh->b_state);
+ __brelse(bh);
+--- diff/include/linux/fs.h 2003-06-02 16:42:28.000000000 +0100
++++ source/include/linux/fs.h 2003-06-02 17:36:34.000000000 +0100
+@@ -263,7 +263,7 @@
+ struct page *b_page; /* the page this bh is mapped to */
+ void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */
+ void *b_private; /* reserved for b_end_io */
+-
++ void *b_journal_head; /* ext3 journal_heads */
+ unsigned long b_rsector; /* Real buffer location on disk */
+ wait_queue_head_t b_wait;
+
+--- diff/include/linux/jbd.h 2003-06-02 16:42:28.000000000 +0100
++++ source/include/linux/jbd.h 2003-06-02 17:38:02.000000000 +0100
+@@ -311,7 +311,7 @@
+
+ static inline struct journal_head *bh2jh(struct buffer_head *bh)
+ {
+- return bh->b_private;
++ return bh->b_journal_head;
+ }
+
+ #define HAVE_JOURNAL_CALLBACK_STATUS
--- /dev/null
+diff -ruN linux-2.4.20/include/linux/mempool.h linux/include/linux/mempool.h
+--- linux-2.4.20/include/linux/mempool.h Thu Jan 1 01:00:00 1970
++++ linux/include/linux/mempool.h Wed Mar 26 12:53:48 2003
+@@ -0,0 +1,31 @@
++/*
++ * memory buffer pool support
++ */
++#ifndef _LINUX_MEMPOOL_H
++#define _LINUX_MEMPOOL_H
++
++#include <linux/list.h>
++#include <linux/wait.h>
++
++struct mempool_s;
++typedef struct mempool_s mempool_t;
++
++typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
++typedef void (mempool_free_t)(void *element, void *pool_data);
++
++extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
++ mempool_free_t *free_fn, void *pool_data);
++extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
++extern void mempool_destroy(mempool_t *pool);
++extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
++extern void mempool_free(void *element, mempool_t *pool);
++
++/*
++ * A mempool_alloc_t and mempool_free_t that get the memory from
++ * a slab that is passed in through pool_data.
++ */
++void *mempool_alloc_slab(int gfp_mask, void *pool_data);
++void mempool_free_slab(void *element, void *pool_data);
++
++
++#endif /* _LINUX_MEMPOOL_H */
+diff -ruN linux-2.4.20/mm/Makefile linux/mm/Makefile
+--- linux-2.4.20/mm/Makefile Fri Jan 10 16:36:02 2003
++++ linux/mm/Makefile Wed Mar 26 12:53:19 2003
+@@ -9,12 +9,12 @@
+
+ O_TARGET := mm.o
+
+-export-objs := shmem.o filemap.o memory.o page_alloc.o
++export-objs := shmem.o filemap.o memory.o page_alloc.o mempool.o
+
+ obj-y := memory.o mmap.o filemap.o mprotect.o mlock.o mremap.o \
+ vmalloc.o slab.o bootmem.o swap.o vmscan.o page_io.o \
+ page_alloc.o swap_state.o swapfile.o numa.o oom_kill.o \
+- shmem.o
++ shmem.o mempool.o
+
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+
+diff -ruN linux-2.4.20/mm/mempool.c linux/mm/mempool.c
+--- linux-2.4.20/mm/mempool.c Thu Jan 1 01:00:00 1970
++++ linux/mm/mempool.c Wed Mar 26 12:53:48 2003
+@@ -0,0 +1,299 @@
++/*
++ * linux/mm/mempool.c
++ *
++ * memory buffer pool support. Such pools are mostly used
++ * for guaranteed, deadlock-free memory allocations during
++ * extreme VM load.
++ *
++ * started by Ingo Molnar, Copyright (C) 2001
++ */
++
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/mempool.h>
++
++struct mempool_s {
++ spinlock_t lock;
++ int min_nr; /* nr of elements at *elements */
++ int curr_nr; /* Current nr of elements at *elements */
++ void **elements;
++
++ void *pool_data;
++ mempool_alloc_t *alloc;
++ mempool_free_t *free;
++ wait_queue_head_t wait;
++};
++
++static void add_element(mempool_t *pool, void *element)
++{
++ BUG_ON(pool->curr_nr >= pool->min_nr);
++ pool->elements[pool->curr_nr++] = element;
++}
++
++static void *remove_element(mempool_t *pool)
++{
++ BUG_ON(pool->curr_nr <= 0);
++ return pool->elements[--pool->curr_nr];
++}
++
++static void free_pool(mempool_t *pool)
++{
++ while (pool->curr_nr) {
++ void *element = remove_element(pool);
++ pool->free(element, pool->pool_data);
++ }
++ kfree(pool->elements);
++ kfree(pool);
++}
++
++/**
++ * mempool_create - create a memory pool
++ * @min_nr: the minimum number of elements guaranteed to be
++ * allocated for this pool.
++ * @alloc_fn: user-defined element-allocation function.
++ * @free_fn: user-defined element-freeing function.
++ * @pool_data: optional private data available to the user-defined functions.
++ *
++ * this function creates and allocates a guaranteed size, preallocated
++ * memory pool. The pool can be used from the mempool_alloc and mempool_free
++ * functions. This function might sleep. Both the alloc_fn() and the free_fn()
++ * functions might sleep - as long as the mempool_alloc function is not called
++ * from IRQ contexts.
++ */
++mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
++ mempool_free_t *free_fn, void *pool_data)
++{
++ mempool_t *pool;
++
++ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
++ if (!pool)
++ return NULL;
++ memset(pool, 0, sizeof(*pool));
++ pool->elements = kmalloc(min_nr * sizeof(void *), GFP_KERNEL);
++ if (!pool->elements) {
++ kfree(pool);
++ return NULL;
++ }
++ spin_lock_init(&pool->lock);
++ pool->min_nr = min_nr;
++ pool->pool_data = pool_data;
++ init_waitqueue_head(&pool->wait);
++ pool->alloc = alloc_fn;
++ pool->free = free_fn;
++
++ /*
++ * First pre-allocate the guaranteed number of buffers.
++ */
++ while (pool->curr_nr < pool->min_nr) {
++ void *element;
++
++ element = pool->alloc(GFP_KERNEL, pool->pool_data);
++ if (unlikely(!element)) {
++ free_pool(pool);
++ return NULL;
++ }
++ add_element(pool, element);
++ }
++ return pool;
++}
++
++/**
++ * mempool_resize - resize an existing memory pool
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ * @new_min_nr: the new minimum number of elements guaranteed to be
++ * allocated for this pool.
++ * @gfp_mask: the usual allocation bitmask.
++ *
++ * This function shrinks/grows the pool. In the case of growing,
++ * it cannot be guaranteed that the pool will be grown to the new
++ * size immediately, but new mempool_free() calls will refill it.
++ *
++ * Note, the caller must guarantee that no mempool_destroy is called
++ * while this function is running. mempool_alloc() & mempool_free()
++ * might be called (eg. from IRQ contexts) while this function executes.
++ */
++int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
++{
++ void *element;
++ void **new_elements;
++ unsigned long flags;
++
++ BUG_ON(new_min_nr <= 0);
++
++ spin_lock_irqsave(&pool->lock, flags);
++ if (new_min_nr < pool->min_nr) {
++ while (pool->curr_nr > new_min_nr) {
++ element = remove_element(pool);
++ spin_unlock_irqrestore(&pool->lock, flags);
++ pool->free(element, pool->pool_data);
++ spin_lock_irqsave(&pool->lock, flags);
++ }
++ pool->min_nr = new_min_nr;
++ goto out_unlock;
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ /* Grow the pool */
++ new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
++ if (!new_elements)
++ return -ENOMEM;
++
++ spin_lock_irqsave(&pool->lock, flags);
++ memcpy(new_elements, pool->elements,
++ pool->curr_nr * sizeof(*new_elements));
++ kfree(pool->elements);
++ pool->elements = new_elements;
++ pool->min_nr = new_min_nr;
++
++ while (pool->curr_nr < pool->min_nr) {
++ spin_unlock_irqrestore(&pool->lock, flags);
++ element = pool->alloc(gfp_mask, pool->pool_data);
++ if (!element)
++ goto out;
++ spin_lock_irqsave(&pool->lock, flags);
++ if (pool->curr_nr < pool->min_nr)
++ add_element(pool, element);
++ else
++			pool->free(element, pool->pool_data); /* Raced */
++ }
++out_unlock:
++ spin_unlock_irqrestore(&pool->lock, flags);
++out:
++ return 0;
++}
++
++/**
++ * mempool_destroy - deallocate a memory pool
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ *
++ * this function only sleeps if the free_fn() function sleeps. The caller
++ * has to guarantee that all elements have been returned to the pool (ie:
++ * freed) prior to calling mempool_destroy().
++ */
++void mempool_destroy(mempool_t *pool)
++{
++ if (pool->curr_nr != pool->min_nr)
++ BUG(); /* There were outstanding elements */
++ free_pool(pool);
++}
++
++/**
++ * mempool_alloc - allocate an element from a specific memory pool
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ * @gfp_mask: the usual allocation bitmask.
++ *
++ * this function only sleeps if the alloc_fn function sleeps or
++ * returns NULL. Note that due to preallocation, this function
++ * *never* fails when called from process contexts. (it might
++ * fail if called from an IRQ context.)
++ */
++void * mempool_alloc(mempool_t *pool, int gfp_mask)
++{
++ void *element;
++ unsigned long flags;
++ int curr_nr;
++ DECLARE_WAITQUEUE(wait, current);
++ int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
++
++repeat_alloc:
++ element = pool->alloc(gfp_nowait, pool->pool_data);
++ if (likely(element != NULL))
++ return element;
++
++ /*
++ * If the pool is less than 50% full then try harder
++ * to allocate an element:
++ */
++ if ((gfp_mask != gfp_nowait) && (pool->curr_nr <= pool->min_nr/2)) {
++ element = pool->alloc(gfp_mask, pool->pool_data);
++ if (likely(element != NULL))
++ return element;
++ }
++
++ /*
++ * Kick the VM at this point.
++ */
++ wakeup_bdflush();
++
++ spin_lock_irqsave(&pool->lock, flags);
++ if (likely(pool->curr_nr)) {
++ element = remove_element(pool);
++ spin_unlock_irqrestore(&pool->lock, flags);
++ return element;
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ /* We must not sleep in the GFP_ATOMIC case */
++ if (gfp_mask == gfp_nowait)
++ return NULL;
++
++ run_task_queue(&tq_disk);
++
++ add_wait_queue_exclusive(&pool->wait, &wait);
++ set_task_state(current, TASK_UNINTERRUPTIBLE);
++
++ spin_lock_irqsave(&pool->lock, flags);
++ curr_nr = pool->curr_nr;
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ if (!curr_nr)
++ schedule();
++
++ current->state = TASK_RUNNING;
++ remove_wait_queue(&pool->wait, &wait);
++
++ goto repeat_alloc;
++}
++
++/**
++ * mempool_free - return an element to the pool.
++ * @element: pool element pointer.
++ * @pool: pointer to the memory pool which was allocated via
++ * mempool_create().
++ *
++ * this function only sleeps if the free_fn() function sleeps.
++ */
++void mempool_free(void *element, mempool_t *pool)
++{
++ unsigned long flags;
++
++ if (pool->curr_nr < pool->min_nr) {
++ spin_lock_irqsave(&pool->lock, flags);
++ if (pool->curr_nr < pool->min_nr) {
++ add_element(pool, element);
++ spin_unlock_irqrestore(&pool->lock, flags);
++ wake_up(&pool->wait);
++ return;
++ }
++ spin_unlock_irqrestore(&pool->lock, flags);
++ }
++ pool->free(element, pool->pool_data);
++}
++
++/*
++ * A commonly used alloc and free fn.
++ */
++void *mempool_alloc_slab(int gfp_mask, void *pool_data)
++{
++ kmem_cache_t *mem = (kmem_cache_t *) pool_data;
++ return kmem_cache_alloc(mem, gfp_mask);
++}
++
++void mempool_free_slab(void *element, void *pool_data)
++{
++ kmem_cache_t *mem = (kmem_cache_t *) pool_data;
++ kmem_cache_free(mem, element);
++}
++
++
++EXPORT_SYMBOL(mempool_create);
++EXPORT_SYMBOL(mempool_resize);
++EXPORT_SYMBOL(mempool_destroy);
++EXPORT_SYMBOL(mempool_alloc);
++EXPORT_SYMBOL(mempool_free);
++EXPORT_SYMBOL(mempool_alloc_slab);
++EXPORT_SYMBOL(mempool_free_slab);
--- /dev/null
+diff -ru linux-2.4.21-rc6+/include/linux/vmalloc.h linux/include/linux/vmalloc.h
+--- linux-2.4.21-rc6+/include/linux/vmalloc.h Mon Jun 2 14:03:16 2003
++++ linux/include/linux/vmalloc.h Mon Jun 2 15:53:06 2003
+@@ -26,6 +26,7 @@
+ extern void vmfree_area_pages(unsigned long address, unsigned long size);
+ extern int vmalloc_area_pages(unsigned long address, unsigned long size,
+ int gfp_mask, pgprot_t prot);
++extern void *vcalloc(unsigned long nmemb, unsigned long elem_size);
+
+ /*
+ * Allocate any pages
+diff -ru linux-2.4.21-rc6+/kernel/ksyms.c linux/kernel/ksyms.c
+--- linux-2.4.21-rc6+/kernel/ksyms.c Mon Jun 2 13:59:22 2003
++++ linux/kernel/ksyms.c Mon Jun 2 15:53:06 2003
+@@ -112,6 +112,7 @@
+ EXPORT_SYMBOL(vfree);
+ EXPORT_SYMBOL(__vmalloc);
+ EXPORT_SYMBOL(vmalloc_to_page);
++EXPORT_SYMBOL(vcalloc);
+ EXPORT_SYMBOL(mem_map);
+ EXPORT_SYMBOL(remap_page_range);
+ EXPORT_SYMBOL(max_mapnr);
+diff -ru linux-2.4.21-rc6+/mm/vmalloc.c linux/mm/vmalloc.c
+--- linux-2.4.21-rc6+/mm/vmalloc.c Mon Jun 2 14:02:13 2003
++++ linux/mm/vmalloc.c Mon Jun 2 15:53:06 2003
+@@ -327,3 +327,22 @@
+ read_unlock(&vmlist_lock);
+ return buf - buf_start;
+ }
++
++void *vcalloc(unsigned long nmemb, unsigned long elem_size)
++{
++ unsigned long size;
++ void *addr;
++
++ /*
++ * Check that we're not going to overflow.
++ */
++	if (elem_size == 0 || nmemb > (ULONG_MAX / elem_size))
++ return NULL;
++
++ size = nmemb * elem_size;
++ addr = vmalloc(size);
++ if (addr)
++ memset(addr, 0, size);
++
++ return addr;
++}