1 --- kernel-2.6.28.orig/drivers/md/Kconfig
2 +++ kernel-2.6.28/drivers/md/Kconfig
8 + tristate "Loop target (EXPERIMENTAL)"
9 + depends on BLK_DEV_DM && EXPERIMENTAL
11 + This device-mapper target allows you to treat a regular file as
17 tristate "Snapshot target"
19 --- kernel-2.6.28.orig/drivers/md/Makefile
20 +++ kernel-2.6.28/drivers/md/Makefile
22 obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
23 obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
24 obj-$(CONFIG_DM_DELAY) += dm-delay.o
25 +obj-$(CONFIG_DM_LOOP) += dm-loop.o
26 obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
27 obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
28 obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
30 +++ kernel-2.6.28/drivers/md/dm-loop.c
33 + * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
35 + * This file is part of device-mapper.
37 + * Extent mapping implementation heavily influenced by mm/swapfile.c
38 + * Bryn Reeves <breeves@redhat.com>
40 + * File mapping and block lookup algorithms supported by
41 + * Heinz Mauelshagen <hjm@redhat.com>.
43 + * This file is released under the GPL.
46 +#include <linux/kernel.h>
47 +#include <linux/slab.h>
48 +#include <linux/fs.h>
49 +#include <linux/module.h>
50 +#include <linux/vmalloc.h>
51 +#include <linux/syscalls.h>
52 +#include <linux/workqueue.h>
53 +#include <linux/file.h>
54 +#include <linux/bio.h>
57 +#include "dm-bio-list.h"
59 +#define DM_LOOP_DAEMON "kloopd"
60 +#define DM_MSG_PREFIX "loop"
62 +enum flags { DM_LOOP_BMAP, DM_LOOP_FSIO };
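+/*
+ * DM_LOOP_BMAP: extents of the backing file are resolved up front via
+ * bmap() and bios are remapped directly to the underlying block device.
+ * DM_LOOP_FSIO: bios are queued to the kloopd workqueue and serviced
+ * through the file's read/write methods (fallback for sparse files or
+ * files with no underlying block device).
+ */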
64 +/*--------------------------------------------------------------------
66 + *--------------------------------------------------------------------*/
69 + unsigned long flags;
76 + struct block_device *bdev;
77 + unsigned blkbits; /* file system block size shift bits */
79 + loff_t size; /* size of entire file in bytes */
80 + loff_t blocks; /* blocks allocated to loop file */
81 + sector_t mapped_sectors; /* size of mapped area in sectors */
83 + int (*map_fn)(struct dm_target *, struct bio *);
90 +struct dm_loop_extent {
91 + sector_t start; /* start sector in mapped device */
92 + sector_t to; /* start sector on target device */
93 + sector_t len; /* length in sectors */
97 + * Temporary extent list
100 + struct dm_loop_extent *extent;
101 + struct list_head list;
104 +static struct kmem_cache *dm_loop_extent_cache;
107 + * Block map private context
109 +struct block_map_c {
110 + int nr_extents; /* number of extents in map */
111 + struct dm_loop_extent **map; /* linear map of extent pointers */
112 + struct dm_loop_extent **mru; /* pointer to mru entry */
113 + spinlock_t mru_lock; /* protects mru */
117 + * File map private context
120 + spinlock_t lock; /* protects in */
121 + struct bio_list in; /* new bios for processing */
122 + struct bio_list work; /* bios queued for processing */
123 + struct workqueue_struct *wq; /* workqueue */
124 + struct work_struct ws; /* loop work */
125 + struct loop_c *loop; /* for filp & offset */
128 +/*--------------------------------------------------------------------
130 + *--------------------------------------------------------------------*/
132 +static sector_t blk2sect(struct loop_c *lc, blkcnt_t block)
134 + return block << (lc->blkbits - SECTOR_SHIFT);
137 +static blkcnt_t sec2blk(struct loop_c *lc, sector_t sector)
139 + return sector >> (lc->blkbits - SECTOR_SHIFT);
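+/*
+ * Example: with a 4 KiB filesystem block size (blkbits = 12),
+ * blk2sect() maps block 3 to sector 3 << (12 - 9) = 24, and
+ * sec2blk() maps sector 24 back to block 3.
+ */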
142 +/*--------------------------------------------------------------------
144 + *--------------------------------------------------------------------*/
147 + * Transfer data to/from the loop file using the read/write file_operations.
149 +static int fs_io(int rw, struct file *filp, loff_t *pos, struct bio_vec *bv)
152 + void __user *ptr = (void __user __force *) kmap(bv->bv_page) +
154 + mm_segment_t old_fs = get_fs();
157 + r = (rw == READ) ? filp->f_op->read(filp, ptr, bv->bv_len, pos) :
158 + filp->f_op->write(filp, ptr, bv->bv_len, pos);
160 + kunmap(bv->bv_page);
162 + return (r == bv->bv_len) ? 0 : -EIO;
166 + * Handle I/O for one bio
168 +static void do_one_bio(struct file_map_c *fc, struct bio *bio)
170 + int r = 0, rw = bio_data_dir(bio);
171 + loff_t start = (bio->bi_sector << 9) + fc->loop->offset, pos = start;
172 + struct bio_vec *bv, *bv_end = bio->bi_io_vec + bio->bi_vcnt;
174 + for (bv = bio->bi_io_vec; bv < bv_end; bv++) {
175 + r = fs_io(rw, fc->loop->filp, &pos, bv);
177 + DMERR("%s error %d", rw ? "write" : "read", r);
186 + * Worker thread for a 'file' type loop device
188 +static void do_loop_work(struct work_struct *ws)
190 + struct file_map_c *fc = container_of(ws, struct file_map_c, ws);
193 + /* quickly grab all new bios queued and add them to the work list */
194 + spin_lock_irq(&fc->lock);
195 + bio_list_merge(&fc->work, &fc->in);
196 + bio_list_init(&fc->in);
197 + spin_unlock_irq(&fc->lock);
199 + /* work the list and do file I/O on all bios */
200 + while ((bio = bio_list_pop(&fc->work)))
201 + do_one_bio(fc, bio);
205 + * Create work queue and initialize work
207 +static int loop_work_init(struct loop_c *lc)
209 + struct file_map_c *fc = lc->map_data;
211 + fc->wq = create_singlethread_workqueue(DM_LOOP_DAEMON);
219 + * Destroy work queue
221 +static void loop_work_exit(struct loop_c *lc)
223 + struct file_map_c *fc = lc->map_data;
226 + destroy_workqueue(fc->wq);
230 + * DM_LOOP_FSIO map_fn. Mapping just queues bios to the file map
231 + * context and lets the daemon deal with them.
233 +static int loop_file_map(struct dm_target *ti, struct bio *bio)
236 + struct loop_c *lc = ti->private;
237 + struct file_map_c *fc = lc->map_data;
239 + spin_lock_irq(&fc->lock);
240 + wake = bio_list_empty(&fc->in);
241 + bio_list_add(&fc->in, bio);
242 + spin_unlock_irq(&fc->lock);
245 + * Only call queue_work() if necessary to avoid
246 + * superfluous preempt_{disable/enable}() overhead.
249 + queue_work(fc->wq, &fc->ws);
251 + /* Handling bio - will submit later. */
256 + * Shut down the workqueue and free a file mapping
258 +static void destroy_file_map(struct loop_c *lc)
260 + loop_work_exit(lc);
261 + kfree(lc->map_data);
265 + * Set up a file map context and workqueue
267 +static int setup_file_map(struct loop_c *lc)
269 + struct file_map_c *fc = kzalloc(sizeof(*fc), GFP_KERNEL);
274 + spin_lock_init(&fc->lock);
275 + bio_list_init(&fc->in);
276 + bio_list_init(&fc->work);
277 + INIT_WORK(&fc->ws, do_loop_work);
281 + lc->map_fn = loop_file_map;
283 + return loop_work_init(lc);
286 +/*--------------------------------------------------------------------
287 + * Block I/O helpers
288 + *--------------------------------------------------------------------*/
290 +static int contains_sector(struct dm_loop_extent *e, sector_t s)
293 + return s < (e->start + (e->len)) && s >= e->start;
299 + * Walk over a linked list of extent_list structures, freeing them as
300 + * we go. Does not free el->extent.
302 +static void destroy_extent_list(struct list_head *head)
304 + struct list_head *curr, *n;
305 + struct extent_list *el;
307 + if (list_empty(head))
310 + list_for_each_safe(curr, n, head) {
311 + el = list_entry(curr, struct extent_list, list);
318 + * Add a new extent to the tail of the list at *head with
319 + * start/to/len parameters. Allocates from the extent cache.
321 +static int list_add_extent(struct list_head *head, sector_t start,
322 + sector_t to, sector_t len)
324 + struct dm_loop_extent *extent;
325 + struct extent_list *list;
327 + extent = kmem_cache_alloc(dm_loop_extent_cache, GFP_KERNEL);
331 + list = kmalloc(sizeof(*list), GFP_KERNEL);
335 + extent->start = start;
339 + list->extent = extent;
340 + list_add_tail(&list->list, head);
345 + kmem_cache_free(dm_loop_extent_cache, extent);
350 + * Return an extent range (i.e. beginning and ending physical block numbers).
352 +static int extent_range(struct inode *inode,
353 + blkcnt_t logical_blk, blkcnt_t last_blk,
354 + blkcnt_t *begin_blk, blkcnt_t *end_blk)
356 + sector_t dist = 0, phys_blk, probe_blk = logical_blk;
358 + /* Find beginning physical block of extent starting at logical_blk. */
359 + *begin_blk = phys_blk = bmap(inode, probe_blk);
363 + for (; phys_blk == *begin_blk + dist; dist++) {
364 + *end_blk = phys_blk;
365 + if (++probe_blk > last_blk)
368 + phys_blk = bmap(inode, probe_blk);
369 + if (unlikely(!phys_blk))
377 + * Create a sequential list of extents from an inode and return
378 + * it in *head. On success return the number of extents found or
379 + * -ERRNO on failure.
381 +static int loop_extents(struct loop_c *lc, struct inode *inode,
382 + struct list_head *head)
384 + sector_t start = 0;
385 + int r, nr_extents = 0;
386 + blkcnt_t nr_blks = 0, begin_blk = 0, end_blk = 0;
387 + blkcnt_t after_last_blk = sec2blk(lc,
388 + (lc->mapped_sectors + (lc->offset >> 9)));
389 + blkcnt_t logical_blk = sec2blk(lc, (lc->offset >> 9));
391 + /* for each block in the mapped region */
392 + while (logical_blk < after_last_blk) {
393 + r = extent_range(inode, logical_blk, after_last_blk - 1,
394 + &begin_blk, &end_blk);
396 + /* sparse file fallback */
398 + DMWARN("%s has a hole; sparse file detected - "
399 + "switching to filesystem I/O", lc->path);
400 + clear_bit(DM_LOOP_BMAP, &lc->flags);
401 + set_bit(DM_LOOP_FSIO, &lc->flags);
405 + nr_blks = 1 + end_blk - begin_blk;
407 + if (unlikely(!nr_blks))
410 + r = list_add_extent(head, start, blk2sect(lc, begin_blk),
411 + blk2sect(lc, nr_blks));
415 + /* advance to next extent */
417 + start += blk2sect(lc, nr_blks);
418 + logical_blk += nr_blks;
425 + * Walk over the extents in a block_map_c, returning them to the cache and
426 + * freeing bc->map and bc.
428 +static void destroy_block_map(struct block_map_c *bc)
435 + for (i = 0; i < bc->nr_extents; i++)
436 + kmem_cache_free(dm_loop_extent_cache, bc->map[i]);
438 + DMDEBUG("destroying block map of %d entries", i);
445 + * Find an extent in *bc using binary search. Returns a pointer into the
446 + * extent map. Calculate index as (extent - bc->map).
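+ * The starting probe is biased towards the most-recently-used extent:
+ * the first step lands roughly halfway between the MRU slot and the
+ * relevant end of the map, and the probe distance is halved on each
+ * miss until the extent containing the sector is reached.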
448 +static struct dm_loop_extent **extent_binary_lookup(struct block_map_c *bc,
449 + struct dm_loop_extent **extent_mru, sector_t sector)
451 + unsigned nr_extents = bc->nr_extents;
452 + unsigned delta, dist, prev_dist = 0;
453 + struct dm_loop_extent **eptr;
455 + /* Optimize lookup range based on MRU extent. */
456 + dist = extent_mru - bc->map;
457 + if ((*extent_mru)->start >= sector)
458 + delta = dist = dist / 2;
460 + delta = (nr_extents - dist) / 2;
464 + eptr = bc->map + dist;
465 + while (*eptr && !contains_sector(*eptr, sector)) {
466 + if (sector >= (*eptr)->start + (*eptr)->len) {
472 + delta = (dist - prev_dist) / 2;
477 + eptr = bc->map + dist;
484 + * Look up an extent for a sector using the MRU cache and binary search.
486 +static struct dm_loop_extent *extent_lookup(struct block_map_c *bc,
489 + struct dm_loop_extent **eptr;
491 + spin_lock_irq(&bc->mru_lock);
493 + spin_unlock_irq(&bc->mru_lock);
495 + if (contains_sector(*eptr, sector))
498 + eptr = extent_binary_lookup(bc, eptr, sector);
502 + spin_lock_irq(&bc->mru_lock);
504 + spin_unlock_irq(&bc->mru_lock);
510 + * DM_LOOP_BMAP map_fn. Looks up the sector in the extent map and
511 + * rewrites the bio device and bi_sector fields.
513 +static int loop_block_map(struct dm_target *ti, struct bio *bio)
515 + struct loop_c *lc = ti->private;
516 + struct dm_loop_extent *extent = extent_lookup(lc->map_data,
519 + if (likely(extent)) {
520 + bio->bi_bdev = lc->bdev;
521 + bio->bi_sector = extent->to + (bio->bi_sector - extent->start);
522 + return 1; /* Done with bio -> submit */
525 + DMERR("no matching extent in map for sector %llu",
526 + (unsigned long long) bio->bi_sector + ti->begin);
533 + * Turn an extent_list into a linear pointer map of nr_extents + 1 entries
534 + * and set the final entry to NULL.
536 +static struct dm_loop_extent **build_extent_map(struct list_head *head,
538 + unsigned long *flags)
540 + unsigned map_size, cache_size;
541 + struct dm_loop_extent **map, **curr;
542 + struct list_head *pos;
543 + struct extent_list *el;
545 +	map_size = sizeof(*map) * (nr_extents + 1); /* +1 for the NULL terminator */
546 + cache_size = kmem_cache_size(dm_loop_extent_cache) * nr_extents;
548 + map = vmalloc(map_size);
551 + DMDEBUG("allocated extent map of %u %s for %d extents (%u %s)",
552 + (map_size < 8192) ? map_size : map_size >> 10,
553 + (map_size < 8192) ? "bytes" : "kilobytes", nr_extents,
554 + (cache_size < 8192) ? cache_size : cache_size >> 10,
555 + (cache_size < 8192) ? "bytes" : "kilobytes");
557 + list_for_each(pos, head) {
558 + el = list_entry(pos, struct extent_list, list);
559 + *(curr++) = el->extent;
567 + * Set up a block map context and extent map
569 +static int setup_block_map(struct loop_c *lc, struct inode *inode)
572 + struct block_map_c *bc;
575 + if (!inode || !inode->i_sb || !inode->i_sb->s_bdev)
578 + /* build a linked list of extents in linear order */
579 + r = nr_extents = loop_extents(lc, inode, &head);
580 + if (nr_extents < 1)
584 + bc = kzalloc(sizeof(*bc), GFP_KERNEL);
588 + /* create a linear map of pointers into the extent cache */
589 + bc->map = build_extent_map(&head, nr_extents, &lc->flags);
590 + destroy_extent_list(&head);
592 + if (IS_ERR(bc->map)) {
593 + r = PTR_ERR(bc->map);
597 + spin_lock_init(&bc->mru_lock);
599 + bc->nr_extents = nr_extents;
600 + lc->bdev = inode->i_sb->s_bdev;
602 + lc->map_fn = loop_block_map;
610 +/*--------------------------------------------------------------------
612 + *--------------------------------------------------------------------*/
615 + * Invalidate all unlocked loop file pages
617 +static int loop_invalidate_file(struct file *filp)
621 + /* Same as generic_file_direct_IO() */
622 + unmap_mapping_range(filp->f_mapping, 0, ~0UL, 0);
624 + r = filemap_write_and_wait(filp->f_mapping);
629 + * This will remove all pages except dirty ones.
630 + * If there are dirty pages at this point, it means that the user
631 + * is writing to the file and the coherency is lost anyway.
632 + * If the user was writing to the file simultaneously, this
633 + * returns non-zero, but we ignore that.
635 + invalidate_inode_pages2_range(filp->f_mapping, 0, ~0UL);
641 + * Acquire or release a "no-truncate" lock on *filp.
642 + * We overload the S_SWAPFILE flag for loop targets because
643 + * it provides the same no-truncate semantics we require, and
644 + * holding onto i_sem is no longer an option.
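+ * A useful side effect is that loop_check_file() refuses any file with
+ * S_SWAPFILE already set, so a backing file cannot be shared by two
+ * loop targets or double as an active swap file.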
646 +static void file_truncate_lock(struct file *filp)
648 + struct inode *inode = filp->f_mapping->host;
650 + mutex_lock(&inode->i_mutex);
651 + inode->i_flags |= S_SWAPFILE;
652 + mutex_unlock(&inode->i_mutex);
655 +static void file_truncate_unlock(struct file *filp)
657 + struct inode *inode = filp->f_mapping->host;
659 + mutex_lock(&inode->i_mutex);
660 + inode->i_flags &= ~S_SWAPFILE;
661 + mutex_unlock(&inode->i_mutex);
665 + * Fill out split_io for the target backing store
667 +static void set_split_io(struct dm_target *ti)
669 + struct loop_c *lc = ti->private;
671 + if (test_bit(DM_LOOP_BMAP, &lc->flags))
672 + /* Split I/O at block boundaries */
673 + ti->split_io = 1 << (lc->blkbits - SECTOR_SHIFT);
677 + DMDEBUG("splitting io at %llu sector boundaries",
678 + (unsigned long long) ti->split_io);
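+/*
+ * Splitting at filesystem block boundaries guarantees that every bio
+ * handed to loop_block_map() lies entirely within one extent, so a
+ * single lookup and remap suffices per bio.
+ */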
682 + * Check that the loop file is regular and available.
684 +static int loop_check_file(struct dm_target *ti)
686 + struct loop_c *lc = ti->private;
687 + struct file *filp = lc->filp;
688 + struct inode *inode = filp->f_mapping->host;
693 + ti->error = "backing file must be a regular file";
694 + if (!S_ISREG(inode->i_mode))
697 + ti->error = "backing file is mapped into userspace for writing";
698 + if (mapping_writably_mapped(filp->f_mapping))
701 + if (mapping_mapped(filp->f_mapping))
702 + DMWARN("%s is mapped into userspace", lc->path);
704 + if (!inode->i_sb || !inode->i_sb->s_bdev) {
705 + DMWARN("%s has no blockdevice - switching to filesystem I/O",
707 + clear_bit(DM_LOOP_BMAP, &lc->flags);
708 + set_bit(DM_LOOP_FSIO, &lc->flags);
711 + ti->error = "backing file already in use";
712 + if (IS_SWAPFILE(inode))
719 + * Check loop file size and store it in the loop context
721 +static int loop_setup_size(struct dm_target *ti)
723 + struct loop_c *lc = ti->private;
724 + struct inode *inode = lc->filp->f_mapping->host;
727 + lc->size = i_size_read(inode);
728 + lc->blkbits = inode->i_blkbits;
730 + ti->error = "backing file is empty";
734 + DMDEBUG("set backing file size to %llu", (unsigned long long) lc->size);
736 + ti->error = "backing file cannot be less than one block in size";
737 + if (lc->size < (blk2sect(lc, 1) << 9))
740 + ti->error = "loop file offset must be a multiple of fs blocksize";
741 + if (lc->offset & ((1 << lc->blkbits) - 1))
744 + ti->error = "loop file offset too large";
745 + if (lc->offset > (lc->size - (1 << 9)))
748 + lc->mapped_sectors = (lc->size - lc->offset) >> 9;
749 + DMDEBUG("set mapped sectors to %llu (%llu bytes)",
750 + (unsigned long long) lc->mapped_sectors,
751 + (lc->size - lc->offset));
753 + if ((lc->offset + (lc->mapped_sectors << 9)) < lc->size)
754 + DMWARN("not using %llu bytes in incomplete block at EOF",
755 + lc->size - (lc->offset + (lc->mapped_sectors << 9)));
757 + ti->error = "mapped region cannot be smaller than target size";
758 + if (lc->size - lc->offset < (ti->len << 9))
768 + * Release a loop file
770 +static void loop_put_file(struct file *filp)
775 + file_truncate_unlock(filp);
776 + filp_close(filp, NULL);
780 + * Open loop file and perform type, availability and size checks.
782 +static int loop_get_file(struct dm_target *ti)
784 + int flags = ((dm_table_get_mode(ti->table) & FMODE_WRITE) ?
785 + O_RDWR : O_RDONLY) | O_LARGEFILE;
786 + struct loop_c *lc = ti->private;
790 + ti->error = "could not open backing file";
791 + filp = filp_open(lc->path, flags, 0);
793 + return PTR_ERR(filp);
795 + r = loop_check_file(ti);
799 + r = loop_setup_size(ti);
803 + file_truncate_lock(filp);
812 + * Invalidate mapped pages belonging to the loop file
814 +static void loop_flush(struct dm_target *ti)
816 + struct loop_c *lc = ti->private;
818 + loop_invalidate_file(lc->filp);
821 +/*--------------------------------------------------------------------
822 + * Device-mapper target methods
823 + *--------------------------------------------------------------------*/
826 + * Generic loop map function. Re-base I/O to target begin and submit
828 +static int loop_map(struct dm_target *ti, struct bio *bio,
829 + union map_info *context)
831 + struct loop_c *lc = ti->private;
833 + if (unlikely(bio_barrier(bio)))
834 + return -EOPNOTSUPP;
836 + bio->bi_sector -= ti->begin;
839 + return lc->map_fn(ti, bio);
845 + * File status helper
847 +static ssize_t loop_file_status(struct loop_c *lc, char *result,
851 + struct file_map_c *fc = lc->map_data;
854 + spin_lock_irq(&fc->lock);
855 + qlen = bio_list_size(&fc->work);
856 + qlen += bio_list_size(&fc->in);
857 + spin_unlock_irq(&fc->lock);
859 + DMEMIT("file %d", qlen);
865 + * Block status helper
867 +static ssize_t loop_block_status(struct loop_c *lc, char *result,
871 + struct block_map_c *bc = lc->map_data;
874 + spin_lock_irq(&bc->mru_lock);
875 + mru = bc->mru - bc->map;
876 + spin_unlock_irq(&bc->mru_lock);
878 + DMEMIT("block %d %d", bc->nr_extents, mru);
884 + * This needs some thought on handling unlinked backing files. Some parts of
885 + * the kernel return a cached name (now invalid), while others return a dcache
886 + * "/path/to/foo (deleted)" name (never was/is valid). Which is "better" is
889 + * On the one hand, using a cached name gives table output which is directly
890 + * usable assuming the user re-creates the unlinked image file, on the other
891 + * it is more consistent with e.g. swap to use the dcache name.
894 +static int loop_status(struct dm_target *ti, status_type_t type, char *result,
897 + struct loop_c *lc = ti->private;
901 + case STATUSTYPE_INFO:
902 + if (test_bit(DM_LOOP_BMAP, &lc->flags))
903 + sz += loop_block_status(lc, result, maxlen - sz);
904 + else if (test_bit(DM_LOOP_FSIO, &lc->flags))
905 + sz += loop_file_status(lc, result, maxlen - sz);
908 + case STATUSTYPE_TABLE:
909 + DMEMIT("%s %llu", lc->path, lc->offset);
916 + * Destroy a loopback mapping
918 +static void loop_dtr(struct dm_target *ti)
920 + struct loop_c *lc = ti->private;
922 + if ((dm_table_get_mode(ti->table) & FMODE_WRITE))
923 + loop_invalidate_file(lc->filp);
925 + if (test_bit(DM_LOOP_BMAP, &lc->flags) && lc->map_data)
926 + destroy_block_map((struct block_map_c *)lc->map_data);
927 + if (test_bit(DM_LOOP_FSIO, &lc->flags) && lc->map_data)
928 + destroy_file_map(lc);
930 + loop_put_file(lc->filp);
931 + DMINFO("released file %s", lc->path);
937 + * Construct a loopback mapping: <path> <offset>
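+ * Example (hypothetical path): the table line
+ *     0 2097152 loop /srv/images/backing.img 0
+ * maps a 1 GiB target onto the first 1 GiB of the backing file.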
939 +static int loop_ctr(struct dm_target *ti, unsigned argc, char **argv)
941 + struct loop_c *lc = NULL;
944 + ti->error = "invalid argument count";
949 + ti->error = "cannot allocate loop context";
950 + lc = kzalloc(sizeof(*lc), GFP_KERNEL);
955 + set_bit(DM_LOOP_BMAP, &lc->flags);
956 + ti->error = "cannot allocate loop path";
957 + lc->path = kstrdup(argv[0], GFP_KERNEL);
964 + ti->error = "invalid file offset";
965 + if (sscanf(argv[1], "%lld", &lc->offset) != 1)
969 + DMDEBUG("setting file offset to %lld", lc->offset);
971 + /* open & check file and set size parameters */
972 + r = loop_get_file(ti);
974 + /* ti->error has been set by loop_get_file */
978 + ti->error = "could not create loop mapping";
979 + if (test_bit(DM_LOOP_BMAP, &lc->flags))
980 + r = setup_block_map(lc, lc->filp->f_mapping->host);
981 + if (test_bit(DM_LOOP_FSIO, &lc->flags))
982 + r = setup_file_map(lc);
987 + loop_invalidate_file(lc->filp);
991 + dm_set_device_limits(ti, lc->bdev);
993 + DMDEBUG("constructed loop target on %s "
994 + "(%lldk, %llu sectors)", lc->path,
995 + (lc->size >> 10), (unsigned long long)lc->mapped_sectors);
1001 + loop_put_file(lc->filp);
1007 +static struct target_type loop_target = {
1009 + .version = {0, 0, 2},
1010 + .module = THIS_MODULE,
1014 + .presuspend = loop_flush,
1015 + .flush = loop_flush,
1016 + .status = loop_status,
1019 +/*--------------------------------------------------------------------
1021 + *--------------------------------------------------------------------*/
1022 +static int __init dm_loop_init(void)
1026 + r = dm_register_target(&loop_target);
1028 + DMERR("register failed %d", r);
1033 + dm_loop_extent_cache = KMEM_CACHE(dm_loop_extent, SLAB_HWCACHE_ALIGN);
1034 + if (!dm_loop_extent_cache)
1037 + DMINFO("version %u.%u.%u loaded",
1038 + loop_target.version[0], loop_target.version[1],
1039 + loop_target.version[2]);
1044 + if (dm_loop_extent_cache)
1045 + kmem_cache_destroy(dm_loop_extent_cache);
1050 +static void __exit dm_loop_exit(void)
1054 + r = dm_unregister_target(&loop_target);
1055 + kmem_cache_destroy(dm_loop_extent_cache);
1058 + DMERR("target unregister failed %d", r);
1060 + DMINFO("version %u.%u.%u unloaded",
1061 + loop_target.version[0], loop_target.version[1],
1062 + loop_target.version[2]);
1065 +module_init(dm_loop_init);
1066 +module_exit(dm_loop_exit);
1068 +MODULE_LICENSE("GPL");
1069 +MODULE_AUTHOR("Bryn Reeves <breeves@redhat.com>");
1070 +MODULE_DESCRIPTION("device-mapper loop target");