From: Milan Broz <mbroz@redhat.com>

Remove the clone_info struct and replace it with direct references to the bio.

It's no longer needed now that we never split into more than two parts.

Signed-off-by: Milan Broz <mbroz@redhat.com>
---
 drivers/md/dm.c |  103 +++++++++++++++++++++-----------------------------------
 1 files changed, 40 insertions(+), 63 deletions(-)

Index: current-quilt/drivers/md/dm.c
===================================================================
--- current-quilt.orig/drivers/md/dm.c	2007-07-25 21:04:52.000000000 +0100
+++ current-quilt/drivers/md/dm.c	2007-07-25 21:04:53.000000000 +0100
@@ -618,16 +618,6 @@ static void __map_bio(struct dm_target *
 	}
 }
 
-struct clone_info {
-	struct mapped_device *md;
-	struct dm_table *map;
-	struct bio *bio;
-	struct dm_io *io;
-	sector_t sector;
-	sector_t sector_count;
-	unsigned short idx;
-};
-
 static void dm_bio_destructor(struct bio *bio)
 {
 	struct mapped_device *md = bio->bi_private;
@@ -694,46 +684,36 @@ static struct bio *clone2_bio(struct bio
 	return clone;
 }
 
-static void __clone_and_map(struct clone_info *ci)
+static void __clone_and_map(struct bio *bio, struct dm_io *io,
+			    struct dm_target_io *tio)
 {
-	struct bio *clone, *bio = ci->bio;
-	struct bio *clone2 = NULL;
-	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
-	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
-	struct dm_target_io *tio;
-
-	/*
-	 * Allocate a target io object.
-	 */
-	tio = alloc_tio(ci->md);
-	tio->io = ci->io;
-	tio->ti = ti;
-	memset(&tio->info, 0, sizeof(tio->info));
+	struct bio *clone, *clone2 = NULL;
+	sector_t max = max_io_len(io->md, bio->bi_sector, tio->ti);
 
 	/* Merge page function should prevent split and in ideal
 	 * situation do not allow splitting at all.
 	 * Only very inefficient mapping should cause split to more
 	 * than 2 pieces, no need to extra optimize this case.
 	 */
-	if (ci->sector_count <= max) {
+	if (bio_sectors(bio) <= max) {
 		/*
 		 * Optimise for the simple case where we can do all of
 		 * the remaining io with a single clone.
 		 */
-		clone = clone_bio(bio, ci->sector, ci->idx,
-				  bio->bi_vcnt - ci->idx,
-				  ci->sector_count, ci->md->bs);
+		clone = clone_bio(bio, bio->bi_sector, bio->bi_idx,
+				  bio->bi_vcnt - bio->bi_idx,
+				  bio_sectors(bio), io->md->bs);
 
-	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
+	} else if (bio_cur_sectors(bio) <= max) {
 		/*
 		 * There are some bvecs that don't span targets.
 		 * Do as many of these as possible.
 		 */
 		int i;
 		sector_t remaining = max;
-		sector_t bv_len;
+		sector_t bv_len, len = 0;
 
-		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
+		for (i = bio->bi_idx; remaining && (i < bio->bi_vcnt); i++) {
 			bv_len = to_sector(bio->bi_io_vec[i].bv_len);
 
 			if (bv_len > remaining)
@@ -743,29 +723,24 @@ static void __clone_and_map(struct clone
 			len += bv_len;
 		}
 
-		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
-				  ci->md->bs);
-
-		ci->sector += len;
-		ci->sector_count -= len;
-		ci->idx = i;
+		clone = clone_bio(bio, bio->bi_sector, bio->bi_idx,
+				  i - bio->bi_idx, len, io->md->bs);
 
 		/* Clone second part of bio */
-		clone2 = clone2_bio(bio, ci->sector, ci->idx, ci->sector_count, ci->md);
+		clone2 = clone2_bio(bio, bio->bi_sector + len, i,
+				    bio_sectors(bio) - len, io->md);
 	} else {
 		/*
 		 * Handle a bvec that must be split between two or more targets.
 		 */
-		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
-
-		clone = split_bvec(bio, ci->sector, ci->idx, bv->bv_offset, max,
-				   ci->md->bs);
+		struct bio_vec *bv = bio->bi_io_vec + bio->bi_idx;
 
-		ci->sector += max;
-		ci->sector_count -= max;
+		clone = split_bvec(bio, bio->bi_sector, bio->bi_idx,
+				   bv->bv_offset, max, io->md->bs);
 
 		/* Clone second part of bio */
-		clone2 = clone2_bio(bio, ci->sector, ci->idx, ci->sector_count, ci->md);
+		clone2 = clone2_bio(bio, bio->bi_sector + max, bio->bi_idx,
+				    bio_sectors(bio) - max, io->md);
 		bv = clone2->bi_io_vec + clone2->bi_idx;
 		bv->bv_len -= to_bytes(max);
 		bv->bv_offset += to_bytes(max);
@@ -775,7 +750,7 @@ static void __clone_and_map(struct clone
 	 * Fire off both parts of bio, the first will be remmaped and
 	 * the second is queued for new dm_request to the same device.
 	 */
-	__map_bio(ti, clone, tio);
+	__map_bio(tio->ti, clone, tio);
 	if (clone2)
 		generic_make_request(clone2);
 }
@@ -785,32 +760,34 @@ static void __clone_and_map(struct clone
  */
 static int __split_bio(struct mapped_device *md, struct bio *bio)
 {
-	struct clone_info ci;
+	struct dm_table *map = dm_get_table(md);
+	struct dm_io *io;
+	struct dm_target_io *tio;
 
-	ci.map = dm_get_table(md);
-	if (unlikely(!ci.map))
+	if (unlikely(!map))
 		return -EIO;
 
-	ci.md = md;
-	ci.bio = bio;
-	ci.io = alloc_io(md);
-	ci.io->error = 0;
-	atomic_set(&ci.io->io_count, 1);
-	ci.io->bio = bio;
-	ci.io->md = md;
-	ci.sector = bio->bi_sector;
-	ci.sector_count = bio_sectors(bio);
-	ci.idx = bio->bi_idx;
+	io = alloc_io(md);
+	io->error = 0;
+	atomic_set(&io->io_count, 1);
+	io->bio = bio;
+	io->md = md;
+
+	tio = alloc_tio(md);
+	tio->io = io;
+	tio->ti = dm_table_find_target(map, bio->bi_sector);
+	memset(&tio->info, 0, sizeof(tio->info));
 
-	start_io_acct(ci.io);
-	__clone_and_map(&ci);
+	start_io_acct(io);
+	__clone_and_map(bio, io, tio);
 
 	/* drop the extra reference count */
-	dec_pending(ci.io, 0);
-	dm_table_put(ci.map);
+	dec_pending(io, 0);
+	dm_table_put(map);
 
 	return 0;
 }
+
 /*-----------------------------------------------------------------
  * CRUD END
  *---------------------------------------------------------------*/