Remove the underscore prefix from static functions.
--- diff/drivers/md/dm-mpath.c	2003-12-29 10:15:50.000000000 +0000
+++ source/drivers/md/dm-mpath.c	2003-12-29 10:15:56.000000000 +0000
@@ -77,7 +77,7 @@
 	int reactivation_interval;	/* Automatic reactivation interval */
 	int fail_max;			/* Maximum failures allowed */
 
-	typeof(jiffies) io_jiffies;	/* Jiffies of last IO queued */
+	jiffy_t io_jiffies;	/* Jiffies of last IO queued */
 	atomic_t fail;			/* actual failure count vs. fail_max */
 	atomic_t fail_total;		/* Total failures on this path */
 
@@ -92,19 +92,19 @@
  */
 
-/* Set/Rretrieve jiffies of last IO on this path */
+/* Set/retrieve jiffies of last IO on this path */
-static inline void _set_io_jiffies(struct path *path)
+static inline void set_io_jiffies(struct path *path)
 {
 	path->io_jiffies = jiffies;
 }
 
-static inline typeof(jiffies) _get_io_jiffies(struct path *path)
+static inline jiffy_t get_io_jiffies(struct path *path)
 {
 	return path->io_jiffies;
 }
 
 /* "Queue" an event on a table in order to process
    dm_table_event() calls in task context */
-static inline void _queue_table_event(struct multipath_io *io)
+static inline void queue_table_event(struct multipath_io *io)
 {
 	struct multipath_c *mc = (struct multipath_c *) io->mc;
 
@@ -112,28 +112,28 @@
 }
 
 /* Check path failed */
-static inline int _is_failed(struct path *path)
+static inline int is_failed(struct path *path)
 {
 	return test_bit(FAILED, &path->flags);
 }
 
 /* Set a path to "failed" */
-static inline void _set_failed(struct multipath_io *io)
+static inline void set_failed(struct multipath_io *io)
 {
 	struct path *path = io->path;
 	struct path_selector *ps = &path->mc->ps;
 
-	if (_is_failed(path))
+	if (is_failed(path))
 		return;
 
 	atomic_inc(&path->fail_total);
 	io->path->test_sector = io->bh->b_rsector;
 	ps->type->set_path_state(path->ps_private, 1);
-	_queue_table_event(io);
+	queue_table_event(io);
 }
 
 /* Reset failure information on a path */
-static inline void _reset_failures(struct path *path)
+static inline void reset_failures(struct path *path)
 {
 	struct path_selector *ps = &path->mc->ps;
 
@@ -146,47 +146,47 @@
 /* Reset a "failed" path
  * (IOW: set it to operational so that it can be selected for IO submission)
  */
-static inline void _reset_failed(struct multipath_io *io)
+static inline void reset_failed(struct multipath_io *io)
 {
 	struct path *path = io->path;
 
-	if (_is_failed(path)) {
+	if (is_failed(path)) {
-		_reset_failures(path);
-		_queue_table_event(io);
+		reset_failures(path);
+		queue_table_event(io);
 	}
 }
 
 /* Scrub IO handling */
-static inline void _reset_scrub_io(struct path *path)
+static inline void reset_scrub_io(struct path *path)
 {
 	clear_bit(SCRUB_IO, &path->flags);
 }
 
 
 /* Scrub timeout calculation */
-static inline unsigned long _get_reactivation_timeout(struct path *path)
+static inline unsigned long get_reactivation_timeout(struct path *path)
 {
 	return path->reactivation_interval * HZ;
 }
 
-static inline unsigned long _get_scrub_timeout(struct path *path)
+static inline unsigned long get_scrub_timeout(struct path *path)
 {
 	return path->mc->scrub_interval * HZ;
 }
 
-/* Calculate scrubbing sleep timeout for deamon */
+/* Calculate scrubbing sleep timeout for daemon */
-static inline int _scrub_timeout(struct path *path, long *timeout)
+static inline int scrub_timeout(struct path *path, long *timeout)
 {
 	int ret = 0;
-	typeof(jiffies) j = _get_io_jiffies(path);
-	typeof(jiffies) t = _is_failed(path) ? _get_reactivation_timeout(path) :
-					       _get_scrub_timeout(path);
+	jiffy_t j = get_io_jiffies(path);
+	jiffy_t t = is_failed(path) ? get_reactivation_timeout(path) :
+		get_scrub_timeout(path);
 
 	if (t) {
 		/* Jiffies wrap around check */
 		if (jiffies < j) {
 			*timeout = HZ;
-			_set_io_jiffies(path);
+			set_io_jiffies(path);
 			return 1;
 		}
 
@@ -237,9 +237,9 @@
 }
 
 
-/*
+/*-----------------------------------------------------------------
  * IO job allocation/deallocation
- */
+ *---------------------------------------------------------------*/
 
 /* Slab for the io jobs */
 static kmem_cache_t *_multipath_cache;
@@ -261,7 +261,7 @@
 }
 
 /* Multipath context allocation */
-static inline struct multipath_c *_alloc_context(void)
+static inline struct multipath_c *alloc_context(void)
 {
 	struct multipath_c *mc = kmalloc(sizeof(*mc), GFP_KERNEL);
 
@@ -279,7 +279,7 @@
 }
 
 /* Path context allocation */
-static inline struct path *_alloc_path(void)
+static inline struct path *alloc_path(void)
 {
 	struct path *path = kmalloc(sizeof(*path), GFP_KERNEL);
 
@@ -291,7 +291,7 @@
 	return path;
 }
 
-static void _free_context(struct multipath_c *mc)
+static void free_context(struct multipath_c *mc)
 {
 	struct list_head *elem, *tmp;
 	struct path_selector *ps = &mc->ps;
@@ -325,14 +325,14 @@
 static spinlock_t _job_lock = SPIN_LOCK_UNLOCKED;
 
 /* Submit an IO and store the IO timestamp */
-static inline void _make_request(struct multipath_io *io)
+static inline void make_request(struct multipath_io *io)
 {
-	_set_io_jiffies(io->path);
+	set_io_jiffies(io->path);
 	generic_make_request(io->rw, io->bh);
 }
 
 /* Requeue error ios */
-static inline void _do_ios(void)
+static inline void do_ios(void)
 {
 	unsigned long flags;
 	struct multipath_c *mc;
@@ -341,7 +341,7 @@
 	spin_lock_irqsave(&_job_lock, flags);
 	list_for_each_entry(mc, &_mc_jobs, list) {
 		while ((io = pop(&mc->io_jobs, &mc->lock)))
-			_make_request(io);
+			make_request(io);
 	}
 	spin_unlock_irqrestore(&_job_lock, flags);
 
@@ -349,7 +349,7 @@
 }
 
 /* Work all table events thrown */
-static inline void _do_table_events(void)
+static inline void do_table_events(void)
 {
 	unsigned long flags;
 	struct multipath_c *mc;
@@ -368,7 +368,7 @@
 }
 
-/* Allocate a scrubing IO buffer_head and page */
+/* Allocate a scrubbing IO buffer_head and page */
-static inline struct buffer_head *_alloc_scrub_bh(void)
+static inline struct buffer_head *alloc_scrub_bh(void)
 {
 	struct buffer_head *bh = kmalloc(sizeof(*bh), GFP_NOIO);
 
@@ -393,7 +393,7 @@
 }
 
 /* Free a scrubing IO page and buffer_head */
-static inline void _free_scrub_bh(struct buffer_head *bh)
+static inline void free_scrub_bh(struct buffer_head *bh)
 {
 	UnlockPage(bh->b_page);
 	__free_page(bh->b_page);
@@ -410,14 +410,14 @@
 		unsigned long flags;
 
 		spin_lock_irqsave(&mc->lock, flags);
-		_reset_failed(io);
+		reset_failed(io);
 		spin_unlock_irqrestore(&mc->lock, flags);
 
 		dm_daemon_wake(&_kmultipathd);
 	}
 
-	_reset_scrub_io(io->path);
-	_free_scrub_bh(io->bh);
+	reset_scrub_io(io->path);
+	free_scrub_bh(io->bh);
 	free_io(io);
 }
 
@@ -430,7 +430,7 @@
  *	1: scrub IO queued
  *
  */
-static inline int _queue_scrub_io(struct path *path)
+static inline int queue_scrub_io(struct path *path)
 {
 	struct multipath_io *io;
 	struct buffer_head *bh;
@@ -438,7 +438,7 @@
 	if (test_and_set_bit(SCRUB_IO, &path->flags))
 		goto out;
 
-	bh = _alloc_scrub_bh();
+	bh = alloc_scrub_bh();
 	if (!bh)
 		goto retry;	/* just retry later */
 
@@ -457,13 +457,13 @@
 	bh->b_end_io = multipath_scrub_end_io;
 	bh->b_private = io;
 
-	_make_request(io);
+	make_request(io);
 	run_task_queue(&tq_disk);
 
 	return 1;
 
 retry:
-	_reset_scrub_io(path);
+	reset_scrub_io(path);
 
 out:
 	return 0;
@@ -489,8 +489,8 @@
 			continue;
 
 		list_for_each_entry(path, &mc->paths, list) {
-			if (_scrub_timeout(path, &timeout))
-				_queue_scrub_io(path);
+			if (scrub_timeout(path, &timeout))
+				queue_scrub_io(path);
 		}
 	}
 	spin_unlock_irqrestore(&_job_lock, flags);
@@ -573,7 +573,7 @@
 	if (MIN_PARMS + paths * parms != argc)
 		goto bad_parms;
 
-	mc = _alloc_context();
+	mc = alloc_context();
 	if (!mc)
 		goto bad_context;
 
@@ -591,7 +591,7 @@
 	for (a = MIN_PARMS; a < argc; a += parms, av += parms) {
 		void *path_c;
 
-		path = _alloc_path();
+		path = alloc_path();
 		if (!path)
 			goto bad_alloc_path;
 
@@ -614,7 +614,7 @@
 
 		path->ps_private = path_c;
 		path->mc = mc;
-		_reset_failures(path);
+		reset_failures(path);
 	}
 
 	ti->private = mc;
@@ -637,33 +637,33 @@
 	return -ENOMEM;
 
 bad_ps:
-	_free_context(mc);
+	free_context(mc);
 	ti->error = "dm-multipath: invalid path selector";
 	return -EINVAL;
 
 bad_ps_ctr:
-	_free_context(mc);
+	free_context(mc);
 	ti->error = "dm-multipath: error path selector constructor";
        	return -ENXIO;
 
 bad_alloc_path:
-	_free_context(mc);
+	free_context(mc);
 	ti->error = "dm-multipath: can't allocate path context";
 	return -ENOMEM;
 
 bad_dm_get_device:
-	_free_context(mc);
+	free_context(mc);
 	ti->error = "dm-multipath: error getting device";
        	return -ENXIO;
 
 bad_ps_add:
-	_free_context(mc);
+	free_context(mc);
 	ti->error = "dm-multipath: error add path";
        	return -ENXIO;
 }
 #undef xx
 
-static void _wait_for_scrub_ios(struct multipath_c *mc)
+static void wait_for_scrub_ios(struct multipath_c *mc)
 {
 	struct path *path;
 
@@ -673,7 +673,7 @@
 	}
 }
 
-static inline void _remove_mc_job(struct multipath_c *mc)
+static inline void remove_mc_job(struct multipath_c *mc)
 {
 	unsigned long flags;
 	struct multipath_c *mc_tmp;
@@ -698,11 +698,11 @@
-	_free_context(mc);
+	free_context(mc);
 }
 
-static inline void _map(struct multipath_io *io, struct path *path)
+static inline void map(struct multipath_io *io, struct path *path)
 {
 	io->path = path;
 	io->bh->b_rdev = path->dev->dev;
-	_set_io_jiffies(path);
+	set_io_jiffies(path);
 }
 
 static int multipath_end_io(struct dm_target *ti, struct buffer_head *bh,
@@ -718,12 +718,12 @@
 
 	if (error) {
 		if (atomic_dec_and_test(&path->fail))
-			_set_failed(io);
+			set_failed(io);
 
 		path = pst->select_path(ps, io->bh, io->rw, &io->path_context);
 		if (path) {
 			/* Map the IO to this new path */
-			_map(io, path);
+			map(io, path);
 			push(&mc->io_jobs, &io->list, &mc->lock);
 			dm_daemon_wake(&_kmultipathd);
 
@@ -746,7 +746,7 @@
 	struct multipath_c *mc = (struct multipath_c *) ti->private;
 
 	atomic_set(&mc->suspended, 1);
-	_wait_for_scrub_ios(mc);
+	wait_for_scrub_ios(mc);
 }
 
 /* Resume */
@@ -778,7 +778,7 @@
 	io->bh = bh;
 	io->rw = rw;
 
-	_map(io, path);			/* Map the IO to this path */
+	map(io, path);			/* Map the IO to this path */
 	map_context->ptr = (void *) io;	/* Save for multipath_end_io() */
 
 	return 1;	/* Normal map */