/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>

#include <asm/uaccess.h>
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.  Back-merge candidates are hashed on the sector
 * immediately following the request (rq_hash_key()).
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))
2006-12-20 13:04:12 +03:00
/*
* Query io scheduler to see if the current process issuing bio may be
* merged with rq .
*/
static int elv_iosched_allow_merge ( struct request * rq , struct bio * bio )
{
request_queue_t * q = rq - > q ;
elevator_t * e = q - > elevator ;
if ( e - > ops - > elevator_allow_merge_fn )
return e - > ops - > elevator_allow_merge_fn ( q , rq , bio ) ;
return 1 ;
}
2005-04-17 02:20:36 +04:00
/*
* can we safely merge with this request ?
*/
inline int elv_rq_merge_ok ( struct request * rq , struct bio * bio )
{
if ( ! rq_mergeable ( rq ) )
return 0 ;
/*
* different data direction or already started , don ' t merge
*/
if ( bio_data_dir ( bio ) ! = rq_data_dir ( rq ) )
return 0 ;
/*
2006-12-20 13:04:12 +03:00
* must be same device and not a special request
2005-04-17 02:20:36 +04:00
*/
2006-12-21 23:20:01 +03:00
if ( rq - > rq_disk ! = bio - > bi_bdev - > bd_disk | | rq - > special )
2006-12-20 13:04:12 +03:00
return 0 ;
if ( ! elv_iosched_allow_merge ( rq , bio ) )
return 0 ;
2005-04-17 02:20:36 +04:00
2006-12-20 13:04:12 +03:00
return 1 ;
2005-04-17 02:20:36 +04:00
}
EXPORT_SYMBOL ( elv_rq_merge_ok ) ;
2005-12-28 12:55:49 +03:00
static inline int elv_try_merge ( struct request * __rq , struct bio * bio )
2005-04-17 02:20:36 +04:00
{
int ret = ELEVATOR_NO_MERGE ;
/*
* we can merge and sequence is ok , check if it ' s possible
*/
if ( elv_rq_merge_ok ( __rq , bio ) ) {
if ( __rq - > sector + __rq - > nr_sectors = = bio - > bi_sector )
ret = ELEVATOR_BACK_MERGE ;
else if ( __rq - > sector - bio_sectors ( bio ) = = bio - > bi_sector )
ret = ELEVATOR_FRONT_MERGE ;
}
return ret ;
}
static struct elevator_type * elevator_find ( const char * name )
{
2006-10-11 11:24:27 +04:00
struct elevator_type * e ;
2005-04-17 02:20:36 +04:00
2007-07-10 14:26:24 +04:00
list_for_each_entry ( e , & elv_list , list ) {
2006-10-11 11:24:27 +04:00
if ( ! strcmp ( e - > elevator_name , name ) )
return e ;
2005-04-17 02:20:36 +04:00
}
2006-10-11 11:24:27 +04:00
return NULL ;
2005-04-17 02:20:36 +04:00
}
static void elevator_put ( struct elevator_type * e )
{
module_put ( e - > elevator_owner ) ;
}
static struct elevator_type * elevator_get ( const char * name )
{
2005-10-20 12:56:41 +04:00
struct elevator_type * e ;
2005-04-17 02:20:36 +04:00
2007-04-26 16:41:53 +04:00
spin_lock ( & elv_list_lock ) ;
2005-10-20 12:56:41 +04:00
e = elevator_find ( name ) ;
if ( e & & ! try_module_get ( e - > elevator_owner ) )
e = NULL ;
2007-04-26 16:41:53 +04:00
spin_unlock ( & elv_list_lock ) ;
2005-04-17 02:20:36 +04:00
return e ;
}
2006-06-08 10:49:06 +04:00
static void * elevator_init_queue ( request_queue_t * q , struct elevator_queue * eq )
2005-04-17 02:20:36 +04:00
{
2006-12-01 12:42:33 +03:00
return eq - > ops - > elevator_init_fn ( q ) ;
2006-06-08 10:49:06 +04:00
}
2005-04-17 02:20:36 +04:00
2006-06-08 10:49:06 +04:00
static void elevator_attach ( request_queue_t * q , struct elevator_queue * eq ,
void * data )
{
2005-04-17 02:20:36 +04:00
q - > elevator = eq ;
2006-06-08 10:49:06 +04:00
eq - > elevator_data = data ;
2005-04-17 02:20:36 +04:00
}
static char chosen_elevator [ 16 ] ;
2006-01-24 12:07:58 +03:00
static int __init elevator_setup ( char * str )
2005-04-17 02:20:36 +04:00
{
2006-01-16 11:47:37 +03:00
/*
* Be backwards - compatible with previous kernels , so users
* won ' t get the wrong elevator .
*/
2006-01-24 12:07:58 +03:00
if ( ! strcmp ( str , " as " ) )
2006-01-16 11:47:37 +03:00
strcpy ( chosen_elevator , " anticipatory " ) ;
2005-11-09 15:24:20 +03:00
else
2006-01-24 12:07:58 +03:00
strncpy ( chosen_elevator , str , sizeof ( chosen_elevator ) - 1 ) ;
2006-03-31 14:30:33 +04:00
return 1 ;
2005-04-17 02:20:36 +04:00
}
__setup ( " elevator= " , elevator_setup ) ;
2006-03-19 02:35:43 +03:00
static struct kobj_type elv_ktype ;
2006-07-20 01:39:40 +04:00
static elevator_t * elevator_alloc ( request_queue_t * q , struct elevator_type * e )
2006-03-19 02:35:43 +03:00
{
2006-07-28 11:23:08 +04:00
elevator_t * eq ;
int i ;
2006-07-20 01:39:40 +04:00
eq = kmalloc_node ( sizeof ( elevator_t ) , GFP_KERNEL , q - > node ) ;
2006-07-28 11:23:08 +04:00
if ( unlikely ( ! eq ) )
goto err ;
memset ( eq , 0 , sizeof ( * eq ) ) ;
eq - > ops = & e - > ops ;
eq - > elevator_type = e ;
kobject_init ( & eq - > kobj ) ;
snprintf ( eq - > kobj . name , KOBJ_NAME_LEN , " %s " , " iosched " ) ;
eq - > kobj . ktype = & elv_ktype ;
mutex_init ( & eq - > sysfs_lock ) ;
2006-07-20 01:39:40 +04:00
eq - > hash = kmalloc_node ( sizeof ( struct hlist_head ) * ELV_HASH_ENTRIES ,
GFP_KERNEL , q - > node ) ;
2006-07-28 11:23:08 +04:00
if ( ! eq - > hash )
goto err ;
for ( i = 0 ; i < ELV_HASH_ENTRIES ; i + + )
INIT_HLIST_HEAD ( & eq - > hash [ i ] ) ;
2006-03-19 02:35:43 +03:00
return eq ;
2006-07-28 11:23:08 +04:00
err :
kfree ( eq ) ;
elevator_put ( e ) ;
return NULL ;
2006-03-19 02:35:43 +03:00
}
static void elevator_release ( struct kobject * kobj )
{
elevator_t * e = container_of ( kobj , elevator_t , kobj ) ;
2006-07-28 11:23:08 +04:00
2006-03-19 02:35:43 +03:00
elevator_put ( e - > elevator_type ) ;
2006-07-28 11:23:08 +04:00
kfree ( e - > hash ) ;
2006-03-19 02:35:43 +03:00
kfree ( e ) ;
}
2005-04-17 02:20:36 +04:00
int elevator_init ( request_queue_t * q , char * name )
{
struct elevator_type * e = NULL ;
struct elevator_queue * eq ;
int ret = 0 ;
2006-06-08 10:49:06 +04:00
void * data ;
2005-04-17 02:20:36 +04:00
2005-10-28 10:29:39 +04:00
INIT_LIST_HEAD ( & q - > queue_head ) ;
q - > last_merge = NULL ;
q - > end_sector = 0 ;
q - > boundary_rq = NULL ;
2006-01-24 12:07:58 +03:00
if ( name & & ! ( e = elevator_get ( name ) ) )
2005-04-17 02:20:36 +04:00
return - EINVAL ;
2006-01-24 12:09:14 +03:00
if ( ! e & & * chosen_elevator & & ! ( e = elevator_get ( chosen_elevator ) ) )
printk ( " I/O scheduler %s not found \n " , chosen_elevator ) ;
if ( ! e & & ! ( e = elevator_get ( CONFIG_DEFAULT_IOSCHED ) ) ) {
printk ( " Default I/O scheduler not found, using no-op \n " ) ;
e = elevator_get ( " noop " ) ;
2006-01-24 12:07:58 +03:00
}
2006-07-20 01:39:40 +04:00
eq = elevator_alloc ( q , e ) ;
2006-03-19 02:35:43 +03:00
if ( ! eq )
2005-04-17 02:20:36 +04:00
return - ENOMEM ;
2006-06-08 10:49:06 +04:00
data = elevator_init_queue ( q , eq ) ;
if ( ! data ) {
2006-03-19 02:35:43 +03:00
kobject_put ( & eq - > kobj ) ;
2006-06-08 10:49:06 +04:00
return - ENOMEM ;
}
2005-04-17 02:20:36 +04:00
2006-06-08 10:49:06 +04:00
elevator_attach ( q , eq , data ) ;
2005-04-17 02:20:36 +04:00
return ret ;
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elevator_init ) ;
2005-04-17 02:20:36 +04:00
void elevator_exit ( elevator_t * e )
{
2006-03-19 02:35:43 +03:00
mutex_lock ( & e - > sysfs_lock ) ;
2005-04-17 02:20:36 +04:00
if ( e - > ops - > elevator_exit_fn )
e - > ops - > elevator_exit_fn ( e ) ;
2006-03-19 02:35:43 +03:00
e - > ops = NULL ;
mutex_unlock ( & e - > sysfs_lock ) ;
2005-04-17 02:20:36 +04:00
2006-03-19 02:35:43 +03:00
kobject_put ( & e - > kobj ) ;
2005-04-17 02:20:36 +04:00
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elevator_exit ) ;
2007-01-14 14:26:09 +03:00
static void elv_activate_rq ( request_queue_t * q , struct request * rq )
{
elevator_t * e = q - > elevator ;
if ( e - > ops - > elevator_activate_req_fn )
e - > ops - > elevator_activate_req_fn ( q , rq ) ;
}
/* Notify the io scheduler that @rq is being requeued (optional hook). */
static void elv_deactivate_rq(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}
2006-07-28 11:23:08 +04:00
static inline void __elv_rqhash_del ( struct request * rq )
{
hlist_del_init ( & rq - > hash ) ;
}
static void elv_rqhash_del ( request_queue_t * q , struct request * rq )
{
if ( ELV_ON_HASH ( rq ) )
__elv_rqhash_del ( rq ) ;
}
static void elv_rqhash_add ( request_queue_t * q , struct request * rq )
{
elevator_t * e = q - > elevator ;
BUG_ON ( ELV_ON_HASH ( rq ) ) ;
hlist_add_head ( & rq - > hash , & e - > hash [ ELV_HASH_FN ( rq_hash_key ( rq ) ) ] ) ;
}
static void elv_rqhash_reposition ( request_queue_t * q , struct request * rq )
{
__elv_rqhash_del ( rq ) ;
elv_rqhash_add ( q , rq ) ;
}
/*
 * Find a hashed request ending exactly at @offset (a back-merge
 * candidate).  Entries that are no longer mergeable are pruned lazily.
 */
static struct request *elv_rqhash_find(request_queue_t *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
2006-07-13 13:55:04 +04:00
/*
* RB - tree support functions for inserting / lookup / removal of requests
* in a sorted RB tree .
*/
struct request * elv_rb_add ( struct rb_root * root , struct request * rq )
{
struct rb_node * * p = & root - > rb_node ;
struct rb_node * parent = NULL ;
struct request * __rq ;
while ( * p ) {
parent = * p ;
__rq = rb_entry ( parent , struct request , rb_node ) ;
if ( rq - > sector < __rq - > sector )
p = & ( * p ) - > rb_left ;
else if ( rq - > sector > __rq - > sector )
p = & ( * p ) - > rb_right ;
else
return __rq ;
}
rb_link_node ( & rq - > rb_node , parent , p ) ;
rb_insert_color ( & rq - > rb_node , root ) ;
return NULL ;
}
EXPORT_SYMBOL ( elv_rb_add ) ;
void elv_rb_del ( struct rb_root * root , struct request * rq )
{
BUG_ON ( RB_EMPTY_NODE ( & rq - > rb_node ) ) ;
rb_erase ( & rq - > rb_node , root ) ;
RB_CLEAR_NODE ( & rq - > rb_node ) ;
}
EXPORT_SYMBOL ( elv_rb_del ) ;
/* Look up the request starting at @sector in the sort tree, or NULL. */
struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
2005-10-20 18:23:44 +04:00
/*
* Insert rq into dispatch queue of q . Queue lock must be held on
2006-07-13 13:55:04 +04:00
* entry . rq is sort insted into the dispatch queue . To be used by
* specific elevators .
2005-10-20 18:23:44 +04:00
*/
2005-10-20 18:37:00 +04:00
void elv_dispatch_sort ( request_queue_t * q , struct request * rq )
2005-10-20 18:23:44 +04:00
{
sector_t boundary ;
struct list_head * entry ;
2005-10-20 18:46:23 +04:00
if ( q - > last_merge = = rq )
q - > last_merge = NULL ;
2006-07-28 11:23:08 +04:00
elv_rqhash_del ( q , rq ) ;
2005-11-10 10:52:05 +03:00
q - > nr_sorted - - ;
2005-10-20 18:46:23 +04:00
2005-10-20 18:37:00 +04:00
boundary = q - > end_sector ;
2005-10-24 10:35:58 +04:00
2005-10-20 18:23:44 +04:00
list_for_each_prev ( entry , & q - > queue_head ) {
struct request * pos = list_entry_rq ( entry ) ;
2007-01-19 03:27:47 +03:00
if ( rq_data_dir ( rq ) ! = rq_data_dir ( pos ) )
break ;
2006-08-10 10:44:47 +04:00
if ( pos - > cmd_flags & ( REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED ) )
2005-10-20 18:23:44 +04:00
break ;
if ( rq - > sector > = boundary ) {
if ( pos - > sector < boundary )
continue ;
} else {
if ( pos - > sector > = boundary )
break ;
}
if ( rq - > sector > = pos - > sector )
break ;
}
list_add ( & rq - > queuelist , entry ) ;
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elv_dispatch_sort ) ;
2006-07-28 11:23:08 +04:00
/*
2006-07-13 13:55:04 +04:00
* Insert rq into dispatch queue of q . Queue lock must be held on
* entry . rq is added to the back of the dispatch queue . To be used by
* specific elevators .
2006-07-28 11:23:08 +04:00
*/
void elv_dispatch_add_tail ( struct request_queue * q , struct request * rq )
{
if ( q - > last_merge = = rq )
q - > last_merge = NULL ;
elv_rqhash_del ( q , rq ) ;
q - > nr_sorted - - ;
q - > end_sector = rq_end_sector ( rq ) ;
q - > boundary_rq = rq ;
list_add_tail ( & rq - > queuelist , & q - > queue_head ) ;
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elv_dispatch_add_tail ) ;
2005-04-17 02:20:36 +04:00
int elv_merge ( request_queue_t * q , struct request * * req , struct bio * bio )
{
elevator_t * e = q - > elevator ;
2006-07-28 11:23:08 +04:00
struct request * __rq ;
2005-10-20 18:46:23 +04:00
int ret ;
2006-07-28 11:23:08 +04:00
/*
* First try one - hit cache .
*/
2005-10-20 18:46:23 +04:00
if ( q - > last_merge ) {
ret = elv_try_merge ( q - > last_merge , bio ) ;
if ( ret ! = ELEVATOR_NO_MERGE ) {
* req = q - > last_merge ;
return ret ;
}
}
2005-04-17 02:20:36 +04:00
2006-07-28 11:23:08 +04:00
/*
* See if our hash lookup can find a potential backmerge .
*/
__rq = elv_rqhash_find ( q , bio - > bi_sector ) ;
if ( __rq & & elv_rq_merge_ok ( __rq , bio ) ) {
* req = __rq ;
return ELEVATOR_BACK_MERGE ;
}
2005-04-17 02:20:36 +04:00
if ( e - > ops - > elevator_merge_fn )
return e - > ops - > elevator_merge_fn ( q , req , bio ) ;
return ELEVATOR_NO_MERGE ;
}
2006-07-13 13:55:04 +04:00
void elv_merged_request ( request_queue_t * q , struct request * rq , int type )
2005-04-17 02:20:36 +04:00
{
elevator_t * e = q - > elevator ;
if ( e - > ops - > elevator_merged_fn )
2006-07-13 13:55:04 +04:00
e - > ops - > elevator_merged_fn ( q , rq , type ) ;
2005-10-20 18:46:23 +04:00
2006-07-13 13:55:04 +04:00
if ( type = = ELEVATOR_BACK_MERGE )
elv_rqhash_reposition ( q , rq ) ;
2006-07-28 11:23:08 +04:00
2005-10-20 18:46:23 +04:00
q - > last_merge = rq ;
2005-04-17 02:20:36 +04:00
}
/*
 * @next has been merged into @rq and is going away: notify the
 * scheduler and update the hash/sort bookkeeping.
 */
void elv_merge_requests(request_queue_t *q, struct request *rq,
			     struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}
2005-10-20 18:23:44 +04:00
void elv_requeue_request ( request_queue_t * q , struct request * rq )
2005-04-17 02:20:36 +04:00
{
/*
* it already went through dequeue , we need to decrement the
* in_flight count again
*/
2005-10-20 18:23:44 +04:00
if ( blk_account_rq ( rq ) ) {
2005-04-17 02:20:36 +04:00
q - > in_flight - - ;
2007-01-14 14:26:09 +03:00
if ( blk_sorted_rq ( rq ) )
elv_deactivate_rq ( q , rq ) ;
2005-10-20 18:23:44 +04:00
}
2005-04-17 02:20:36 +04:00
2006-08-10 10:44:47 +04:00
rq - > cmd_flags & = ~ REQ_STARTED ;
2005-04-17 02:20:36 +04:00
2006-02-08 12:01:31 +03:00
elv_insert ( q , rq , ELEVATOR_INSERT_REQUEUE ) ;
2005-04-17 02:20:36 +04:00
}
2005-11-10 10:52:05 +03:00
static void elv_drain_elevator ( request_queue_t * q )
{
static int printed ;
while ( q - > elevator - > ops - > elevator_dispatch_fn ( q , 1 ) )
;
if ( q - > nr_sorted = = 0 )
return ;
if ( printed + + < 10 ) {
printk ( KERN_ERR " %s: forced dispatching is broken "
" (nr_sorted=%u), please report this \n " ,
q - > elevator - > elevator_type - > elevator_name , q - > nr_sorted ) ;
}
}
2006-02-08 12:01:31 +03:00
void elv_insert ( request_queue_t * q , struct request * rq , int where )
2005-04-17 02:20:36 +04:00
{
2006-01-06 11:51:03 +03:00
struct list_head * pos ;
unsigned ordseq ;
2006-05-11 10:20:16 +04:00
int unplug_it = 1 ;
2006-01-06 11:51:03 +03:00
2006-03-23 22:00:26 +03:00
blk_add_trace_rq ( q , rq , BLK_TA_INSERT ) ;
2005-04-17 02:20:36 +04:00
rq - > q = q ;
2005-10-20 18:23:44 +04:00
switch ( where ) {
case ELEVATOR_INSERT_FRONT :
2006-08-10 10:44:47 +04:00
rq - > cmd_flags | = REQ_SOFTBARRIER ;
2005-10-20 18:23:44 +04:00
list_add ( & rq - > queuelist , & q - > queue_head ) ;
break ;
case ELEVATOR_INSERT_BACK :
2006-08-10 10:44:47 +04:00
rq - > cmd_flags | = REQ_SOFTBARRIER ;
2005-11-10 10:52:05 +03:00
elv_drain_elevator ( q ) ;
2005-10-20 18:23:44 +04:00
list_add_tail ( & rq - > queuelist , & q - > queue_head ) ;
/*
* We kick the queue here for the following reasons .
* - The elevator might have returned NULL previously
* to delay requests and returned them now . As the
* queue wasn ' t empty before this request , ll_rw_blk
* won ' t run the queue on return , resulting in hang .
* - Usually , back inserted requests won ' t be merged
* with anything . There ' s no point in delaying queue
* processing .
*/
blk_remove_plug ( q ) ;
q - > request_fn ( q ) ;
break ;
case ELEVATOR_INSERT_SORT :
BUG_ON ( ! blk_fs_request ( rq ) ) ;
2006-08-10 10:44:47 +04:00
rq - > cmd_flags | = REQ_SORTED ;
2005-11-10 10:52:05 +03:00
q - > nr_sorted + + ;
2006-07-28 11:23:08 +04:00
if ( rq_mergeable ( rq ) ) {
elv_rqhash_add ( q , rq ) ;
if ( ! q - > last_merge )
q - > last_merge = rq ;
}
2005-11-01 11:23:49 +03:00
/*
* Some ioscheds ( cfq ) run q - > request_fn directly , so
* rq cannot be accessed after calling
* elevator_add_req_fn .
*/
q - > elevator - > ops - > elevator_add_req_fn ( q , rq ) ;
2005-10-20 18:23:44 +04:00
break ;
2006-01-06 11:51:03 +03:00
case ELEVATOR_INSERT_REQUEUE :
/*
* If ordered flush isn ' t in progress , we do front
* insertion ; otherwise , requests should be requeued
* in ordseq order .
*/
2006-08-10 10:44:47 +04:00
rq - > cmd_flags | = REQ_SOFTBARRIER ;
2006-01-06 11:51:03 +03:00
2007-01-23 21:40:54 +03:00
/*
* Most requeues happen because of a busy condition ,
* don ' t force unplug of the queue for that case .
*/
unplug_it = 0 ;
2006-01-06 11:51:03 +03:00
if ( q - > ordseq = = 0 ) {
list_add ( & rq - > queuelist , & q - > queue_head ) ;
break ;
}
ordseq = blk_ordered_req_seq ( rq ) ;
list_for_each ( pos , & q - > queue_head ) {
struct request * pos_rq = list_entry_rq ( pos ) ;
if ( ordseq < = blk_ordered_req_seq ( pos_rq ) )
break ;
}
list_add_tail ( & rq - > queuelist , pos ) ;
break ;
2005-10-20 18:23:44 +04:00
default :
printk ( KERN_ERR " %s: bad insertion point %d \n " ,
__FUNCTION__ , where ) ;
BUG ( ) ;
}
2006-05-11 10:20:16 +04:00
if ( unplug_it & & blk_queue_plugged ( q ) ) {
2005-10-20 18:23:44 +04:00
int nrq = q - > rq . count [ READ ] + q - > rq . count [ WRITE ]
- q - > in_flight ;
if ( nrq > = q - > unplug_thresh )
__generic_unplug_device ( q ) ;
}
2005-04-17 02:20:36 +04:00
}
2006-02-08 12:01:31 +03:00
void __elv_add_request ( request_queue_t * q , struct request * rq , int where ,
int plug )
{
if ( q - > ordcolor )
2006-08-10 10:44:47 +04:00
rq - > cmd_flags | = REQ_ORDERED_COLOR ;
2006-02-08 12:01:31 +03:00
2006-08-10 10:44:47 +04:00
if ( rq - > cmd_flags & ( REQ_SOFTBARRIER | REQ_HARDBARRIER ) ) {
2006-02-08 12:01:31 +03:00
/*
* toggle ordered color
*/
if ( blk_barrier_rq ( rq ) )
q - > ordcolor ^ = 1 ;
/*
* barriers implicitly indicate back insertion
*/
if ( where = = ELEVATOR_INSERT_SORT )
where = ELEVATOR_INSERT_BACK ;
/*
* this request is scheduling boundary , update
* end_sector
*/
if ( blk_fs_request ( rq ) ) {
q - > end_sector = rq_end_sector ( rq ) ;
q - > boundary_rq = rq ;
}
2006-08-10 10:44:47 +04:00
} else if ( ! ( rq - > cmd_flags & REQ_ELVPRIV ) & & where = = ELEVATOR_INSERT_SORT )
2006-02-08 12:01:31 +03:00
where = ELEVATOR_INSERT_BACK ;
if ( plug )
blk_plug_device ( q ) ;
elv_insert ( q , rq , where ) ;
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( __elv_add_request ) ;
2005-04-17 02:20:36 +04:00
void elv_add_request ( request_queue_t * q , struct request * rq , int where ,
int plug )
{
unsigned long flags ;
spin_lock_irqsave ( q - > queue_lock , flags ) ;
__elv_add_request ( q , rq , where , plug ) ;
spin_unlock_irqrestore ( q - > queue_lock , flags ) ;
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elv_add_request ) ;
2005-04-17 02:20:36 +04:00
static inline struct request * __elv_next_request ( request_queue_t * q )
{
2005-10-20 18:23:44 +04:00
struct request * rq ;
2006-01-06 11:51:03 +03:00
while ( 1 ) {
while ( ! list_empty ( & q - > queue_head ) ) {
rq = list_entry_rq ( q - > queue_head . next ) ;
if ( blk_do_ordered ( q , & rq ) )
return rq ;
}
2005-04-17 02:20:36 +04:00
2006-01-06 11:51:03 +03:00
if ( ! q - > elevator - > ops - > elevator_dispatch_fn ( q , 0 ) )
return NULL ;
2005-04-17 02:20:36 +04:00
}
}
/*
 * Fetch the next request for the driver, marking it started, updating
 * the dispatch boundary and running the prep function.  Returns NULL
 * when nothing is ready (including BLKPREP_DEFER).
 */
struct request *elv_next_request(request_queue_t *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if ((rq->cmd_flags & REQ_DONTPREP) || !q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			int nr_bytes = rq->hard_nr_sectors << 9;

			if (!nr_bytes)
				nr_bytes = rq->data_len;

			/* fail the whole request quietly */
			blkdev_dequeue_request(rq);
			rq->cmd_flags |= REQ_QUIET;
			end_that_request_chunk(rq, 0, nr_bytes);
			end_that_request_last(rq, 0);
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
			       ret);
			break;
		}
	}

	return rq;
}

EXPORT_SYMBOL(elv_next_request);
2005-10-20 18:23:44 +04:00
void elv_dequeue_request ( request_queue_t * q , struct request * rq )
2005-04-17 02:20:36 +04:00
{
2005-10-20 18:23:44 +04:00
BUG_ON ( list_empty ( & rq - > queuelist ) ) ;
2006-07-28 11:23:08 +04:00
BUG_ON ( ELV_ON_HASH ( rq ) ) ;
2005-10-20 18:23:44 +04:00
list_del_init ( & rq - > queuelist ) ;
2005-04-17 02:20:36 +04:00
/*
* the time frame between a request being removed from the lists
* and to it is freed is accounted as io that is in progress at
2005-10-20 18:23:44 +04:00
* the driver side .
2005-04-17 02:20:36 +04:00
*/
if ( blk_account_rq ( rq ) )
q - > in_flight + + ;
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elv_dequeue_request ) ;
2005-04-17 02:20:36 +04:00
int elv_queue_empty ( request_queue_t * q )
{
elevator_t * e = q - > elevator ;
2005-10-20 18:23:44 +04:00
if ( ! list_empty ( & q - > queue_head ) )
return 0 ;
2005-04-17 02:20:36 +04:00
if ( e - > ops - > elevator_queue_empty_fn )
return e - > ops - > elevator_queue_empty_fn ( q ) ;
2005-10-20 18:23:44 +04:00
return 1 ;
2005-04-17 02:20:36 +04:00
}
2006-07-13 13:55:04 +04:00
EXPORT_SYMBOL ( elv_queue_empty ) ;
/* Scheduler pass-through: request sorted after @rq, or NULL. */
struct request *elv_latter_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

/* Scheduler pass-through: request sorted before @rq, or NULL. */
struct request *elv_former_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}
2006-07-28 11:32:57 +04:00
int elv_set_request ( request_queue_t * q , struct request * rq , gfp_t gfp_mask )
2005-04-17 02:20:36 +04:00
{
elevator_t * e = q - > elevator ;
if ( e - > ops - > elevator_set_req_fn )
2006-07-28 11:32:57 +04:00
return e - > ops - > elevator_set_req_fn ( q , rq , gfp_mask ) ;
2005-04-17 02:20:36 +04:00
rq - > elevator_private = NULL ;
return 0 ;
}
/* Release the scheduler's per-request data, if it allocates any. */
void elv_put_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}
2006-07-28 11:32:57 +04:00
int elv_may_queue ( request_queue_t * q , int rw )
2005-04-17 02:20:36 +04:00
{
elevator_t * e = q - > elevator ;
if ( e - > ops - > elevator_may_queue_fn )
2006-07-28 11:32:57 +04:00
return e - > ops - > elevator_may_queue_fn ( q , rw ) ;
2005-04-17 02:20:36 +04:00
return ELV_MQUEUE_MAY ;
}
/*
 * The driver finished @rq: update in-flight accounting, notify the
 * scheduler, and advance a pending ordered-flush sequence if this was
 * the last request it was draining for.
 */
void elv_completed_request(request_queue_t *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);

		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			q->request_fn(q);
		}
	}
}
2006-03-19 02:35:43 +03:00
# define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)
static ssize_t
elv_attr_show ( struct kobject * kobj , struct attribute * attr , char * page )
2005-04-17 02:20:36 +04:00
{
2006-03-19 02:35:43 +03:00
elevator_t * e = container_of ( kobj , elevator_t , kobj ) ;
struct elv_fs_entry * entry = to_elv ( attr ) ;
ssize_t error ;
if ( ! entry - > show )
return - EIO ;
mutex_lock ( & e - > sysfs_lock ) ;
error = e - > ops ? entry - > show ( e , page ) : - ENOENT ;
mutex_unlock ( & e - > sysfs_lock ) ;
return error ;
}
2005-04-17 02:20:36 +04:00
2006-03-19 02:35:43 +03:00
static ssize_t
elv_attr_store ( struct kobject * kobj , struct attribute * attr ,
const char * page , size_t length )
{
elevator_t * e = container_of ( kobj , elevator_t , kobj ) ;
struct elv_fs_entry * entry = to_elv ( attr ) ;
ssize_t error ;
2005-04-17 02:20:36 +04:00
2006-03-19 02:35:43 +03:00
if ( ! entry - > store )
return - EIO ;
2005-04-17 02:20:36 +04:00
2006-03-19 02:35:43 +03:00
mutex_lock ( & e - > sysfs_lock ) ;
error = e - > ops ? entry - > store ( e , page , length ) : - ENOENT ;
mutex_unlock ( & e - > sysfs_lock ) ;
return error ;
}
static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
int elv_register_queue ( struct request_queue * q )
{
elevator_t * e = q - > elevator ;
int error ;
e - > kobj . parent = & q - > kobj ;
error = kobject_add ( & e - > kobj ) ;
if ( ! error ) {
2006-03-19 06:27:18 +03:00
struct elv_fs_entry * attr = e - > elevator_type - > elevator_attrs ;
2006-03-19 02:35:43 +03:00
if ( attr ) {
2006-03-19 06:27:18 +03:00
while ( attr - > attr . name ) {
if ( sysfs_create_file ( & e - > kobj , & attr - > attr ) )
2006-03-19 02:35:43 +03:00
break ;
2006-03-19 06:27:18 +03:00
attr + + ;
2006-03-19 02:35:43 +03:00
}
}
kobject_uevent ( & e - > kobj , KOBJ_ADD ) ;
}
return error ;
2005-04-17 02:20:36 +04:00
}
2006-06-08 10:49:06 +04:00
static void __elv_unregister_queue ( elevator_t * e )
{
kobject_uevent ( & e - > kobj , KOBJ_REMOVE ) ;
kobject_del ( & e - > kobj ) ;
}
2005-04-17 02:20:36 +04:00
void elv_unregister_queue ( struct request_queue * q )
{
2006-06-08 10:49:06 +04:00
if ( q )
__elv_unregister_queue ( q - > elevator ) ;
2005-04-17 02:20:36 +04:00
}
int elv_register ( struct elevator_type * e )
{
2007-03-15 14:59:19 +03:00
char * def = " " ;
2007-04-26 16:41:53 +04:00
spin_lock ( & elv_list_lock ) ;
2006-03-24 20:43:26 +03:00
BUG_ON ( elevator_find ( e - > elevator_name ) ) ;
2005-04-17 02:20:36 +04:00
list_add_tail ( & e - > list , & elv_list ) ;
2007-04-26 16:41:53 +04:00
spin_unlock ( & elv_list_lock ) ;
2005-04-17 02:20:36 +04:00
2006-01-24 12:07:58 +03:00
if ( ! strcmp ( e - > elevator_name , chosen_elevator ) | |
( ! * chosen_elevator & &
! strcmp ( e - > elevator_name , CONFIG_DEFAULT_IOSCHED ) ) )
2007-03-15 14:59:19 +03:00
def = " (default) " ;
printk ( KERN_INFO " io scheduler %s registered%s \n " , e - > elevator_name , def ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
EXPORT_SYMBOL_GPL ( elv_register ) ;
void elv_unregister ( struct elevator_type * e )
{
2005-10-31 02:01:39 +03:00
struct task_struct * g , * p ;
/*
* Iterate every thread in the process to remove the io contexts .
*/
2006-03-18 21:21:20 +03:00
if ( e - > ops . trim ) {
read_lock ( & tasklist_lock ) ;
do_each_thread ( g , p ) {
task_lock ( p ) ;
2006-08-22 21:22:13 +04:00
if ( p - > io_context )
e - > ops . trim ( p - > io_context ) ;
2006-03-18 21:21:20 +03:00
task_unlock ( p ) ;
} while_each_thread ( g , p ) ;
read_unlock ( & tasklist_lock ) ;
}
2005-10-31 02:01:39 +03:00
2007-04-26 16:41:53 +04:00
spin_lock ( & elv_list_lock ) ;
2005-04-17 02:20:36 +04:00
list_del_init ( & e - > list ) ;
2007-04-26 16:41:53 +04:00
spin_unlock ( & elv_list_lock ) ;
2005-04-17 02:20:36 +04:00
}
EXPORT_SYMBOL_GPL ( elv_unregister ) ;
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 *
 * Returns 1 on success, 0 on failure (old scheduler left in place).
 */
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		/* drop the kobject ref taken by elevator_alloc() */
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);

	elv_drain_elevator(q);

	/*
	 * elvpriv counts in-flight requests that hold old-scheduler private
	 * data; keep kicking the queue and sleeping until they complete.
	 */
	while (q->rq.elvpriv) {
		blk_remove_plug(q);
		q->request_fn(q);
		/* drop the lock so completions can make progress */
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	/* swap the sysfs directory from the old elevator to the new one */
	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);
	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
	return 0;
}
/*
 * sysfs store hook: parse the scheduler name written by userspace and
 * switch the queue over to it. Returns @count on success (or when the
 * requested scheduler is already active), -EINVAL if no scheduler of
 * that name is registered. A failed switch is logged but still returns
 * @count, matching sysfs store conventions.
 */
ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;
	size_t n;

	/* bounded copy: the final byte is pre-set so the result is
	 * always NUL-terminated, even if @name is overlong */
	elevator_name[sizeof(elevator_name) - 1] = '\0';
	strncpy(elevator_name, name, sizeof(elevator_name) - 1);

	/* sysfs writes normally carry a trailing newline - strip it */
	n = strlen(elevator_name);
	if (n && elevator_name[n - 1] == '\n')
		elevator_name[n - 1] = '\0';

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		/* already active: drop the reference we just took */
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
		       elevator_name);
	return count;
}
/*
 * sysfs show hook: list every registered io scheduler, with the one
 * currently active on @q wrapped in brackets. Returns the number of
 * bytes written into @name.
 */
ssize_t elv_iosched_show(request_queue_t *q, char *name)
{
	struct elevator_type *active = q->elevator->elevator_type;
	struct elevator_type *cur;
	int pos = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(cur, &elv_list, list) {
		if (strcmp(active->elevator_name, cur->elevator_name) == 0)
			pos += sprintf(name + pos, "[%s] ",
				       active->elevator_name);
		else
			pos += sprintf(name + pos, "%s ", cur->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	pos += sprintf(name + pos, "\n");
	return pos;
}
2006-07-13 13:55:04 +04:00
/*
 * Return the request immediately preceding @rq in the queue's sector-sorted
 * rbtree, or NULL if @rq is the first entry.
 */
struct request *elv_rb_former_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *node = rb_prev(&rq->rb_node);

	return node ? rb_entry_rq(node) : NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);
/*
 * Return the request immediately following @rq in the queue's sector-sorted
 * rbtree, or NULL if @rq is the last entry.
 */
struct request *elv_rb_latter_request(request_queue_t *q, struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	return node ? rb_entry_rq(node) : NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);