/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

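/*
 * Illustrative example (editor's note, not from the original source):
 * rq_hash_key() is the *end* sector of a request, so a request starting
 * at sector 100 that covers 8 sectors hashes under key 108.  A new bio
 * beginning at sector 108 then hashes into the same bucket and is found
 * as a back merge candidate by elv_rqhash_find() below.
 */
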
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

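/*
 * Worked example (editor's note, assumed values): for a request at
 * sector 100 spanning 8 sectors, a bio starting at sector 108 satisfies
 * blk_rq_pos() + blk_rq_sectors() == bi_sector and is a back merge,
 * while a 4-sector bio starting at sector 96 satisfies
 * blk_rq_pos() - bio_sectors() == bi_sector and is a front merge.
 */
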
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

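/*
 * Usage note (editor's example): booting with the kernel command line
 * parameter "elevator=deadline" fills in chosen_elevator, which then
 * becomes the default for every queue initialized without an explicit
 * scheduler name in elevator_init() below.
 */
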
static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	void *data;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

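/*
 * Editor's sketch of the intended usage (assumed, not part of the
 * original file): a caller looking for a back merge candidate for a
 * bio would do roughly
 *
 *	struct request *__rq = elv_rqhash_find(q, bio->bi_sector);
 *	if (__rq && elv_rq_merge_ok(__rq, bio))
 *		... append bio to __rq ...
 *
 * which is exactly the fast path elv_merge() takes below.  Note that
 * the lookup lazily prunes entries that are no longer mergeable.
 */
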
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

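/*
 * Editor's sketch of typical usage (assumed, modeled on how sector-
 * sorted schedulers such as deadline use these helpers; not part of
 * the original file):
 *
 *	struct rb_root sort_list = RB_ROOT;
 *
 *	elv_rb_add(&sort_list, rq);		// keep rq sorted by sector
 *	__rq = elv_rb_find(&sort_list, pos);	// front merge candidate
 *	elv_rb_del(&sort_list, rq);		// on dispatch
 *
 * Note that elv_rb_add() returns the existing request on a sector
 * collision instead of inserting, so callers must handle that case.
 */
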
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

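/*
 * Worked example (editor's note, assumed values): with q->end_sector at
 * 1000 and a dispatch queue already holding requests at sectors 1000,
 * 1500, 200 (one-way elevator order that wraps at the boundary), a new
 * request at sector 1200 is walked in from the tail and inserted
 * between 1000 and 1500, while a request at sector 100 lands between
 * 1500 and 200, i.e. in the post-wrap half of the sweep.
 */
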
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

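/*
 * Editor's note (assumed interface, matching the levels listed above):
 * these merge levels are exposed through /sys/block/<dev>/queue/nomerges,
 * where 0 allows all merge attempts, 1 (noxmerges) keeps only the
 * one-hit cache try, and 2 (nomerges) disables merging entirely.
 */
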
/*
 * Attempt to do an insertion back merge.  Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_bio_merged_fn)
		e->ops->elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

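/*
 * Editor's summary of the insertion points (derived from the switch
 * above, not original text):
 *
 *	ELEVATOR_INSERT_FRONT/REQUEUE  head of dispatch queue, softbarrier
 *	ELEVATOR_INSERT_BACK           drain elevator, add to tail, kick queue
 *	ELEVATOR_INSERT_SORT_MERGE     try an insertion merge, else fall
 *	                               through to SORT
 *	ELEVATOR_INSERT_SORT           hand to the io scheduler, sorted
 *	ELEVATOR_INSERT_FLUSH          route through the flush machinery
 */
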
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private[0] = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
	e->registered = 0;
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
EXPORT_SYMBOL(elv_unregister_queue);

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;
	int err;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return -ENOMEM;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	if (old_elevator->registered) {
		__elv_unregister_queue(old_elevator);

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

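/*
 * Editor's usage sketch (assumed in-kernel caller, not from the
 * original file): a driver that prefers a specific scheduler for its
 * queue could do
 *
 *	if (elevator_change(q, "deadline"))
 *		pr_warn("falling back to the default io scheduler\n");
 *
 * called without the queue lock held; elevator_change() quiesces the
 * queue itself via elevator_switch().
 */
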
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

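/*
 * Editor's note: this is the store side of the queue's "scheduler"
 * sysfs attribute, so a runtime switch from userspace looks like
 * (device name assumed):
 *
 *	# echo deadline > /sys/block/sda/queue/scheduler
 *
 * elv_iosched_show() below prints the registered schedulers with the
 * active one bracketed, e.g. "noop [deadline] cfq".
 */
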
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);