/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 4;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)

#define RQ_CIC(rq)		\
	((struct cfq_io_context *) (rq)->elevator_private)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define sample_valid(samples)	((samples) > 80)

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;
	/*
	 * Used to track any pending rt requests so we can pre-empt current
	 * non-RT cfqq in service when this value is non-zero.
	 */
	unsigned int busy_rt_queues;

	int rq_in_driver;
	int sync_flight;

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	int hw_tag_samples;
	int rq_in_driver_peak;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* pending metadata requests */
	int meta_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	pid_t pid;
};
enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
};
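
/*
 * CFQ_CFQQ_FNS() below generates cfq_mark_cfqq_<flag>(), cfq_clear_cfqq_<flag>()
 * and cfq_cfqq_<flag>() helpers for each of the per-queue state flags above.
 */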
#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
#undef CFQ_CFQQ_FNS

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
				       struct io_context *, gfp_t);
static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
						struct io_context *);
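
/*
 * Each cfq_io_context caches one async and one sync cfq_queue pointer,
 * indexed by the sync bit.
 */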
static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
					    int is_sync)
{
	return cic->cfqq[!!is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_context *cic,
				struct cfq_queue *cfqq, int is_sync)
{
	cic->cfqq[!!is_sync] = cfqq;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be direct WRITE).
 */
static inline int cfq_bio_sync(struct bio *bio)
{
	if (bio_data_dir(bio) == READ || bio_sync(bio))
		return 1;

	return 0;
}
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

static int cfq_queue_empty(struct request_queue *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}
/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice / CFQ_SLICE_SCALE * (4 - prio));
}
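
/*
 * Example: with the default 100ms (HZ/10) sync base slice and a
 * CFQ_SLICE_SCALE of 5, cfq_prio_slice() above gives roughly 180ms for
 * ioprio 0, 100ms for the default ioprio 4 and 40ms for ioprio 7.
 */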
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}
/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}
/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		       cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				    struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	int left;

	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&cfqd->service_tree.rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	parent = NULL;
	p = &cfqd->service_tree.rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues goes to the back.
		 * after that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}
/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}
/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_class_rt(cfqq))
		cfqd->busy_rt_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}
static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}
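
/*
 * Driver-depth accounting: activating a request bumps rq_in_driver and
 * remembers the end position of that request, deactivating (requeueing)
 * drops the count again.
 */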
static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}
static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}
static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_context *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return 0;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}
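
/*
 * Make cfqq the current active queue and reset its per-slice state; the
 * actual slice length is set later, once the first request of the slice
 * is dispatched and activated.
 */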
static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active");
		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
		cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	return cfq_rb_first(&cfqd->service_tree);
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}
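
/*
 * Seek distance helpers: cfq_dist_from_last() is the absolute sector
 * distance between rq and the last dispatched position, and cfq_rq_close()
 * treats a request as "close" when that distance is within the task's
 * observed mean seek distance.
 */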
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}

#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
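
/*
 * Arm the slice idle timer for the active queue: rather than immediately
 * switching to another queue when the current one has no more requests,
 * wait a short while for the task to issue a new (likely nearby) request.
 */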
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * still requests with the driver, don't idle
	 */
	if (cfqd->rq_in_driver)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->ioc->nr_tasks))
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_log(cfqd, "arm_idle: %lu", sl);
}
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		rq = NULL;

	cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
	return rq;
}
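
/*
 * Example: with the default cfq_slice_async_rq of 2 and CFQ_PRIO_LISTS of 8,
 * cfq_prio_to_maxrq() below allows up to 32 requests per slice for ioprio 0
 * and only 4 for ioprio 7.
 */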
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * If we have a RT cfqq waiting, then we pre-empt the current non-rt
	 * cfqq.
	 */
	if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues) {
		/*
		 * We simulate this as cfqq timed out so that it gets to bank
		 * the remaining of its time slice.
		 */
		cfq_log_cfqq(cfqd, cfqq, "preempt");
		cfq_slice_expired(cfqd, 1);
		goto new_queue;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}
/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		rq = cfq_check_fifo(cfqq);
		if (rq == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

		/*
		 * If there is a non-empty RT cfqq waiting for current
		 * cfqq's timeslice to complete, pre-empt this cfqq
		 */
		if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	while ((cfqq = cfq_rb_first(&cfqd->service_tree)) != NULL)
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
	return dispatched;
}
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
			break;

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	cfq_log(cfqd, "dispatched=%d", dispatched);
	return dispatched;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	kmem_cache_free(cfq_pool, cfqq);
}
/*
 * Must always be called with the rcu_read_lock() held
 */
static void
__call_for_each_cic(struct io_context *ioc,
		    void (*func)(struct io_context *, struct cfq_io_context *))
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
		func(ioc, cic);
}

/*
 * Call func for each cic attached to this ioc.
 */
static void
call_for_each_cic(struct io_context *ioc,
		  void (*func)(struct io_context *, struct cfq_io_context *))
{
	rcu_read_lock();
	__call_for_each_cic(ioc, func);
	rcu_read_unlock();
}
static void cfq_cic_free_rcu(struct rcu_head *head)
{
	struct cfq_io_context *cic;

	cic = container_of(head, struct cfq_io_context, rcu_head);

	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);

	if (ioc_gone) {
		/*
		 * CFQ scheduler is exiting, grab exit lock and check
		 * the pending io context count. If it hits zero,
		 * complete ioc_gone and set it back to NULL
		 */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void cfq_cic_free(struct cfq_io_context *cic)
{
	call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
}

static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long flags;

	BUG_ON(!cic->dead_key);

	spin_lock_irqsave(&ioc->lock, flags);
	radix_tree_delete(&ioc->radix_root, cic->dead_key);
	hlist_del_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, flags);

	cfq_cic_free(cic);
}
/*
 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
 * and ->trim() which is called with the task lock held
 */
static void cfq_free_io_context(struct io_context *ioc)
{
	/*
	 * ioc->refcount is zero here, or we are called from elv_unregister(),
	 * so no more cic's are allowed to be linked into this ioc. So it
	 * should be ok to iterate over the known list, we will see all cic's
	 * since no new ones are added.
	 */
	__call_for_each_cic(ioc, cic_free_func);
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	struct io_context *ioc = cic->ioc;

	list_del_init(&cic->queue_list);

	/*
	 * Make sure key == NULL is seen for dead queues
	 */
	smp_wmb();
	cic->dead_key = (unsigned long) cic->key;
	cic->key = NULL;

	if (ioc->ioc_data == cic)
		rcu_assign_pointer(ioc->ioc_data, NULL);

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}
static void cfq_exit_single_io_context(struct io_context *ioc,
				       struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		struct request_queue *q = cfqd->queue;
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);

		/*
		 * Ensure we get a fresh copy of the ->key to prevent
		 * race between exiting task and queue
		 */
		smp_read_barrier_depends();
		if (cic->key)
			__cfq_exit_single_io_context(cfqd, cic);

		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}
/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belongs to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	call_for_each_cic(ioc, cfq_exit_single_io_context);
}
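
/*
 * Allocate a new cfq_io_context from the dedicated slab cache and hook up
 * its destructor and exit callbacks; returns NULL if the allocation fails.
 */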
static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
							cfqd->queue->node);
	if (cic) {
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		INIT_HLIST_NODE(&cic->cic_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}
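
/*
 * Derive the cfqq's io priority and scheduling class from the owning
 * io_context (falling back to the task's CPU nice level when none is set),
 * and remember the originals so they can be restored after a temporary boost.
 */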
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;
	cfq_clear_cfqq_prio_changed(cfqq);
}
static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	call_for_each_cic(ioc, changed_ioprio);
	ioc->ioprio_changed = 0;
}
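
/*
 * Look up the cfqq for this (ioc, sync) pair, allocating a new one when
 * needed. With __GFP_WAIT the queue lock is dropped around the allocation
 * and the lookup is retried afterwards.
 */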
static struct cfq_queue *
2007-07-10 15:43:25 +04:00
cfq_find_alloc_queue ( struct cfq_data * cfqd , int is_sync ,
2008-01-24 10:52:45 +03:00
struct io_context * ioc , gfp_t gfp_mask )
2005-06-27 12:55:12 +04:00
{
struct cfq_queue * cfqq , * new_cfqq = NULL ;
2007-04-25 14:29:51 +04:00
struct cfq_io_context * cic ;
2005-06-27 12:55:12 +04:00
retry :
2008-01-24 10:44:49 +03:00
cic = cfq_cic_lookup ( cfqd , ioc ) ;
2007-04-25 14:29:51 +04:00
/* cic always exists here */
cfqq = cic_to_cfqq ( cic , is_sync ) ;
2005-06-27 12:55:12 +04:00
if ( ! cfqq ) {
if ( new_cfqq ) {
cfqq = new_cfqq ;
new_cfqq = NULL ;
} else if ( gfp_mask & __GFP_WAIT ) {
2006-07-22 18:48:31 +04:00
/*
 * Inform the allocator that we will simply retry this
 * allocation if it fails, to allow it to do whatever it
 * needs to attempt to free memory.
*/
2005-06-27 12:55:12 +04:00
spin_unlock_irq ( cfqd - > queue - > queue_lock ) ;
2007-07-17 15:03:29 +04:00
new_cfqq = kmem_cache_alloc_node ( cfq_pool ,
gfp_mask | __GFP_NOFAIL | __GFP_ZERO ,
cfqd - > queue - > node ) ;
2005-06-27 12:55:12 +04:00
spin_lock_irq ( cfqd - > queue - > queue_lock ) ;
goto retry ;
} else {
2007-07-17 15:03:29 +04:00
cfqq = kmem_cache_alloc_node ( cfq_pool ,
gfp_mask | __GFP_ZERO ,
cfqd - > queue - > node ) ;
2005-06-27 12:55:12 +04:00
if ( ! cfqq )
goto out ;
}
2007-04-20 16:27:50 +04:00
RB_CLEAR_NODE ( & cfqq - > rb_node ) ;
2005-06-27 12:55:12 +04:00
INIT_LIST_HEAD ( & cfqq - > fifo ) ;
atomic_set ( & cfqq - > ref , 0 ) ;
cfqq - > cfqd = cfqd ;
2007-01-19 03:56:49 +03:00
2005-06-27 12:56:24 +04:00
cfq_mark_cfqq_prio_changed ( cfqq ) ;
2006-07-28 11:48:51 +04:00
cfq_mark_cfqq_queue_new ( cfqq ) ;
2007-04-25 14:29:51 +04:00
2008-01-24 10:52:45 +03:00
cfq_init_prio_data ( cfqq , ioc ) ;
2008-01-28 13:38:15 +03:00
if ( is_sync ) {
if ( ! cfq_class_idle ( cfqq ) )
cfq_mark_cfqq_idle_window ( cfqq ) ;
cfq_mark_cfqq_sync ( cfqq ) ;
}
2008-05-30 14:23:07 +04:00
cfqq - > pid = current - > pid ;
cfq_log_cfqq ( cfqd , cfqq , " alloced " ) ;
2005-06-27 12:55:12 +04:00
}
if ( new_cfqq )
kmem_cache_free ( cfq_pool , new_cfqq ) ;
out :
WARN_ON ( ( gfp_mask & __GFP_WAIT ) & & ! cfqq ) ;
return cfqq ;
}
2007-07-20 12:06:38 +04:00
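/*
 * Async queues are shared and live in the cfq_data itself: one slot per
 * priority level for the RT and BE classes, and a single queue for all
 * idle-class async IO, so every task doing async IO at a given priority
 * ends up in the same queue.
 */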
static struct cfq_queue * *
cfq_async_queue_prio ( struct cfq_data * cfqd , int ioprio_class , int ioprio )
{
2008-01-31 15:08:54 +03:00
switch ( ioprio_class ) {
2007-07-20 12:06:38 +04:00
case IOPRIO_CLASS_RT :
return & cfqd - > async_cfqq [ 0 ] [ ioprio ] ;
case IOPRIO_CLASS_BE :
return & cfqd - > async_cfqq [ 1 ] [ ioprio ] ;
case IOPRIO_CLASS_IDLE :
return & cfqd - > async_idle_cfqq ;
default :
BUG ( ) ;
}
}
2007-07-10 15:43:25 +04:00
static struct cfq_queue *
2008-01-24 10:52:45 +03:00
cfq_get_queue ( struct cfq_data * cfqd , int is_sync , struct io_context * ioc ,
2007-07-10 15:43:25 +04:00
gfp_t gfp_mask )
{
2008-01-24 10:52:45 +03:00
const int ioprio = task_ioprio ( ioc ) ;
const int ioprio_class = task_ioprio_class ( ioc ) ;
2007-07-20 12:06:38 +04:00
struct cfq_queue * * async_cfqq = NULL ;
2007-07-10 15:43:25 +04:00
struct cfq_queue * cfqq = NULL ;
2007-07-20 12:06:38 +04:00
if ( ! is_sync ) {
async_cfqq = cfq_async_queue_prio ( cfqd , ioprio_class , ioprio ) ;
cfqq = * async_cfqq ;
}
2007-10-23 17:08:21 +04:00
if ( ! cfqq ) {
2008-01-24 10:52:45 +03:00
cfqq = cfq_find_alloc_queue ( cfqd , is_sync , ioc , gfp_mask ) ;
2007-10-23 17:08:21 +04:00
if ( ! cfqq )
return NULL ;
}
2007-07-10 15:43:25 +04:00
/*
* pin the queue now that it ' s allocated , scheduler exit will prune it
*/
2007-07-20 12:06:38 +04:00
if ( ! is_sync & & ! ( * async_cfqq ) ) {
2007-07-10 15:43:25 +04:00
atomic_inc ( & cfqq - > ref ) ;
2007-07-20 12:06:38 +04:00
* async_cfqq = cfqq ;
2007-07-10 15:43:25 +04:00
}
atomic_inc ( & cfqq - > ref ) ;
return cfqq ;
}
2007-04-26 14:54:48 +04:00
/*
* We drop cfq io contexts lazily , so we may find a dead one .
*/
2006-04-18 11:45:18 +04:00
static void
2008-01-24 10:44:49 +03:00
cfq_drop_dead_cic ( struct cfq_data * cfqd , struct io_context * ioc ,
struct cfq_io_context * cic )
2006-04-18 11:45:18 +04:00
{
2008-01-24 10:44:49 +03:00
unsigned long flags ;
2006-08-29 11:05:44 +04:00
WARN_ON ( ! list_empty ( & cic - > queue_list ) ) ;
2007-04-24 23:23:53 +04:00
2008-01-24 10:44:49 +03:00
spin_lock_irqsave ( & ioc - > lock , flags ) ;
2008-04-10 10:28:01 +04:00
BUG_ON ( ioc - > ioc_data = = cic ) ;
2007-04-24 23:23:53 +04:00
2008-01-24 10:44:49 +03:00
radix_tree_delete ( & ioc - > radix_root , ( unsigned long ) cfqd ) ;
2008-02-19 12:02:29 +03:00
hlist_del_rcu ( & cic - > cic_list ) ;
2008-01-24 10:44:49 +03:00
spin_unlock_irqrestore ( & ioc - > lock , flags ) ;
cfq_cic_free ( cic ) ;
2006-04-18 11:45:18 +04:00
}
2006-03-28 10:59:01 +04:00
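/*
 * Find the cfq_io_context for (ioc, cfqd). The fast path hits the
 * one-entry ioc->ioc_data cache under RCU; otherwise we walk the per-ioc
 * radix tree keyed by the cfqd pointer, dropping any dead entry (key
 * cleared by queue exit) that we stumble over along the way.
 */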
static struct cfq_io_context *
2008-01-24 10:44:49 +03:00
cfq_cic_lookup ( struct cfq_data * cfqd , struct io_context * ioc )
2006-03-28 10:59:01 +04:00
{
struct cfq_io_context * cic ;
2008-05-28 16:46:59 +04:00
unsigned long flags ;
2008-01-24 10:44:49 +03:00
void * k ;
2006-03-28 10:59:01 +04:00
2007-04-25 14:29:51 +04:00
if ( unlikely ( ! ioc ) )
return NULL ;
2008-05-28 16:46:59 +04:00
rcu_read_lock ( ) ;
2007-04-24 23:23:53 +04:00
/*
* we maintain a last - hit cache , to avoid browsing over the tree
*/
2008-01-24 10:44:49 +03:00
cic = rcu_dereference ( ioc - > ioc_data ) ;
2008-05-28 16:46:59 +04:00
if ( cic & & cic - > key = = cfqd ) {
rcu_read_unlock ( ) ;
2007-04-24 23:23:53 +04:00
return cic ;
2008-05-28 16:46:59 +04:00
}
2007-04-24 23:23:53 +04:00
2008-01-24 10:44:49 +03:00
do {
cic = radix_tree_lookup ( & ioc - > radix_root , ( unsigned long ) cfqd ) ;
rcu_read_unlock ( ) ;
if ( ! cic )
break ;
2006-04-18 21:18:31 +04:00
/* ->key must be copied to avoid race with cfq_exit_queue() */
k = cic - > key ;
if ( unlikely ( ! k ) ) {
2008-01-24 10:44:49 +03:00
cfq_drop_dead_cic ( cfqd , ioc , cic ) ;
2008-05-28 16:46:59 +04:00
rcu_read_lock ( ) ;
2008-01-24 10:44:49 +03:00
continue ;
2006-04-18 11:45:18 +04:00
}
2006-03-28 10:59:01 +04:00
2008-05-28 16:46:59 +04:00
spin_lock_irqsave ( & ioc - > lock , flags ) ;
2008-01-24 10:44:49 +03:00
rcu_assign_pointer ( ioc - > ioc_data , cic ) ;
2008-05-28 16:46:59 +04:00
spin_unlock_irqrestore ( & ioc - > lock , flags ) ;
2008-01-24 10:44:49 +03:00
break ;
} while ( 1 ) ;
2006-03-28 10:59:01 +04:00
2008-01-24 10:44:49 +03:00
return cic ;
2006-03-28 10:59:01 +04:00
}
2008-01-24 10:44:49 +03:00
/*
* Add cic into ioc , using cfqd as the search key . This enables us to lookup
* the process specific cfq io context when entered from the block layer .
* Also adds the cic to a per - cfqd list , used when this queue is removed .
*/
2008-01-28 15:19:43 +03:00
static int cfq_cic_link ( struct cfq_data * cfqd , struct io_context * ioc ,
struct cfq_io_context * cic , gfp_t gfp_mask )
2006-03-28 10:59:01 +04:00
{
2006-10-30 21:07:48 +03:00
unsigned long flags ;
2008-01-24 10:44:49 +03:00
int ret ;
2006-03-28 10:59:01 +04:00
2008-01-24 10:44:49 +03:00
ret = radix_tree_preload ( gfp_mask ) ;
if ( ! ret ) {
cic - > ioc = ioc ;
cic - > key = cfqd ;
2006-03-28 10:59:01 +04:00
2008-01-24 10:44:49 +03:00
spin_lock_irqsave ( & ioc - > lock , flags ) ;
ret = radix_tree_insert ( & ioc - > radix_root ,
( unsigned long ) cfqd , cic ) ;
2008-02-19 12:02:29 +03:00
if ( ! ret )
hlist_add_head_rcu ( & cic - > cic_list , & ioc - > cic_list ) ;
2008-01-24 10:44:49 +03:00
spin_unlock_irqrestore ( & ioc - > lock , flags ) ;
2006-03-28 10:59:01 +04:00
2008-01-24 10:44:49 +03:00
radix_tree_preload_end ( ) ;
if ( ! ret ) {
spin_lock_irqsave ( cfqd - > queue - > queue_lock , flags ) ;
list_add ( & cic - > queue_list , & cfqd - > cic_list ) ;
spin_unlock_irqrestore ( cfqd - > queue - > queue_lock , flags ) ;
}
2006-03-28 10:59:01 +04:00
}
2008-01-24 10:44:49 +03:00
if ( ret )
printk ( KERN_ERR " cfq: cic link failed! \n " ) ;
2006-08-29 11:05:44 +04:00
2008-01-24 10:44:49 +03:00
return ret ;
2006-03-28 10:59:01 +04:00
}
2005-04-17 02:20:36 +04:00
/*
* Setup general io context and cfq io context . There can be several cfq
* io contexts per general io context , if this process is doing io to more
2006-03-28 10:59:01 +04:00
* than one device managed by cfq .
2005-04-17 02:20:36 +04:00
*/
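/*
 * For example, a process issuing IO to two cfq-managed disks holds a
 * single io_context but two cfq_io_contexts, one per cfq_data, each
 * reachable through the radix tree in the ioc keyed by the cfqd pointer.
 */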
static struct cfq_io_context *
2006-03-28 10:59:01 +04:00
cfq_get_io_context ( struct cfq_data * cfqd , gfp_t gfp_mask )
2005-04-17 02:20:36 +04:00
{
2005-06-27 12:55:12 +04:00
struct io_context * ioc = NULL ;
2005-04-17 02:20:36 +04:00
struct cfq_io_context * cic ;
2005-06-27 12:55:12 +04:00
might_sleep_if ( gfp_mask & __GFP_WAIT ) ;
2005-04-17 02:20:36 +04:00
2006-07-20 01:39:40 +04:00
ioc = get_io_context ( gfp_mask , cfqd - > queue - > node ) ;
2005-04-17 02:20:36 +04:00
if ( ! ioc )
return NULL ;
2008-01-24 10:44:49 +03:00
cic = cfq_cic_lookup ( cfqd , ioc ) ;
2006-03-28 10:59:01 +04:00
if ( cic )
goto out ;
2005-04-17 02:20:36 +04:00
2006-03-28 10:59:01 +04:00
cic = cfq_alloc_io_context ( cfqd , gfp_mask ) ;
if ( cic = = NULL )
goto err ;
2005-04-17 02:20:36 +04:00
2008-01-24 10:44:49 +03:00
if ( cfq_cic_link ( cfqd , ioc , cic , gfp_mask ) )
goto err_free ;
2005-04-17 02:20:36 +04:00
out :
2006-08-29 11:05:44 +04:00
smp_read_barrier_depends ( ) ;
if ( unlikely ( ioc - > ioprio_changed ) )
cfq_ioc_set_ioprio ( ioc ) ;
2005-04-17 02:20:36 +04:00
return cic ;
2008-01-24 10:44:49 +03:00
err_free :
cfq_cic_free ( cic ) ;
2005-04-17 02:20:36 +04:00
err :
put_io_context ( ioc ) ;
return NULL ;
}
2005-06-27 12:55:12 +04:00
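/*
 * Think time is the gap between a context's last completion and its next
 * request, kept as a decaying average: samples and total both decay by
 * 7/8 per update, so ttime_mean tracks the recent per-request think time
 * (each sample capped at twice the idle slice). As a rough sketch, a task
 * that consistently thinks for t jiffies converges to ttime_mean ~= t.
 */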
static void
cfq_update_io_thinktime ( struct cfq_data * cfqd , struct cfq_io_context * cic )
2005-04-17 02:20:36 +04:00
{
2007-01-19 03:30:16 +03:00
unsigned long elapsed = jiffies - cic - > last_end_request ;
unsigned long ttime = min ( elapsed , 2UL * cfqd - > cfq_slice_idle ) ;
2005-06-17 18:15:10 +04:00
2005-06-27 12:55:12 +04:00
cic - > ttime_samples = ( 7 * cic - > ttime_samples + 256 ) / 8 ;
cic - > ttime_total = ( 7 * cic - > ttime_total + 256 * ttime ) / 8 ;
cic - > ttime_mean = ( cic - > ttime_total + 128 ) / cic - > ttime_samples ;
}
2005-04-17 02:20:36 +04:00
2006-03-28 15:03:44 +04:00
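/*
 * Track how seeky this context is: the distance from the end of its last
 * request to the start of this one feeds the same kind of 7/8 decaying
 * average as the think time, with each sample clamped so a single odd
 * fragment or pagein cannot blow up seek_mean.
 */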
static void
2007-04-25 14:44:27 +04:00
cfq_update_io_seektime ( struct cfq_data * cfqd , struct cfq_io_context * cic ,
struct request * rq )
2006-03-28 15:03:44 +04:00
{
sector_t sdist ;
u64 total ;
2006-07-13 14:39:25 +04:00
if ( cic - > last_request_pos < rq - > sector )
sdist = rq - > sector - cic - > last_request_pos ;
2006-03-28 15:03:44 +04:00
else
2006-07-13 14:39:25 +04:00
sdist = cic - > last_request_pos - rq - > sector ;
2006-03-28 15:03:44 +04:00
/*
* Don ' t allow the seek distance to get too large from the
* odd fragment , pagein , etc
*/
if ( cic - > seek_samples < = 60 ) /* second&third seek */
sdist = min ( sdist , ( cic - > seek_mean * 4 ) + 2 * 1024 * 1024 ) ;
else
sdist = min ( sdist , ( cic - > seek_mean * 4 ) + 2 * 1024 * 64 ) ;
cic - > seek_samples = ( 7 * cic - > seek_samples + 256 ) / 8 ;
cic - > seek_total = ( 7 * cic - > seek_total + ( u64 ) 256 * sdist ) / 8 ;
total = cic - > seek_total + ( cic - > seek_samples / 2 ) ;
do_div ( total , cic - > seek_samples ) ;
cic - > seek_mean = ( sector_t ) total ;
}
2005-04-17 02:20:36 +04:00
2005-06-27 12:55:12 +04:00
/*
* Disable idle window if the process thinks too long or seeks so much that
* it doesn ' t matter
*/
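/*
 * With the default slice_idle of HZ / 125 (8 ms at HZ=1000, for example),
 * a mean think time above that disables idling; so do a zero slice_idle
 * tunable, a seeky context on a queueing (hw_tag) device, and an ioc
 * with no tasks left attached to it.
 */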
static void
cfq_update_idle_window ( struct cfq_data * cfqd , struct cfq_queue * cfqq ,
struct cfq_io_context * cic )
{
2008-05-30 14:23:07 +04:00
int old_idle , enable_idle ;
2007-04-19 16:32:26 +04:00
2008-01-28 13:38:15 +03:00
/*
* Don ' t idle for async or idle io prio class
*/
if ( ! cfq_cfqq_sync ( cfqq ) | | cfq_class_idle ( cfqq ) )
2007-04-19 16:32:26 +04:00
return ;
2008-06-26 15:49:33 +04:00
enable_idle = old_idle = cfq_cfqq_idle_window ( cfqq ) ;
2005-04-17 02:20:36 +04:00
2007-11-27 14:47:04 +03:00
if ( ! atomic_read ( & cic - > ioc - > nr_tasks ) | | ! cfqd - > cfq_slice_idle | |
2006-06-16 13:23:00 +04:00
( cfqd - > hw_tag & & CIC_SEEKY ( cic ) ) )
2005-06-27 12:55:12 +04:00
enable_idle = 0 ;
else if ( sample_valid ( cic - > ttime_samples ) ) {
if ( cic - > ttime_mean > cfqd - > cfq_slice_idle )
enable_idle = 0 ;
else
enable_idle = 1 ;
2005-04-17 02:20:36 +04:00
}
2008-05-30 14:23:07 +04:00
if ( old_idle ! = enable_idle ) {
cfq_log_cfqq ( cfqd , cfqq , " idle=%d " , enable_idle ) ;
if ( enable_idle )
cfq_mark_cfqq_idle_window ( cfqq ) ;
else
cfq_clear_cfqq_idle_window ( cfqq ) ;
}
2005-06-27 12:55:12 +04:00
}
2005-04-17 02:20:36 +04:00
2005-06-27 12:55:12 +04:00
/*
 * Check if new_cfqq should preempt the currently active queue. Return 0
 * for no (or if we aren't sure); returning 1 will cause a preempt.
*/
static int
cfq_should_preempt ( struct cfq_data * cfqd , struct cfq_queue * new_cfqq ,
2006-07-13 14:39:25 +04:00
struct request * rq )
2005-06-27 12:55:12 +04:00
{
2007-04-25 14:44:27 +04:00
struct cfq_queue * cfqq ;
2005-06-27 12:55:12 +04:00
2007-04-25 14:44:27 +04:00
cfqq = cfqd - > active_queue ;
if ( ! cfqq )
2005-06-27 12:55:12 +04:00
return 0 ;
2007-04-25 14:44:27 +04:00
if ( cfq_slice_used ( cfqq ) )
return 1 ;
if ( cfq_class_idle ( new_cfqq ) )
2006-06-16 13:23:00 +04:00
return 0 ;
2005-06-27 12:55:12 +04:00
if ( cfq_class_idle ( cfqq ) )
return 1 ;
2007-02-14 21:59:49 +03:00
2006-07-23 03:42:19 +04:00
/*
* if the new request is sync , but the currently running queue is
* not , let the sync request have priority .
*/
2006-07-13 14:39:25 +04:00
if ( rq_is_sync ( rq ) & & ! cfq_cfqq_sync ( cfqq ) )
2005-06-27 12:55:12 +04:00
return 1 ;
2007-02-14 21:59:49 +03:00
2006-07-23 03:42:19 +04:00
/*
* So both queues are sync . Let the new request get disk time if
* it ' s a metadata request and the current queue is doing regular IO .
*/
if ( rq_is_meta ( rq ) & & ! cfqq - > meta_pending )
return 1 ;
2005-06-27 12:55:12 +04:00
2009-01-30 14:46:41 +03:00
/*
* Allow an RT request to pre - empt an ongoing non - RT cfqq timeslice .
*/
if ( cfq_class_rt ( new_cfqq ) & & ! cfq_class_rt ( cfqq ) )
return 1 ;
2007-02-14 21:59:49 +03:00
if ( ! cfqd - > active_cic | | ! cfq_cfqq_wait_request ( cfqq ) )
return 0 ;
/*
* if this request is as - good as one we would expect from the
* current cfqq , let it preempt
*/
2007-04-25 14:44:27 +04:00
if ( cfq_rq_close ( cfqd , rq ) )
2007-02-14 21:59:49 +03:00
return 1 ;
2005-06-27 12:55:12 +04:00
return 0 ;
}
/*
* cfqq preempts the active queue . if we allowed preempt with no slice left ,
* let it have half of its nominal slice .
*/
static void cfq_preempt_queue ( struct cfq_data * cfqd , struct cfq_queue * cfqq )
{
2008-05-30 14:23:07 +04:00
cfq_log_cfqq ( cfqd , cfqq , " preempt " ) ;
2007-04-23 10:25:00 +04:00
cfq_slice_expired ( cfqd , 1 ) ;
2005-06-27 12:55:12 +04:00
2006-07-19 22:29:12 +04:00
/*
 * Put the new queue at the front of the current list,
* so we know that it will be selected next .
*/
BUG_ON ( ! cfq_cfqq_on_rr ( cfqq ) ) ;
2007-04-19 14:03:34 +04:00
cfq_service_tree_add ( cfqd , cfqq , 1 ) ;
2006-07-19 22:29:12 +04:00
2007-01-19 03:51:58 +03:00
cfqq - > slice_end = 0 ;
cfq_mark_cfqq_slice_new ( cfqq ) ;
2005-06-27 12:55:12 +04:00
}
/*
2006-07-13 14:39:25 +04:00
* Called when a new fs request ( rq ) is added ( to cfqq ) . Check if there ' s
2005-06-27 12:55:12 +04:00
* something we should do about it
*/
static void
2006-07-13 14:39:25 +04:00
cfq_rq_enqueued ( struct cfq_data * cfqd , struct cfq_queue * cfqq ,
struct request * rq )
2005-06-27 12:55:12 +04:00
{
2006-07-13 14:39:25 +04:00
struct cfq_io_context * cic = RQ_CIC ( rq ) ;
2006-06-01 12:09:56 +04:00
2008-08-26 17:52:36 +04:00
cfqd - > rq_queued + + ;
2006-07-23 03:42:19 +04:00
if ( rq_is_meta ( rq ) )
cfqq - > meta_pending + + ;
2005-08-24 16:57:54 +04:00
cfq_update_io_thinktime ( cfqd , cic ) ;
2007-04-25 14:44:27 +04:00
cfq_update_io_seektime ( cfqd , cic , rq ) ;
2005-08-24 16:57:54 +04:00
cfq_update_idle_window ( cfqd , cfqq , cic ) ;
2006-07-13 14:39:25 +04:00
cic - > last_request_pos = rq - > sector + rq - > nr_sectors ;
2005-06-27 12:55:12 +04:00
if ( cfqq = = cfqd - > active_queue ) {
/*
* if we are waiting for a request for this queue , let it rip
* immediately and flag that we must not expire this queue
* just now
*/
2005-06-27 12:56:24 +04:00
if ( cfq_cfqq_wait_request ( cfqq ) ) {
cfq_mark_cfqq_must_dispatch ( cfqq ) ;
2005-06-27 12:55:12 +04:00
del_timer ( & cfqd - > idle_slice_timer ) ;
2006-07-20 16:54:05 +04:00
blk_start_queueing ( cfqd - > queue ) ;
2005-06-27 12:55:12 +04:00
}
2006-07-13 14:39:25 +04:00
} else if ( cfq_should_preempt ( cfqd , cfqq , rq ) ) {
2005-06-27 12:55:12 +04:00
/*
* not the active queue - expire current slice if it is
 * idle and has expired its mean thinktime, or this new queue
2009-01-30 14:46:41 +03:00
* has some old slice time left and is of higher priority or
* this new queue is RT and the current one is BE
2005-06-27 12:55:12 +04:00
*/
cfq_preempt_queue ( cfqd , cfqq ) ;
2005-06-27 12:56:24 +04:00
cfq_mark_cfqq_must_dispatch ( cfqq ) ;
2006-07-20 16:54:05 +04:00
blk_start_queueing ( cfqd - > queue ) ;
2005-06-27 12:55:12 +04:00
}
2005-04-17 02:20:36 +04:00
}
2007-07-24 11:28:11 +04:00
static void cfq_insert_request ( struct request_queue * q , struct request * rq )
2005-04-17 02:20:36 +04:00
{
2005-10-20 18:42:29 +04:00
struct cfq_data * cfqd = q - > elevator - > elevator_data ;
2006-07-13 14:39:25 +04:00
struct cfq_queue * cfqq = RQ_CFQQ ( rq ) ;
2005-06-27 12:55:12 +04:00
2008-05-30 14:23:07 +04:00
cfq_log_cfqq ( cfqd , cfqq , " insert_request " ) ;
2008-01-24 10:52:45 +03:00
cfq_init_prio_data ( cfqq , RQ_CIC ( rq ) - > ioc ) ;
2005-04-17 02:20:36 +04:00
2006-07-13 14:39:25 +04:00
cfq_add_rq_rb ( rq ) ;
2005-04-17 02:20:36 +04:00
2005-06-27 12:55:12 +04:00
list_add_tail ( & rq - > queuelist , & cfqq - > fifo ) ;
2006-07-13 14:39:25 +04:00
cfq_rq_enqueued ( cfqd , cfqq , rq ) ;
2005-04-17 02:20:36 +04:00
}
2008-08-26 17:52:36 +04:00
/*
* Update hw_tag based on peak queue depth over 50 samples under
* sufficient load .
*/
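/*
 * Roughly: a sample is only taken while more than CFQ_HW_QUEUE_MIN
 * requests are queued or in the driver; after 50 such samples, hw_tag is
 * set if the peak driver depth reached CFQ_HW_QUEUE_MIN (5) and cleared
 * otherwise, then the sampling window starts over.
 */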
static void cfq_update_hw_tag ( struct cfq_data * cfqd )
{
if ( cfqd - > rq_in_driver > cfqd - > rq_in_driver_peak )
cfqd - > rq_in_driver_peak = cfqd - > rq_in_driver ;
if ( cfqd - > rq_queued < = CFQ_HW_QUEUE_MIN & &
cfqd - > rq_in_driver < = CFQ_HW_QUEUE_MIN )
return ;
if ( cfqd - > hw_tag_samples + + < 50 )
return ;
if ( cfqd - > rq_in_driver_peak > = CFQ_HW_QUEUE_MIN )
cfqd - > hw_tag = 1 ;
else
cfqd - > hw_tag = 0 ;
cfqd - > hw_tag_samples = 0 ;
cfqd - > rq_in_driver_peak = 0 ;
}
2007-07-24 11:28:11 +04:00
static void cfq_completed_request ( struct request_queue * q , struct request * rq )
2005-04-17 02:20:36 +04:00
{
2006-07-13 14:39:25 +04:00
struct cfq_queue * cfqq = RQ_CFQQ ( rq ) ;
2005-10-20 18:42:29 +04:00
struct cfq_data * cfqd = cfqq - > cfqd ;
2006-07-13 14:37:56 +04:00
const int sync = rq_is_sync ( rq ) ;
2005-10-20 18:42:29 +04:00
unsigned long now ;
2005-04-17 02:20:36 +04:00
2005-10-20 18:42:29 +04:00
now = jiffies ;
2008-05-30 14:23:07 +04:00
cfq_log_cfqq ( cfqd , cfqq , " complete " ) ;
2005-04-17 02:20:36 +04:00
2008-08-26 17:52:36 +04:00
cfq_update_hw_tag ( cfqd ) ;
2005-10-20 18:42:29 +04:00
WARN_ON ( ! cfqd - > rq_in_driver ) ;
2007-04-25 14:44:27 +04:00
WARN_ON ( ! cfqq - > dispatched ) ;
2005-10-20 18:42:29 +04:00
cfqd - > rq_in_driver - - ;
2007-04-25 14:44:27 +04:00
cfqq - > dispatched - - ;
2005-04-17 02:20:36 +04:00
2007-04-23 10:33:33 +04:00
if ( cfq_cfqq_sync ( cfqq ) )
cfqd - > sync_flight - - ;
2005-10-20 18:42:29 +04:00
if ( ! cfq_class_idle ( cfqq ) )
cfqd - > last_end_request = now ;
2005-06-27 12:56:24 +04:00
2006-06-16 13:23:00 +04:00
if ( sync )
2006-07-13 14:39:25 +04:00
RQ_CIC ( rq ) - > last_end_request = now ;
2006-06-16 13:23:00 +04:00
/*
* If this is the active queue , check if it needs to be expired ,
* or if we want to idle in case it has no pending requests .
*/
if ( cfqd - > active_queue = = cfqq ) {
2007-01-19 03:51:58 +03:00
if ( cfq_cfqq_slice_new ( cfqq ) ) {
cfq_set_prio_slice ( cfqd , cfqq ) ;
cfq_clear_cfqq_slice_new ( cfqq ) ;
}
2008-01-28 13:38:15 +03:00
if ( cfq_slice_used ( cfqq ) | | cfq_class_idle ( cfqq ) )
2007-04-23 10:25:00 +04:00
cfq_slice_expired ( cfqd , 1 ) ;
2007-04-25 14:44:27 +04:00
else if ( sync & & RB_EMPTY_ROOT ( & cfqq - > sort_list ) )
cfq_arm_slice_timer ( cfqd ) ;
2006-06-16 13:23:00 +04:00
}
2007-04-25 14:44:27 +04:00
if ( ! cfqd - > rq_in_driver )
cfq_schedule_dispatch ( cfqd ) ;
2005-04-17 02:20:36 +04:00
}
2005-06-27 12:55:12 +04:00
/*
* we temporarily boost lower priority queues if they are holding fs exclusive
* resources . they are boosted to normal prio ( CLASS_BE / 4 )
*/
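/*
 * For instance, an idle-class task holding the filesystem exclusively
 * (say, committing a transaction) is bumped to best-effort at IOPRIO_NORM
 * so nobody else is blocked behind it; org_ioprio and org_ioprio_class
 * let us undo the boost once the exclusive hold is released.
 */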
static void cfq_prio_boost ( struct cfq_queue * cfqq )
2005-04-17 02:20:36 +04:00
{
2005-06-27 12:55:12 +04:00
if ( has_fs_excl ( ) ) {
/*
* boost idle prio on transactions that would lock out other
* users of the filesystem
*/
if ( cfq_class_idle ( cfqq ) )
cfqq - > ioprio_class = IOPRIO_CLASS_BE ;
if ( cfqq - > ioprio > IOPRIO_NORM )
cfqq - > ioprio = IOPRIO_NORM ;
} else {
/*
* check if we need to unboost the queue
*/
if ( cfqq - > ioprio_class ! = cfqq - > org_ioprio_class )
cfqq - > ioprio_class = cfqq - > org_ioprio_class ;
if ( cfqq - > ioprio ! = cfqq - > org_ioprio )
cfqq - > ioprio = cfqq - > org_ioprio ;
}
}
2005-04-17 02:20:36 +04:00
2006-07-22 18:48:31 +04:00
static inline int __cfq_may_queue ( struct cfq_queue * cfqq )
2005-06-27 12:55:12 +04:00
{
2005-06-27 12:56:24 +04:00
if ( ( cfq_cfqq_wait_request ( cfqq ) | | cfq_cfqq_must_alloc ( cfqq ) ) & &
2005-06-28 07:14:05 +04:00
! cfq_cfqq_must_alloc_slice ( cfqq ) ) {
2005-06-27 12:56:24 +04:00
cfq_mark_cfqq_must_alloc_slice ( cfqq ) ;
2005-06-27 12:55:12 +04:00
return ELV_MQUEUE_MUST ;
2005-06-27 12:56:24 +04:00
}
2005-04-17 02:20:36 +04:00
2005-06-27 12:55:12 +04:00
return ELV_MQUEUE_MAY ;
}
2007-07-24 11:28:11 +04:00
static int cfq_may_queue ( struct request_queue * q , int rw )
2005-06-27 12:55:12 +04:00
{
struct cfq_data * cfqd = q - > elevator - > elevator_data ;
struct task_struct * tsk = current ;
2007-04-25 14:29:51 +04:00
struct cfq_io_context * cic ;
2005-06-27 12:55:12 +04:00
struct cfq_queue * cfqq ;
/*
* don ' t force setup of a queue from here , as a call to may_queue
* does not necessarily imply that a request actually will be queued .
* so just lookup a possibly existing queue , or return ' may queue '
* if that fails
*/
2008-01-24 10:44:49 +03:00
cic = cfq_cic_lookup ( cfqd , tsk - > io_context ) ;
2007-04-25 14:29:51 +04:00
if ( ! cic )
return ELV_MQUEUE_MAY ;
cfqq = cic_to_cfqq ( cic , rw & REQ_RW_SYNC ) ;
2005-06-27 12:55:12 +04:00
if ( cfqq ) {
2008-01-24 10:52:45 +03:00
cfq_init_prio_data ( cfqq , cic - > ioc ) ;
2005-06-27 12:55:12 +04:00
cfq_prio_boost ( cfqq ) ;
2006-07-22 18:48:31 +04:00
return __cfq_may_queue ( cfqq ) ;
2005-06-27 12:55:12 +04:00
}
return ELV_MQUEUE_MAY ;
2005-04-17 02:20:36 +04:00
}
/*
* queue lock held here
*/
2006-12-01 12:42:33 +03:00
static void cfq_put_request ( struct request * rq )
2005-04-17 02:20:36 +04:00
{
2006-07-13 14:39:25 +04:00
struct cfq_queue * cfqq = RQ_CFQQ ( rq ) ;
2005-04-17 02:20:36 +04:00
2006-07-13 14:39:25 +04:00
if ( cfqq ) {
2005-06-27 12:55:12 +04:00
const int rw = rq_data_dir ( rq ) ;
2005-04-17 02:20:36 +04:00
2005-06-27 12:55:12 +04:00
BUG_ON ( ! cfqq - > allocated [ rw ] ) ;
cfqq - > allocated [ rw ] - - ;
2005-04-17 02:20:36 +04:00
2006-07-13 14:39:25 +04:00
put_io_context ( RQ_CIC ( rq ) - > ioc ) ;
2005-04-17 02:20:36 +04:00
rq - > elevator_private = NULL ;
2006-07-13 14:39:25 +04:00
rq - > elevator_private2 = NULL ;
2005-04-17 02:20:36 +04:00
cfq_put_queue ( cfqq ) ;
}
}
/*
2005-06-27 12:55:12 +04:00
* Allocate cfq data structures associated with this request .
2005-04-17 02:20:36 +04:00
*/
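/*
 * On success the request carries its cfq_io_context in elevator_private
 * and its cfq_queue in elevator_private2; the io_context and queue
 * references taken along the way are dropped again in cfq_put_request().
 */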
2005-06-27 12:55:12 +04:00
static int
2007-07-24 11:28:11 +04:00
cfq_set_request ( struct request_queue * q , struct request * rq , gfp_t gfp_mask )
2005-04-17 02:20:36 +04:00
{
struct cfq_data * cfqd = q - > elevator - > elevator_data ;
struct cfq_io_context * cic ;
const int rw = rq_data_dir ( rq ) ;
2006-12-13 15:02:26 +03:00
const int is_sync = rq_is_sync ( rq ) ;
2005-06-27 12:55:12 +04:00
struct cfq_queue * cfqq ;
2005-04-17 02:20:36 +04:00
unsigned long flags ;
might_sleep_if ( gfp_mask & __GFP_WAIT ) ;
2006-03-28 10:59:01 +04:00
cic = cfq_get_io_context ( cfqd , gfp_mask ) ;
2005-06-27 12:55:12 +04:00
2005-04-17 02:20:36 +04:00
spin_lock_irqsave ( q - > queue_lock , flags ) ;
2005-06-27 12:55:12 +04:00
if ( ! cic )
goto queue_fail ;
2007-04-25 14:29:51 +04:00
cfqq = cic_to_cfqq ( cic , is_sync ) ;
if ( ! cfqq ) {
2008-01-24 10:52:45 +03:00
cfqq = cfq_get_queue ( cfqd , is_sync , cic - > ioc , gfp_mask ) ;
2007-04-25 14:29:51 +04:00
2005-06-27 12:55:12 +04:00
if ( ! cfqq )
goto queue_fail ;
2005-04-17 02:20:36 +04:00
2007-04-25 14:29:51 +04:00
cic_set_cfqq ( cic , cfqq , is_sync ) ;
}
2005-04-17 02:20:36 +04:00
cfqq - > allocated [ rw ] + + ;
2005-06-27 12:56:24 +04:00
cfq_clear_cfqq_must_alloc ( cfqq ) ;
2005-06-27 12:55:12 +04:00
atomic_inc ( & cfqq - > ref ) ;
2005-04-17 02:20:36 +04:00
2006-07-13 14:39:25 +04:00
spin_unlock_irqrestore ( q - > queue_lock , flags ) ;
2005-06-27 12:56:24 +04:00
2006-07-13 14:39:25 +04:00
rq - > elevator_private = cic ;
rq - > elevator_private2 = cfqq ;
return 0 ;
2005-04-17 02:20:36 +04:00
2005-06-27 12:55:12 +04:00
queue_fail :
if ( cic )
put_io_context ( cic - > ioc ) ;
2006-07-22 18:48:31 +04:00
2005-06-27 12:56:24 +04:00
cfq_schedule_dispatch ( cfqd ) ;
2005-04-17 02:20:36 +04:00
spin_unlock_irqrestore ( q - > queue_lock , flags ) ;
2008-05-30 14:23:07 +04:00
cfq_log ( cfqd , " set_request fail " ) ;
2005-04-17 02:20:36 +04:00
return 1 ;
}
2006-11-22 17:55:48 +03:00
static void cfq_kick_queue ( struct work_struct * work )
2005-06-27 12:55:12 +04:00
{
2006-11-22 17:55:48 +03:00
struct cfq_data * cfqd =
container_of ( work , struct cfq_data , unplug_work ) ;
2007-07-24 11:28:11 +04:00
struct request_queue * q = cfqd - > queue ;
2005-06-27 12:55:12 +04:00
unsigned long flags ;
spin_lock_irqsave ( q - > queue_lock , flags ) ;
2006-07-20 16:54:05 +04:00
blk_start_queueing ( q ) ;
2005-06-27 12:55:12 +04:00
spin_unlock_irqrestore ( q - > queue_lock , flags ) ;
}
/*
* Timer running if the active_queue is currently idling inside its time slice
*/
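/*
 * When it fires, either the idling paid off (the queue got a request
 * while we waited, so we just kick the dispatcher), or it did not, in
 * which case the slice is expired and another queue gets to run.
 */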
static void cfq_idle_slice_timer ( unsigned long data )
{
struct cfq_data * cfqd = ( struct cfq_data * ) data ;
struct cfq_queue * cfqq ;
unsigned long flags ;
2007-01-19 04:06:33 +03:00
int timed_out = 1 ;
2005-06-27 12:55:12 +04:00
2008-05-30 14:23:07 +04:00
cfq_log ( cfqd , " idle timer fired " ) ;
2005-06-27 12:55:12 +04:00
spin_lock_irqsave ( cfqd - > queue - > queue_lock , flags ) ;
2008-01-31 15:08:54 +03:00
cfqq = cfqd - > active_queue ;
if ( cfqq ) {
2007-01-19 04:06:33 +03:00
timed_out = 0 ;
2005-06-27 12:55:12 +04:00
/*
* expired
*/
2007-01-19 03:51:58 +03:00
if ( cfq_slice_used ( cfqq ) )
2005-06-27 12:55:12 +04:00
goto expire ;
/*
 * only expire and reinvoke the request handler if there are
* other queues with pending requests
*/
2006-06-16 13:23:00 +04:00
if ( ! cfqd - > busy_queues )
2005-06-27 12:55:12 +04:00
goto out_cont ;
/*
* not expired and it has a request pending , let it dispatch
*/
2006-06-21 11:36:18 +04:00
if ( ! RB_EMPTY_ROOT ( & cfqq - > sort_list ) ) {
2005-06-27 12:56:24 +04:00
cfq_mark_cfqq_must_dispatch ( cfqq ) ;
2005-06-27 12:55:12 +04:00
goto out_kick ;
}
}
expire :
2007-04-23 10:25:00 +04:00
cfq_slice_expired ( cfqd , timed_out ) ;
2005-06-27 12:55:12 +04:00
out_kick :
2005-06-27 12:56:24 +04:00
cfq_schedule_dispatch ( cfqd ) ;
2005-06-27 12:55:12 +04:00
out_cont :
spin_unlock_irqrestore ( cfqd - > queue - > queue_lock , flags ) ;
}
2005-06-27 12:56:24 +04:00
static void cfq_shutdown_timer_wq ( struct cfq_data * cfqd )
{
del_timer_sync ( & cfqd - > idle_slice_timer ) ;
2008-12-03 14:41:39 +03:00
cancel_work_sync ( & cfqd - > unplug_work ) ;
2005-06-27 12:56:24 +04:00
}
2005-06-27 12:55:12 +04:00
2007-07-20 12:06:38 +04:00
static void cfq_put_async_queues ( struct cfq_data * cfqd )
{
int i ;
for ( i = 0 ; i < IOPRIO_BE_NR ; i + + ) {
if ( cfqd - > async_cfqq [ 0 ] [ i ] )
cfq_put_queue ( cfqd - > async_cfqq [ 0 ] [ i ] ) ;
if ( cfqd - > async_cfqq [ 1 ] [ i ] )
cfq_put_queue ( cfqd - > async_cfqq [ 1 ] [ i ] ) ;
}
2007-11-05 10:58:05 +03:00
if ( cfqd - > async_idle_cfqq )
cfq_put_queue ( cfqd - > async_idle_cfqq ) ;
2007-07-20 12:06:38 +04:00
}
2008-10-31 12:05:07 +03:00
static void cfq_exit_queue ( struct elevator_queue * e )
2005-04-17 02:20:36 +04:00
{
2005-06-27 12:55:12 +04:00
struct cfq_data * cfqd = e - > elevator_data ;
2007-07-24 11:28:11 +04:00
struct request_queue * q = cfqd - > queue ;
2005-06-27 12:55:12 +04:00
2005-06-27 12:56:24 +04:00
cfq_shutdown_timer_wq ( cfqd ) ;
2006-03-28 10:59:01 +04:00
2006-03-18 21:51:22 +03:00
spin_lock_irq ( q - > queue_lock ) ;
2006-03-28 10:59:01 +04:00
2006-03-18 21:51:22 +03:00
if ( cfqd - > active_queue )
2007-04-23 10:25:00 +04:00
__cfq_slice_expired ( cfqd , cfqd - > active_queue , 0 ) ;
2006-03-28 10:59:01 +04:00
while ( ! list_empty ( & cfqd - > cic_list ) ) {
2006-03-18 21:51:22 +03:00
struct cfq_io_context * cic = list_entry ( cfqd - > cic_list . next ,
struct cfq_io_context ,
queue_list ) ;
2006-07-22 18:48:31 +04:00
__cfq_exit_single_io_context ( cfqd , cic ) ;
2006-03-18 21:51:22 +03:00
}
2006-03-28 10:59:01 +04:00
2007-07-20 12:06:38 +04:00
cfq_put_async_queues ( cfqd ) ;
2007-07-10 15:43:25 +04:00
2006-03-18 21:51:22 +03:00
spin_unlock_irq ( q - > queue_lock ) ;
2006-03-18 20:05:37 +03:00
cfq_shutdown_timer_wq ( cfqd ) ;
kfree ( cfqd ) ;
2005-04-17 02:20:36 +04:00
}
2007-07-24 11:28:11 +04:00
static void * cfq_init_queue ( struct request_queue * q )
2005-04-17 02:20:36 +04:00
{
struct cfq_data * cfqd ;
2007-07-17 15:03:29 +04:00
cfqd = kmalloc_node ( sizeof ( * cfqd ) , GFP_KERNEL | __GFP_ZERO , q - > node ) ;
2005-04-17 02:20:36 +04:00
if ( ! cfqd )
2006-06-08 10:49:06 +04:00
return NULL ;
2005-04-17 02:20:36 +04:00
2007-04-26 14:53:50 +04:00
cfqd - > service_tree = CFQ_RB_ROOT ;
2006-03-18 21:51:22 +03:00
INIT_LIST_HEAD ( & cfqd - > cic_list ) ;
2005-04-17 02:20:36 +04:00
cfqd - > queue = q ;
2005-06-27 12:55:12 +04:00
init_timer ( & cfqd - > idle_slice_timer ) ;
cfqd - > idle_slice_timer . function = cfq_idle_slice_timer ;
cfqd - > idle_slice_timer . data = ( unsigned long ) cfqd ;
2006-11-22 17:55:48 +03:00
INIT_WORK ( & cfqd - > unplug_work , cfq_kick_queue ) ;
2005-06-27 12:55:12 +04:00
2007-11-07 11:46:13 +03:00
cfqd - > last_end_request = jiffies ;
2005-04-17 02:20:36 +04:00
cfqd - > cfq_quantum = cfq_quantum ;
2005-06-27 12:55:12 +04:00
cfqd - > cfq_fifo_expire [ 0 ] = cfq_fifo_expire [ 0 ] ;
cfqd - > cfq_fifo_expire [ 1 ] = cfq_fifo_expire [ 1 ] ;
2005-04-17 02:20:36 +04:00
cfqd - > cfq_back_max = cfq_back_max ;
cfqd - > cfq_back_penalty = cfq_back_penalty ;
2005-06-27 12:55:12 +04:00
cfqd - > cfq_slice [ 0 ] = cfq_slice_async ;
cfqd - > cfq_slice [ 1 ] = cfq_slice_sync ;
cfqd - > cfq_slice_async_rq = cfq_slice_async_rq ;
cfqd - > cfq_slice_idle = cfq_slice_idle ;
2008-08-26 17:52:36 +04:00
cfqd - > hw_tag = 1 ;
2005-06-27 12:56:24 +04:00
2006-06-08 10:49:06 +04:00
return cfqd ;
2005-04-17 02:20:36 +04:00
}
static void cfq_slab_kill ( void )
{
2008-05-28 16:46:59 +04:00
/*
* Caller already ensured that pending RCU callbacks are completed ,
* so we should have no busy allocations at this point .
*/
2005-04-17 02:20:36 +04:00
if ( cfq_pool )
kmem_cache_destroy ( cfq_pool ) ;
if ( cfq_ioc_pool )
kmem_cache_destroy ( cfq_ioc_pool ) ;
}
static int __init cfq_slab_setup ( void )
{
2007-05-07 01:49:57 +04:00
cfq_pool = KMEM_CACHE ( cfq_queue , 0 ) ;
2005-04-17 02:20:36 +04:00
if ( ! cfq_pool )
goto fail ;
2008-04-02 16:31:02 +04:00
cfq_ioc_pool = KMEM_CACHE ( cfq_io_context , 0 ) ;
2005-04-17 02:20:36 +04:00
if ( ! cfq_ioc_pool )
goto fail ;
return 0 ;
fail :
cfq_slab_kill ( ) ;
return - ENOMEM ;
}
/*
* sysfs parts below - - >
*/
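/*
 * These tunables are exposed under /sys/block/<dev>/queue/iosched/ (e.g.
 * slice_idle, quantum); time-based values are shown in milliseconds and
 * converted back to jiffies on store.
 */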
static ssize_t
cfq_var_show ( unsigned int var , char * page )
{
return sprintf ( page , " %d \n " , var ) ;
}
static ssize_t
cfq_var_store ( unsigned int * var , const char * page , size_t count )
{
char * p = ( char * ) page ;
* var = simple_strtoul ( p , & p , 10 ) ;
return count ;
}
# define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
2008-10-31 12:05:07 +03:00
static ssize_t __FUNC ( struct elevator_queue * e , char * page ) \
2005-04-17 02:20:36 +04:00
{ \
2006-03-19 02:35:43 +03:00
struct cfq_data * cfqd = e - > elevator_data ; \
2005-04-17 02:20:36 +04:00
unsigned int __data = __VAR ; \
if ( __CONV ) \
__data = jiffies_to_msecs ( __data ) ; \
return cfq_var_show ( __data , ( page ) ) ; \
}
SHOW_FUNCTION ( cfq_quantum_show , cfqd - > cfq_quantum , 0 ) ;
2005-06-27 12:55:12 +04:00
SHOW_FUNCTION ( cfq_fifo_expire_sync_show , cfqd - > cfq_fifo_expire [ 1 ] , 1 ) ;
SHOW_FUNCTION ( cfq_fifo_expire_async_show , cfqd - > cfq_fifo_expire [ 0 ] , 1 ) ;
2006-03-19 06:27:18 +03:00
SHOW_FUNCTION ( cfq_back_seek_max_show , cfqd - > cfq_back_max , 0 ) ;
SHOW_FUNCTION ( cfq_back_seek_penalty_show , cfqd - > cfq_back_penalty , 0 ) ;
2005-06-27 12:55:12 +04:00
SHOW_FUNCTION ( cfq_slice_idle_show , cfqd - > cfq_slice_idle , 1 ) ;
SHOW_FUNCTION ( cfq_slice_sync_show , cfqd - > cfq_slice [ 1 ] , 1 ) ;
SHOW_FUNCTION ( cfq_slice_async_show , cfqd - > cfq_slice [ 0 ] , 1 ) ;
SHOW_FUNCTION ( cfq_slice_async_rq_show , cfqd - > cfq_slice_async_rq , 0 ) ;
2005-04-17 02:20:36 +04:00
# undef SHOW_FUNCTION
# define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
2008-10-31 12:05:07 +03:00
static ssize_t __FUNC ( struct elevator_queue * e , const char * page , size_t count ) \
2005-04-17 02:20:36 +04:00
{ \
2006-03-19 02:35:43 +03:00
struct cfq_data * cfqd = e - > elevator_data ; \
2005-04-17 02:20:36 +04:00
unsigned int __data ; \
int ret = cfq_var_store ( & __data , ( page ) , count ) ; \
if ( __data < ( MIN ) ) \
__data = ( MIN ) ; \
else if ( __data > ( MAX ) ) \
__data = ( MAX ) ; \
if ( __CONV ) \
* ( __PTR ) = msecs_to_jiffies ( __data ) ; \
else \
* ( __PTR ) = __data ; \
return ret ; \
}
STORE_FUNCTION ( cfq_quantum_store , & cfqd - > cfq_quantum , 1 , UINT_MAX , 0 ) ;
2008-01-31 15:08:54 +03:00
STORE_FUNCTION ( cfq_fifo_expire_sync_store , & cfqd - > cfq_fifo_expire [ 1 ] , 1 ,
UINT_MAX , 1 ) ;
STORE_FUNCTION ( cfq_fifo_expire_async_store , & cfqd - > cfq_fifo_expire [ 0 ] , 1 ,
UINT_MAX , 1 ) ;
2006-03-19 06:27:18 +03:00
STORE_FUNCTION ( cfq_back_seek_max_store , & cfqd - > cfq_back_max , 0 , UINT_MAX , 0 ) ;
2008-01-31 15:08:54 +03:00
STORE_FUNCTION ( cfq_back_seek_penalty_store , & cfqd - > cfq_back_penalty , 1 ,
UINT_MAX , 0 ) ;
2005-06-27 12:55:12 +04:00
STORE_FUNCTION ( cfq_slice_idle_store , & cfqd - > cfq_slice_idle , 0 , UINT_MAX , 1 ) ;
STORE_FUNCTION ( cfq_slice_sync_store , & cfqd - > cfq_slice [ 1 ] , 1 , UINT_MAX , 1 ) ;
STORE_FUNCTION ( cfq_slice_async_store , & cfqd - > cfq_slice [ 0 ] , 1 , UINT_MAX , 1 ) ;
2008-01-31 15:08:54 +03:00
STORE_FUNCTION ( cfq_slice_async_rq_store , & cfqd - > cfq_slice_async_rq , 1 ,
UINT_MAX , 0 ) ;
2005-04-17 02:20:36 +04:00
# undef STORE_FUNCTION
2006-03-19 06:27:18 +03:00
# define CFQ_ATTR(name) \
__ATTR ( name , S_IRUGO | S_IWUSR , cfq_ # # name # # _show , cfq_ # # name # # _store )
static struct elv_fs_entry cfq_attrs [ ] = {
CFQ_ATTR ( quantum ) ,
CFQ_ATTR ( fifo_expire_sync ) ,
CFQ_ATTR ( fifo_expire_async ) ,
CFQ_ATTR ( back_seek_max ) ,
CFQ_ATTR ( back_seek_penalty ) ,
CFQ_ATTR ( slice_sync ) ,
CFQ_ATTR ( slice_async ) ,
CFQ_ATTR ( slice_async_rq ) ,
CFQ_ATTR ( slice_idle ) ,
__ATTR_NULL
2005-04-17 02:20:36 +04:00
} ;
static struct elevator_type iosched_cfq = {
. ops = {
. elevator_merge_fn = cfq_merge ,
. elevator_merged_fn = cfq_merged_request ,
. elevator_merge_req_fn = cfq_merged_requests ,
2006-12-20 13:04:12 +03:00
. elevator_allow_merge_fn = cfq_allow_merge ,
2005-10-20 18:42:29 +04:00
. elevator_dispatch_fn = cfq_dispatch_requests ,
2005-04-17 02:20:36 +04:00
. elevator_add_req_fn = cfq_insert_request ,
2005-10-20 18:42:29 +04:00
. elevator_activate_req_fn = cfq_activate_request ,
2005-04-17 02:20:36 +04:00
. elevator_deactivate_req_fn = cfq_deactivate_request ,
. elevator_queue_empty_fn = cfq_queue_empty ,
. elevator_completed_req_fn = cfq_completed_request ,
2006-07-13 14:33:14 +04:00
. elevator_former_req_fn = elv_rb_former_request ,
. elevator_latter_req_fn = elv_rb_latter_request ,
2005-04-17 02:20:36 +04:00
. elevator_set_req_fn = cfq_set_request ,
. elevator_put_req_fn = cfq_put_request ,
. elevator_may_queue_fn = cfq_may_queue ,
. elevator_init_fn = cfq_init_queue ,
. elevator_exit_fn = cfq_exit_queue ,
2006-08-29 11:05:44 +04:00
. trim = cfq_free_io_context ,
2005-04-17 02:20:36 +04:00
} ,
2006-03-19 02:35:43 +03:00
. elevator_attrs = cfq_attrs ,
2005-04-17 02:20:36 +04:00
. elevator_name = " cfq " ,
. elevator_owner = THIS_MODULE ,
} ;
static int __init cfq_init ( void )
{
2005-06-27 12:55:12 +04:00
/*
* could be 0 on HZ < 1000 setups
*/
if ( ! cfq_slice_async )
cfq_slice_async = 1 ;
if ( ! cfq_slice_idle )
cfq_slice_idle = 1 ;
2005-04-17 02:20:36 +04:00
if ( cfq_slab_setup ( ) )
return - ENOMEM ;
2007-12-12 20:51:56 +03:00
elv_register ( & iosched_cfq ) ;
2005-04-17 02:20:36 +04:00
2007-12-12 20:51:56 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
static void __exit cfq_exit ( void )
{
2006-10-01 10:28:10 +04:00
DECLARE_COMPLETION_ONSTACK ( all_gone ) ;
2005-04-17 02:20:36 +04:00
elv_unregister ( & iosched_cfq ) ;
2006-03-18 23:05:53 +03:00
ioc_gone = & all_gone ;
2006-04-18 11:44:06 +04:00
/* ioc_gone's update must be visible before reading ioc_count */
smp_wmb ( ) ;
2008-05-28 16:46:59 +04:00
/*
* this also protects us from entering cfq_slab_kill ( ) with
* pending RCU callbacks
*/
2006-07-19 07:07:12 +04:00
if ( elv_ioc_count_read ( ioc_count ) )
2008-05-29 11:32:08 +04:00
wait_for_completion ( & all_gone ) ;
2005-10-31 02:01:39 +03:00
cfq_slab_kill ( ) ;
2005-04-17 02:20:36 +04:00
}
module_init ( cfq_init ) ;
module_exit ( cfq_exit ) ;
MODULE_AUTHOR ( " Jens Axboe " ) ;
MODULE_LICENSE ( " GPL " ) ;
MODULE_DESCRIPTION ( " Completely Fair Queueing IO scheduler " ) ;