#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H
/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ / 50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32
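
/*
 * Hedged sketch of how the two constants interact (the helper names
 * ioc_batching()/ioc_set_batching() refer to blk-core.c and are
 * assumptions, not defined in this header): a task that had to sleep
 * for a request is marked as batching and may then allocate up to
 * q->nr_batching (BLK_BATCH_REQ) further requests within
 * BLK_BATCH_TIME, roughly:
 *
 *	ioc->nr_batch_requests = q->nr_batching;
 *	ioc->last_waited = jiffies;
 *	...
 *	return ioc->nr_batch_requests == q->nr_batching ||
 *	       (ioc->nr_batch_requests > 0 &&
 *		time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
 */
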
extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * EH timer and IO completion will both attempt to 'grab' the request, make
 * sure that only one of them succeeds
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
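
/*
 * Illustrative sketch, not a definitive call chain: the timeout handler
 * and the normal completion path both try to claim the request first,
 * so only the winner of the test_and_set_bit() actually completes it,
 * e.g.
 *
 *	if (!blk_mark_rq_complete(rq))
 *		__blk_complete_request(rq);
 *
 * The loser simply backs off; blk_clear_rq_complete() re-arms the
 * request when the timeout handler decides to let it keep running.
 */
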
/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

struct request *blk_do_flush(struct request_queue *q, struct request *rq);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			/*
			 * Ordinary requests, and the flush sequence's own
			 * request, go straight to the caller.
			 */
			if (!(rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) ||
			    rq == &q->flush_rq)
				return rq;
			/*
			 * REQ_FLUSH/REQ_FUA requests are diverted through
			 * the flush machinery, which may or may not hand
			 * back something to dispatch right now.
			 */
			rq = blk_do_flush(q, rq);
			if (rq)
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif
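
/*
 * Sketch of the fault-injection hook, assuming the completion path in
 * blk-softirq.c keeps its current shape: completions can be dropped on
 * purpose so that the timeout machinery gets exercised, e.g.
 *
 *	if (unlikely(blk_should_fake_timeout(rq->q)))
 *		return;
 */
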
struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

void elv_quiesce_start(struct request_queue *q);
void elv_quiesce_end(struct request_queue *q);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
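
/*
 * Hedged example of the hysteresis, loosely following the request
 * allocator in blk-core.c: the queue is flagged congested once the
 * used-request count climbs to the "on" threshold, and the flag is only
 * cleared again when the count drops below the lower "off" threshold:
 *
 *	if (rl->count[is_sync] + 1 >= queue_congestion_on_threshold(q))
 *		blk_set_queue_congested(q, is_sync);
 *	...
 *	if (rl->count[is_sync] < queue_congestion_off_threshold(q))
 *		blk_clear_queue_congested(q, is_sync);
 */
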
static inline int blk_cpu_to_group(int cpu)
{
	int group = NR_CPUS;
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	group = cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	group = cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
	if (likely(group < NR_CPUS))
		return group;
	return cpu;
}
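
/*
 * Hedged note on intended use (the exact caller lives in blk-softirq.c
 * and may differ in detail): with rq_affinity enabled, a completion is
 * treated as "local enough" when the completing CPU sits in the same
 * core/SMT group as the requested completion CPU, roughly:
 *
 *	if (ccpu == cpu || ccpu == blk_cpu_to_group(cpu))
 *		... complete locally instead of raising an IPI ...
 */
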
/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started, and
 *	c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
		(rq->cmd_flags & REQ_DISCARD));
}
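
/*
 * Minimal usage sketch, assuming the accounting helpers in blk-core.c
 * keep their current shape: every per-partition statistics update is
 * gated on this predicate, e.g.
 *
 *	static void blk_account_io_completion(struct request *req,
 *					      unsigned int bytes)
 *	{
 *		if (blk_do_io_stat(req)) {
 *			...
 *		}
 *	}
 */
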
#endif