2008-01-29 16:51:59 +03:00
# ifndef BLK_INTERNAL_H
# define BLK_INTERNAL_H
2008-01-29 16:53:40 +03:00
/* Amount of time in which a process may batch requests */
# define BLK_BATCH_TIME (HZ / 50UL)
/* Number of requests a "batching" process may submit */
# define BLK_BATCH_REQ 32
2008-01-29 16:51:59 +03:00
extern struct kmem_cache * blk_requestq_cachep ;
extern struct kobj_type blk_queue_ktype ;
2008-01-29 16:53:40 +03:00
void init_request_from_bio ( struct request * req , struct bio * bio ) ;
void blk_rq_bio_prep ( struct request_queue * q , struct request * rq ,
struct bio * bio ) ;
2008-01-29 16:51:59 +03:00
void __blk_queue_free_tags ( struct request_queue * q ) ;
2008-01-29 16:53:40 +03:00
void blk_unplug_work ( struct work_struct * work ) ;
void blk_unplug_timeout ( unsigned long data ) ;
2008-09-14 16:55:09 +04:00
void blk_rq_timed_out_timer ( unsigned long data ) ;
void blk_delete_timer ( struct request * ) ;
void blk_add_timer ( struct request * ) ;
2008-10-17 16:03:08 +04:00
void __generic_unplug_device ( struct request_queue * ) ;
2008-09-14 16:55:09 +04:00
/*
* Internal atomic flags for request handling
*/
enum rq_atomic_flags {
REQ_ATOM_COMPLETE = 0 ,
} ;
/*
* EH timer and IO completion will both attempt to ' grab ' the request , make
* sure that only one of them suceeds
*/
static inline int blk_mark_rq_complete ( struct request * rq )
{
return test_and_set_bit ( REQ_ATOM_COMPLETE , & rq - > atomic_flags ) ;
}
/* Drop the "complete" claim so the request may be grabbed again later. */
static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
2008-01-29 16:53:40 +03:00
2009-04-23 06:05:18 +04:00
/*
* Internal elevator interface
*/
# define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash))
static inline struct request * __elv_next_request ( struct request_queue * q )
{
struct request * rq ;
while ( 1 ) {
while ( ! list_empty ( & q - > queue_head ) ) {
rq = list_entry_rq ( q - > queue_head . next ) ;
if ( blk_do_ordered ( q , & rq ) )
return rq ;
}
if ( ! q - > elevator - > ops - > elevator_dispatch_fn ( q , 0 ) )
return NULL ;
}
}
static inline void elv_activate_rq ( struct request_queue * q , struct request * rq )
{
struct elevator_queue * e = q - > elevator ;
if ( e - > ops - > elevator_activate_req_fn )
e - > ops - > elevator_activate_req_fn ( q , rq ) ;
}
static inline void elv_deactivate_rq ( struct request_queue * q , struct request * rq )
{
struct elevator_queue * e = q - > elevator ;
if ( e - > ops - > elevator_deactivate_req_fn )
e - > ops - > elevator_deactivate_req_fn ( q , rq ) ;
}
2008-09-14 16:56:33 +04:00
# ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout ( struct request_queue * ) ;
ssize_t part_timeout_show ( struct device * , struct device_attribute * , char * ) ;
ssize_t part_timeout_store ( struct device * , struct device_attribute * ,
const char * , size_t ) ;
# else
static inline int blk_should_fake_timeout ( struct request_queue * q )
{
return 0 ;
}
# endif
2008-01-29 16:53:40 +03:00
struct io_context * current_io_context ( gfp_t gfp_flags , int node ) ;
2008-01-29 16:04:06 +03:00
int ll_back_merge_fn ( struct request_queue * q , struct request * req ,
struct bio * bio ) ;
int ll_front_merge_fn ( struct request_queue * q , struct request * req ,
struct bio * bio ) ;
int attempt_back_merge ( struct request_queue * q , struct request * rq ) ;
int attempt_front_merge ( struct request_queue * q , struct request * rq ) ;
void blk_recalc_rq_segments ( struct request * rq ) ;
void blk_recalc_rq_sectors ( struct request * rq , int nsect ) ;
2008-01-29 16:51:59 +03:00
void blk_queue_congestion_threshold ( struct request_queue * q ) ;
2008-03-04 13:23:45 +03:00
int blk_dev_init ( void ) ;
2009-04-08 16:22:01 +04:00
void elv_quiesce_start ( struct request_queue * q ) ;
void elv_quiesce_end ( struct request_queue * q ) ;
2009-03-27 12:30:47 +03:00
2008-01-29 16:51:59 +03:00
/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}
/*
 * The threshold at which a queue is considered to be uncongested again
 * (kept below the on-threshold to provide the hysteresis noted above).
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
2008-06-30 22:04:41 +04:00
# if defined(CONFIG_BLK_DEV_INTEGRITY)
# define rq_for_each_integrity_segment(bvl, _rq, _iter) \
__rq_for_each_bio ( _iter . bio , _rq ) \
bip_for_each_vec ( bvl , _iter . bio - > bi_integrity , _iter . i )
# endif /* BLK_DEV_INTEGRITY */
2008-09-13 22:26:01 +04:00
/*
 * Map @cpu to the CPU that represents its scheduling group: the first
 * CPU of its core group under CONFIG_SCHED_MC, the first hardware-thread
 * sibling under CONFIG_SCHED_SMT, or the CPU itself when neither is
 * configured.
 *
 * NOTE(review): stray changelog timestamp lines were embedded inside this
 * function body in the original and have been removed — they broke
 * compilation.
 */
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
}
2009-03-27 12:31:51 +03:00
static inline int blk_do_io_stat ( struct request * rq )
2009-02-02 10:42:32 +03:00
{
2009-04-22 16:01:49 +04:00
return rq - > rq_disk & & blk_rq_io_stat ( rq ) ;
2009-02-02 10:42:32 +03:00
}
2008-01-29 16:51:59 +03:00
# endif