#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;

void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
void __blk_queue_free_tags(struct request_queue *q);

void blk_unplug_work(struct work_struct *work);
void blk_unplug_timeout(unsigned long data);
struct io_context *current_io_context(gfp_t gfp_flags, int node);

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
void blk_recalc_rq_segments(struct request *rq);
void blk_recalc_rq_sectors(struct request *rq, int nsect);

void blk_queue_congestion_threshold(struct request_queue *q);

int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested.  It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
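
/*
 * Hedged usage sketch (illustration only, not part of this header): how a
 * caller that tracks the number of in-flight requests could apply the on/off
 * hysteresis above, marking the queue congested once usage reaches the "on"
 * threshold and only unmarking it after usage drops below the lower "off"
 * threshold.  mark_queue_congested()/clear_queue_congested() are hypothetical
 * helper names introduced for this example, so the block is guarded with
 * "#if 0" to keep the header compiling unchanged.
 */
#if 0
static inline void example_update_congestion(struct request_queue *q,
					     int in_flight)
{
	if (in_flight >= queue_congestion_on_threshold(q))
		mark_queue_congested(q);	/* hypothetical helper */
	else if (in_flight < queue_congestion_off_threshold(q))
		clear_queue_congested(q);	/* hypothetical helper */
}
#endif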
#endif