#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
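
/*
 * default_backing_dev_info backs devices that do not register a bdi of
 * their own.  VM_MAX_READAHEAD is given in kbytes, so the expression above
 * converts it into a number of page-cache pages for ->ra_pages.
 */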

static struct class *bdi_class;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
        bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
        struct backing_dev_info *bdi = m->private;
        unsigned long background_thresh;
        unsigned long dirty_thresh;
        unsigned long bdi_thresh;

        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
                   "BdiWriteback:     %8lu kB\n"
                   "BdiReclaimable:   %8lu kB\n"
                   "BdiDirtyThresh:   %8lu kB\n"
                   "DirtyThresh:      %8lu kB\n"
                   "BackgroundThresh: %8lu kB\n",
                   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
                   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
                   K(bdi_thresh),
                   K(dirty_thresh),
                   K(background_thresh));
#undef K

        return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
        .open           = bdi_debug_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
        bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
        bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
                                               bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
        debugfs_remove(bdi->debug_stats);
        debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
                                      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
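
/*
 * With CONFIG_DEBUG_FS enabled, bdi_debug_register() gives every registered
 * bdi a per-device directory under <debugfs>/bdi/ (typically mounted at
 * /sys/kernel/debug) containing a read-only "stats" file produced by
 * bdi_debug_stats_show() above; the mount point is a convention, not
 * something this file enforces.
 */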

static ssize_t read_ahead_kb_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned long read_ahead_kb;
        ssize_t ret = -EINVAL;

        read_ahead_kb = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
                ret = count;
        }
        return ret;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)                                            \
static ssize_t name##_show(struct device *dev,                          \
                           struct device_attribute *attr, char *page)   \
{                                                                       \
        struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
                                                                        \
        return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
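
/*
 * BDI_SHOW(read_ahead_kb, ...) expands to read_ahead_kb_show(); together
 * with read_ahead_kb_store() it backs the read_ahead_kb attribute installed
 * through bdi_dev_attrs[] below, i.e. /sys/class/bdi/<name>/read_ahead_kb.
 * An illustrative use from userspace (device name chosen arbitrarily):
 *
 *      echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 */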

static ssize_t min_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_min_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct backing_dev_info *bdi = dev_get_drvdata(dev);
        char *end;
        unsigned int ratio;
        ssize_t ret = -EINVAL;

        ratio = simple_strtoul(buf, &end, 10);
        if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
                ret = bdi_set_max_ratio(bdi, ratio);
                if (!ret)
                        ret = count;
        }
        return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
        __ATTR_RW(max_ratio),
        __ATTR_NULL,
};

static __init int bdi_class_init(void)
{
        bdi_class = class_create(THIS_MODULE, "bdi");
        bdi_class->dev_attrs = bdi_dev_attrs;
        bdi_debug_init();
        return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
        int err;

        err = bdi_init(&default_backing_dev_info);
        if (!err)
                bdi_register(&default_backing_dev_info, NULL, "default");

        return err;
}
subsys_initcall(default_bdi_init);
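
/*
 * Initcall ordering matters here: bdi_class_init() runs at postcore time,
 * so the "bdi" class (and its debugfs root) already exist when
 * default_bdi_init() registers the default bdi at subsys time.
 */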

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...)
{
        va_list args;
        int ret = 0;
        struct device *dev;

        if (bdi->dev)   /* The driver needs to use separate queues per device */
                goto exit;

        va_start(args, fmt);
        dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
        va_end(args);
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto exit;
        }

        bdi->dev = dev;
        bdi_debug_register(bdi, dev_name(dev));

exit:
        return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
        return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
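
/*
 * A rough sketch of the registration lifecycle, with a hypothetical caller
 * and no error handling (the names below are illustrative, not taken from
 * this file):
 *
 *      err = bdi_init(bdi);
 *      if (!err)
 *              err = bdi_register_dev(bdi, disk_devt(disk));
 *      ...
 *      bdi_destroy(bdi);       (bdi_destroy() also calls bdi_unregister())
 *
 * bdi_register_dev() simply formats the device number as "major:minor" and
 * forwards to bdi_register().
 */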

void bdi_unregister(struct backing_dev_info *bdi)
{
        if (bdi->dev) {
                bdi_debug_unregister(bdi);
                device_unregister(bdi->dev);
                bdi->dev = NULL;
        }
}
EXPORT_SYMBOL(bdi_unregister);

int bdi_init(struct backing_dev_info *bdi)
{
        int i;
        int err;

        bdi->dev = NULL;

        bdi->min_ratio = 0;
        bdi->max_ratio = 100;
        bdi->max_prop_frac = PROP_FRAC_BASE;

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init(&bdi->bdi_stat[i], 0);
                if (err)
                        goto err;
        }

        bdi->dirty_exceeded = 0;
        err = prop_local_init_percpu(&bdi->completions);

        if (err) {
err:            /* unwind only the counters that were actually initialized */
                while (i--)
                        percpu_counter_destroy(&bdi->bdi_stat[i]);
        }

        return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
        int i;

        bdi_unregister(bdi);

        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);

        prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

static wait_queue_head_t congestion_wqh[2] = {
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
        __WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;
        wait_queue_head_t *wqh = &congestion_wqh[sync];

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        clear_bit(bit, &bdi->state);
        /*
         * Make the cleared bit visible before testing for waiters; this
         * pairs with the barrier implied by prepare_to_wait() in
         * congestion_wait() below.
         */
        smp_mb__after_clear_bit();
        if (waitqueue_active(wqh))
                wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
        enum bdi_state bit;

        bit = sync ? BDI_sync_congested : BDI_async_congested;
        set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @rw: READ or WRITE
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to
 * exit write congestion.  If no backing_devs are congested then just wait
 * for the next write to be completed.
 */
long congestion_wait(int rw, long timeout)
{
        long ret;
        DEFINE_WAIT(wait);
        wait_queue_head_t *wqh = &congestion_wqh[rw];

        prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
        ret = io_schedule_timeout(timeout);
        finish_wait(wqh, &wait);
        return ret;
}
EXPORT_SYMBOL(congestion_wait);
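
/*
 * Typical callers are memory reclaim and writeback paths that want to back
 * off while devices are congested, for example (illustrative only):
 *
 *      congestion_wait(WRITE, HZ/10);
 *
 * which sleeps uninterruptibly for up to a tenth of a second or until some
 * bdi clears its congestion bit and wakes the shared wait queue.
 */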