/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* this is to find and return the vmalloc-ed fb pages */
static int fb_deferred_io_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vma->vm_file)
		page->mapping = vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, int datasync)
{
	struct fb_info *info = file->private_data;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	return schedule_delayed_work(&info->deferred_work, 0);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
				  struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* this is a callback we get when userspace first tries to
	write to the page. we schedule a workqueue. that workqueue
	will eventually mkclean the touched pages and execute the
	deferred framebuffer IO. then if userspace touches a page
	again, we repeat the same scheme */

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* we loop through the pagelist before adding in order
	to keep the pagelist sorted */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check is to catch the case where a new
		process could start writing to the same page
		through a new pte. this new access can cause the
		mkwrite even when the original ps's pte is marked
		writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);

	return VM_FAULT_LOCKED;
}
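
/*
 * Illustrative only, not part of this file's API: the scheme above as
 * seen from userspace. A client mmap()s the fbdev node and writes to
 * it; the first write to each clean page arrives at ->page_mkwrite
 * above, and roughly fbdefio->delay jiffies later the driver's
 * deferred_io callback is handed that page on the pagelist.
 *
 *	fd = open("/dev/fb0", O_RDWR);
 *	fbmem = mmap(NULL, smem_len, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, fd, 0);
 *	memset(fbmem, 0xff, smem_len);	// faults in, pages get queued
 */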

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

static int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= (VM_RESERVED | VM_DONTEXPAND);
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
						deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
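
/*
 * A minimal sketch of the driver-side deferred_io callback invoked by
 * the work function above. All example_* names are hypothetical; a
 * real driver would push each dirty page out to its device. cur->index
 * is the page offset into the framebuffer, set by fb_deferred_io_fault().
 *
 *	static void example_deferred_io(struct fb_info *info,
 *					struct list_head *pagelist)
 *	{
 *		struct page *cur;
 *
 *		list_for_each_entry(cur, pagelist, lru)
 *			example_write_page(info, cur->index << PAGE_SHIFT);
 *	}
 */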

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	info->fbops->fb_mmap = fb_deferred_io_mmap;
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
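
/*
 * A minimal sketch of how a driver would enable deferred IO, assuming
 * the hypothetical example_deferred_io callback above: fill in a
 * struct fb_deferred_io, point info->fbdefio at it, and call
 * fb_deferred_io_init() before register_framebuffer().
 *
 *	static struct fb_deferred_io example_defio = {
 *		.delay		= HZ / 4,	// flush at most 4x per second
 *		.deferred_io	= example_deferred_io,
 *	};
 *
 *	info->fbdefio = &example_defio;
 *	fb_deferred_io_init(info);
 *	register_framebuffer(info);
 */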

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work(&info->deferred_work);
	flush_scheduled_work();

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	info->fbops->fb_mmap = NULL;
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);

MODULE_LICENSE("GPL");