/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
2012-03-16 21:43:50 -07:00
# define pr_fmt(fmt) "[TTM] " fmt
2009-06-10 15:20:19 +02:00
# include <ttm/ttm_module.h>
# include <ttm/ttm_bo_driver.h>
# include <ttm/ttm_placement.h>
2013-07-24 21:08:53 +02:00
# include <drm/drm_vma_manager.h>
2009-06-10 15:20:19 +02:00
# include <linux/mm.h>
2016-01-15 16:56:40 -08:00
# include <linux/pfn_t.h>
2009-06-10 15:20:19 +02:00
# include <linux/rbtree.h>
# include <linux/module.h>
# include <linux/uaccess.h>
# define TTM_BO_VM_NUM_PREFAULT 16
2013-10-09 03:18:07 -07:00
/*
 * ttm_bo_vm_fault_idle - wait for a moving buffer object to become idle
 * before servicing a page fault on it.
 *
 * @bo:  The buffer object being faulted; must be reserved by the caller.
 * @vma: The faulting vma (used to reach mmap_sem for the retry path).
 * @vmf: Fault descriptor carrying the FAULT_FLAG_* retry flags.
 *
 * Returns 0 when the bo is idle, or a VM_FAULT_* code
 * (VM_FAULT_RETRY / VM_FAULT_SIGBUS / VM_FAULT_NOPAGE) otherwise.
 */
static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	int ret = 0;

	/* Fast path: bo is not part of a pipelined move, nothing to wait for. */
	if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)))
		goto out_unlock;

	/*
	 * Quick non-stalling check for idle.
	 */
	ret = ttm_bo_wait(bo, false, true);
	if (likely(ret == 0))
		goto out_unlock;

	/*
	 * If possible, avoid waiting for GPU with mmap_sem
	 * held.  The fault will be retried by the core mm once the
	 * blocking wait below has finished.
	 */
	if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
		ret = VM_FAULT_RETRY;
		/* Caller asked us never to block: report RETRY immediately. */
		if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
			goto out_unlock;

		up_read(&vma->vm_mm->mmap_sem);
		/*
		 * NOTE(review): after up_read() the vma (and the bo
		 * reference it holds via vm_private_data) could in
		 * principle go away while we wait — consider taking an
		 * explicit bo reference across this wait; verify against
		 * the callers' lifetime guarantees.
		 */
		(void) ttm_bo_wait(bo, true, false);
		goto out_unlock;
	}

	/*
	 * Ordinary wait.  Interruptible, so a signal maps to NOPAGE
	 * (fault will be retried) and any other error to SIGBUS.
	 */
	ret = ttm_bo_wait(bo, true, false);
	if (unlikely(ret != 0))
		ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
			VM_FAULT_NOPAGE;

out_unlock:
	return ret;
}
2009-06-10 15:20:19 +02:00
/*
 * ttm_bo_vm_fault - page-fault handler for TTM buffer-object mappings.
 *
 * @vma: The faulting vma; vm_private_data holds the ttm_buffer_object.
 * @vmf: Fault descriptor (faulting address and FAULT_FLAG_* flags).
 *
 * Reserves the bo (trylock to avoid a lock inversion with mmap_sem),
 * waits for any pipelined move to finish, reserves the io region, and
 * then inserts up to TTM_BO_VM_NUM_PREFAULT ptes starting at the
 * faulting page.  Returns a VM_FAULT_* code.
 */
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
	    vma->vm_private_data;
	struct ttm_bo_device *bdev = bo->bdev;
	unsigned long page_offset;
	unsigned long page_last;
	unsigned long pfn;
	struct ttm_tt *ttm = NULL;
	struct page *page;
	int ret;
	int i;
	unsigned long address = (unsigned long)vmf->virtual_address;
	int retval = VM_FAULT_NOPAGE;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];
	struct vm_area_struct cvma;

	/*
	 * Work around locking order reversal in fault / nopfn
	 * between mmap_sem and bo_reserve: Perform a trylock operation
	 * for reserve, and if it fails, retry the fault after waiting
	 * for the buffer to become unreserved.
	 */
	ret = ttm_bo_reserve(bo, true, true, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				/* Drop mmap_sem, then block until unreserved. */
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}

			return VM_FAULT_RETRY;
		}

		/*
		 * If we'd want to change locking order to
		 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
		 * instead of retrying the fault...
		 */
		return VM_FAULT_NOPAGE;
	}

	/*
	 * Refuse to fault imported pages. This should be handled
	 * (if at all) by redirecting mmap to the exporter.
	 */
	if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
		retval = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	/* Give the driver a chance to move/validate the bo before faulting. */
	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTARTSYS:
			/* Transient condition: let the core mm retry the fault. */
			retval = VM_FAULT_NOPAGE;
			goto out_unlock;
		default:
			retval = VM_FAULT_SIGBUS;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */
	ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
	if (unlikely(ret != 0)) {
		retval = ret;
		goto out_unlock;
	}

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_NOPAGE;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/* Translate the faulting address into a page index within the bo. */
	page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
		vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
	page_last = vma_pages(vma) + vma->vm_pgoff -
		drm_vma_node_start(&bo->vma_node);

	if (unlikely(page_offset >= bo->num_pages)) {
		retval = VM_FAULT_SIGBUS;
		goto out_io_unlock;
	}

	/*
	 * Make a local vma copy to modify the page_prot member
	 * and vm_flags if necessary. The vma parameter is protected
	 * by mmap_sem in write mode.
	 */
	cvma = *vma;
	cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

	if (bo->mem.bus.is_iomem) {
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);
	} else {
		ttm = bo->ttm;
		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
						cvma.vm_page_prot);

		/* Allocate all page at once, most common usage */
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_FAULT_OOM;
			goto out_io_unlock;
		}
	}

	/*
	 * Speculatively prefault a number of pages. Only error on
	 * first page.
	 */
	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
		if (bo->mem.bus.is_iomem)
			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
		else {
			page = ttm->pages[page_offset];
			if (unlikely(!page && i == 0)) {
				retval = VM_FAULT_OOM;
				goto out_io_unlock;
			} else if (unlikely(!page)) {
				/* Missing page beyond the first: stop prefaulting. */
				break;
			}
			page->mapping = vma->vm_file->f_mapping;
			page->index = drm_vma_node_start(&bo->vma_node) +
				page_offset;
			pfn = page_to_pfn(page);
		}

		if (vma->vm_flags & VM_MIXEDMAP)
			ret = vm_insert_mixed(&cvma, address,
					__pfn_to_pfn_t(pfn, PFN_DEV));
		else
			ret = vm_insert_pfn(&cvma, address, pfn);

		/*
		 * Somebody beat us to this PTE or prefaulting to
		 * an already populated PTE, or prefaulting error.
		 */
		if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
			break;
		else if (unlikely(ret != 0)) {
			retval =
			    (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
			goto out_io_unlock;
		}

		address += PAGE_SIZE;
		if (unlikely(++page_offset >= page_last))
			break;
	}
out_io_unlock:
	ttm_mem_io_unlock(man);
out_unlock:
	ttm_bo_unreserve(bo);
	return retval;
}
static void ttm_bo_vm_open ( struct vm_area_struct * vma )
{
struct ttm_buffer_object * bo =
( struct ttm_buffer_object * ) vma - > vm_private_data ;
2014-01-03 11:47:23 +01:00
WARN_ON ( bo - > bdev - > dev_mapping ! = vma - > vm_file - > f_mapping ) ;
2009-06-10 15:20:19 +02:00
( void ) ttm_bo_reference ( bo ) ;
}
static void ttm_bo_vm_close ( struct vm_area_struct * vma )
{
2010-04-09 14:39:23 +02:00
struct ttm_buffer_object * bo = ( struct ttm_buffer_object * ) vma - > vm_private_data ;
2009-06-10 15:20:19 +02:00
ttm_bo_unref ( & bo ) ;
vma - > vm_private_data = NULL ;
}
2009-09-27 22:29:37 +04:00
/* vm operations installed on every TTM buffer-object mapping. */
static const struct vm_operations_struct ttm_bo_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close
};
2013-07-24 21:08:53 +02:00
static struct ttm_buffer_object * ttm_bo_vm_lookup ( struct ttm_bo_device * bdev ,
unsigned long offset ,
unsigned long pages )
{
struct drm_vma_offset_node * node ;
struct ttm_buffer_object * bo = NULL ;
drm_vma_offset_lock_lookup ( & bdev - > vma_manager ) ;
node = drm_vma_offset_lookup_locked ( & bdev - > vma_manager , offset , pages ) ;
if ( likely ( node ) ) {
bo = container_of ( node , struct ttm_buffer_object , vma_node ) ;
if ( ! kref_get_unless_zero ( & bo - > kref ) )
bo = NULL ;
}
drm_vma_offset_unlock_lookup ( & bdev - > vma_manager ) ;
if ( ! bo )
pr_err ( " Could not find buffer object to map \n " ) ;
return bo ;
}
2009-06-10 15:20:19 +02:00
int ttm_bo_mmap ( struct file * filp , struct vm_area_struct * vma ,
struct ttm_bo_device * bdev )
{
struct ttm_bo_driver * driver ;
struct ttm_buffer_object * bo ;
int ret ;
2013-07-24 21:08:53 +02:00
bo = ttm_bo_vm_lookup ( bdev , vma - > vm_pgoff , vma_pages ( vma ) ) ;
if ( unlikely ( ! bo ) )
2009-06-10 15:20:19 +02:00
return - EINVAL ;
driver = bo - > bdev - > driver ;
if ( unlikely ( ! driver - > verify_access ) ) {
ret = - EPERM ;
goto out_unref ;
}
ret = driver - > verify_access ( bo , filp ) ;
if ( unlikely ( ret ! = 0 ) )
goto out_unref ;
vma - > vm_ops = & ttm_bo_vm_ops ;
/*
* Note : We ' re transferring the bo reference to
* vma - > vm_private_data here .
*/
vma - > vm_private_data = bo ;
2014-01-03 09:21:54 +01:00
/*
2014-03-12 10:41:32 +01:00
* We ' d like to use VM_PFNMAP on shared mappings , where
* ( vma - > vm_flags & VM_SHARED ) ! = 0 , for performance reasons ,
* but for some reason VM_PFNMAP + x86 PAT + write - combine is very
* bad for performance . Until that has been sorted out , use
* VM_MIXEDMAP on all mappings . See freedesktop . org bug # 75719
2014-01-03 09:21:54 +01:00
*/
2014-03-12 10:41:32 +01:00
vma - > vm_flags | = VM_MIXEDMAP ;
2014-01-03 09:21:54 +01:00
vma - > vm_flags | = VM_IO | VM_DONTEXPAND | VM_DONTDUMP ;
2009-06-10 15:20:19 +02:00
return 0 ;
out_unref :
ttm_bo_unref ( & bo ) ;
return ret ;
}
EXPORT_SYMBOL ( ttm_bo_mmap ) ;
int ttm_fbdev_mmap ( struct vm_area_struct * vma , struct ttm_buffer_object * bo )
{
if ( vma - > vm_pgoff ! = 0 )
return - EACCES ;
vma - > vm_ops = & ttm_bo_vm_ops ;
vma - > vm_private_data = ttm_bo_reference ( bo ) ;
2014-03-12 10:41:32 +01:00
vma - > vm_flags | = VM_MIXEDMAP ;
2014-01-03 09:21:54 +01:00
vma - > vm_flags | = VM_IO | VM_DONTEXPAND ;
2009-06-10 15:20:19 +02:00
return 0 ;
}
EXPORT_SYMBOL ( ttm_fbdev_mmap ) ;