/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE  ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}
/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
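
/*
 * Example (illustrative sketch only, not part of this file's API): drivers
 * normally embed struct drm_gem_object in their own buffer type and call
 * drm_gem_object_init() on the embedded member. The names
 * example_gem_object and example_gem_create are hypothetical.
 *
 *	struct example_gem_object {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct example_gem_object *
 *	example_gem_create(struct drm_device *dev, size_t size)
 *	{
 *		struct example_gem_object *obj;
 *		int ret;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &obj->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(obj);
 *			return ERR_PTR(ret);
 *		}
 *
 *		return obj;
 *	}
 */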
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}
static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */
	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}
/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
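
/*
 * Example (illustrative sketch only): a gem-based driver opts into this
 * helper from its struct drm_driver. The example_* callbacks and the
 * feature flags shown are hypothetical driver choices.
 *
 *	static struct drm_driver example_driver = {
 *		.driver_features = DRIVER_GEM | DRIVER_MODESET,
 *		.dumb_create = example_gem_dumb_create,
 *		.dumb_map_offset = example_gem_dumb_map_offset,
 *		.dumb_destroy = drm_gem_dumb_destroy,
 *	};
 */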
/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
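
/*
 * Example (illustrative sketch only): an allocation ioctl usually creates a
 * handle and then drops its own reference, since the handle now keeps the
 * object alive. example_gem_create is the hypothetical helper sketched
 * above; args is the ioctl's hypothetical argument struct.
 *
 *	obj = example_gem_create(dev, args->size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	ret = drm_gem_handle_create(file_priv, &obj->base, &handle);
 *	drm_gem_object_unreference_unlocked(&obj->base);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 */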
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
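
/*
 * Example (illustrative sketch only): a driver's dumb_map_offset callback
 * typically allocates the fake offset and reports it back to userspace with
 * drm_vma_node_offset_addr(). The surrounding lookup and args struct are
 * hypothetical.
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 */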
/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
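
/*
 * Example (illustrative sketch only): the two helpers are used as a pair
 * around device access to the backing store. Passing 0 as the gfp mask is
 * fine here, since drm_gem_get_pages() ORs in the mapping's own gfp mask.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj, 0);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	... build a scatterlist / pin the pages for device access ...
 *
 *	drm_gem_put_pages(obj, pages, true, false);
 */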
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
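
/*
 * Example (illustrative sketch only): every successful lookup returns a
 * reference that the caller must drop again once done with the object.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	... operate on the object ...
 *
 *	drm_gem_object_unreference_unlocked(obj);
 */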
/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
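
/*
 * Example (illustrative sketch only): a driver's ->gem_free_object callback
 * releases the core state and then frees its wrapper. example_gem_object is
 * the hypothetical type from the sketch near drm_gem_object_init().
 *
 *	static void example_gem_free_object(struct drm_gem_object *gem_obj)
 *	{
 *		struct example_gem_object *obj =
 *			container_of(gem_obj, struct example_gem_object, base);
 *
 *		drm_gem_object_release(gem_obj);
 *		kfree(obj);
 *	}
 */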
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
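
/*
 * Example (illustrative sketch only): drivers hook these helpers into their
 * vm_operations_struct so per-mapping object references are managed for
 * them; example_gem_fault is a hypothetical driver fault handler.
 *
 *	static const struct vm_operations_struct example_gem_vm_ops = {
 *		.fault = example_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */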
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
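
/*
 * Example (illustrative sketch only): drivers wire drm_gem_mmap() into their
 * file_operations so that mmap(2) on the device node resolves GEM fake
 * offsets; example_driver_fops is a hypothetical name.
 *
 *	static const struct file_operations example_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 */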