/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;

	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}
void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}
/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	drm_gem_private_object_init(dev, obj, size);
	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
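/*
 * Example (illustrative sketch only, not part of the original file): a
 * driver that embeds struct drm_gem_object might wrap this helper roughly
 * as follows.  "struct foo_bo" and "foo_bo_create" are hypothetical names.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 *
 * The size must be page aligned, since drm_gem_private_object_init() BUG_ON()s
 * otherwise; PAGE_ALIGN() takes care of that here.
 */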
/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
}
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	if (obj->import_attach) {
		drm_prime_remove_buf_handle(&filp->prime,
					    obj->import_attach->dmabuf);
	}
	if (obj->export_dma_buf) {
		drm_prime_remove_buf_handle(&filp->prime,
					    obj->export_dma_buf);
	}
}
static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	}
}
static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0)
		drm_gem_object_handle_free(obj);
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}
/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	drm_gem_remove_prime_handles(obj, filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
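/*
 * Example (illustrative sketch only, not part of the original file): a
 * typical driver "create" ioctl allocates an object, publishes a handle for
 * it and then drops its local reference, leaving the handle as the only
 * thing keeping the object alive.  "foo_bo_create" and "args" are
 * hypothetical names used only for this sketch.
 *
 *	struct foo_bo *bo;
 *	u32 handle;
 *	int ret;
 *
 *	bo = foo_bo_create(dev, args->size);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 *
 * Dropping the local reference right after handle creation is what the
 * "Callers will likely want to dereference the object afterwards" note
 * above refers to: on error the object is freed, on success the handle
 * reference keeps it alive.
 */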
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
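/*
 * Example (illustrative sketch only, not part of the original file): a
 * driver's dumb_map_offset implementation would typically look up the
 * object, make sure a fake offset exists and hand the resulting byte
 * offset back to userspace for a later mmap(2).  The helper
 * drm_vma_node_offset_addr() is assumed to come from drm_vma_manager.h.
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 *	drm_gem_object_unreference_unlocked(obj);
 *	return ret;
 */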
/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);
/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
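/*
 * Example (illustrative sketch only, not part of the original file): the
 * two helpers above are meant to be used as a pair.  A driver pinning an
 * object's backing storage might do something along these lines, where
 * "bo->pages" is a hypothetical field in the driver's object struct:
 *
 *	bo->pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);
 *	if (IS_ERR(bo->pages))
 *		return PTR_ERR(bo->pages);
 *
 *	... map or access the pages ...
 *
 *	drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *	bo->pages = NULL;
 *
 * Passing dirty = true makes sure CPU writes are written back to shmem
 * before the pages can be reclaimed.
 */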
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}
/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table.  */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj)
		drm_gem_object_reference(obj);
	mutex_unlock(&dev->object_name_lock);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_handle_create(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
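/*
 * Example (illustrative sketch only, not part of the original file): from
 * userspace, the flink/open pair above is driven through the generic DRM
 * ioctls.  One process names a buffer it owns and a second process opens
 * it by that global name; error checking of the ioctl() calls is omitted
 * for brevity.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	    ... pass flink.name to the other process ...
 *
 *	struct drm_gem_open open_arg = { .name = name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	    ... open_arg.handle and open_arg.size are now valid ...
 */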
/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}
/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	drm_gem_remove_prime_handles(obj, file_priv);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);
/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);
/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
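/*
 * Example (illustrative sketch only, not part of the original file): a
 * driver implementing the dma-buf mmap callback on top of this helper
 * would take struct_mutex itself, since no fake-offset lookup is involved.
 * "foo_gem_dmabuf_mmap" is a hypothetical callback name.
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		struct drm_device *dev = obj->dev;
 *		int ret;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&dev->struct_mutex);
 *
 *		return ret;
 *	}
 */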
/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);