/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
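
/* convert an error code from the fault path into a VM_FAULT_* return flag. */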
static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cachable as default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}

out:
	return roundup(size, PAGE_SIZE);
}
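
/*
 * look up and take a reference on each shmem page backing the GEM object,
 * returning them as a newly allocated page array.
 */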
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
						gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = obj->filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(PTR_ERR(p));
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages,
					bool dirty, bool accessed)
{
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
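
/* map a single page of the buffer into user space at the faulting address. */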
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		ret = -EFAULT;
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* add all pages to the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* TODO: add handling for the UNCACHED type here. */

	buf->pages = pages;

	return ret;

err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages, true, false);

	return ret;
}

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG then release all pages
	 * allocated in the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages, true, false);
	buf->pages = NULL;

	/* TODO: add handling for the UNCACHED type here. */
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the obj is registered;
	 * the handle holds the id that user space can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if (!buf->pages)
		return;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages of the requested size if user space wants
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);

	return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
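
/*
 * look up a GEM object by handle and return a pointer to its DMA address;
 * the reference taken here is released by exynos_drm_gem_put_dma_addr().
 */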
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("not support NONCONTIG type.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * the buffer describes the physically contiguous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
								PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned int addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_CREATE_DUMB command.
	 */
	args->pitch = args->width * args->bpp >> 3;
	args->size = PAGE_ALIGN(args->pitch * args->height);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get offset of memory allocated for drm framebuffer.
	 * - this callback would be called by user application
	 *	with DRM_IOCTL_MODE_MAP_DUMB command.
	 */
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (!exynos_gem_obj->base.map_list.map) {
		ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
		if (ret)
			goto out;
	}

	*offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}
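
/* page fault handler: map the faulting page of the GEM buffer into the VMA. */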
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}