/* exynos_drm_buf.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
2012-10-02 18:01:07 +01:00
# include <drm/drmP.h>
# include <drm/exynos_drm.h>
2011-10-04 19:19:01 +09:00
# include "exynos_drm_drv.h"
2011-11-12 15:23:32 +09:00
# include "exynos_drm_gem.h"
2011-10-04 19:19:01 +09:00
# include "exynos_drm_buf.h"
2012-12-26 18:06:01 +09:00
# include "exynos_drm_iommu.h"
2011-10-04 19:19:01 +09:00
static int lowlevel_buffer_allocate ( struct drm_device * dev ,
2012-03-16 18:47:05 +09:00
unsigned int flags , struct exynos_drm_gem_buf * buf )
2011-10-04 19:19:01 +09:00
{
2012-10-20 07:53:42 -07:00
int ret = 0 ;
2012-12-14 14:34:31 +09:00
enum dma_attr attr ;
2012-12-07 17:51:27 +09:00
unsigned int nr_pages ;
2012-03-16 18:47:05 +09:00
if ( buf - > dma_addr ) {
DRM_DEBUG_KMS ( " already allocated. \n " ) ;
return 0 ;
}
2012-10-20 07:53:42 -07:00
init_dma_attrs ( & buf - > dma_attrs ) ;
2012-12-14 14:34:31 +09:00
/*
* if EXYNOS_BO_CONTIG , fully physically contiguous memory
* region will be allocated else physically contiguous
* as possible .
*/
2012-12-27 19:54:23 +09:00
if ( ! ( flags & EXYNOS_BO_NONCONTIG ) )
2012-12-14 14:34:31 +09:00
dma_set_attr ( DMA_ATTR_FORCE_CONTIGUOUS , & buf - > dma_attrs ) ;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE , writecombine mapping
* else cachable mapping .
*/
if ( flags & EXYNOS_BO_WC | | ! ( flags & EXYNOS_BO_CACHABLE ) )
2012-10-20 07:53:42 -07:00
attr = DMA_ATTR_WRITE_COMBINE ;
2012-12-14 14:34:31 +09:00
else
attr = DMA_ATTR_NON_CONSISTENT ;
2012-10-20 07:53:42 -07:00
dma_set_attr ( attr , & buf - > dma_attrs ) ;
2012-12-07 17:51:27 +09:00
dma_set_attr ( DMA_ATTR_NO_KERNEL_MAPPING , & buf - > dma_attrs ) ;
2012-10-20 07:53:42 -07:00
2012-12-26 18:06:01 +09:00
nr_pages = buf - > size > > PAGE_SHIFT ;
if ( ! is_drm_iommu_supported ( dev ) ) {
dma_addr_t start_addr ;
unsigned int i = 0 ;
2013-07-03 17:09:20 +09:00
buf - > pages = drm_calloc_large ( nr_pages , sizeof ( struct page * ) ) ;
2012-12-26 18:06:01 +09:00
if ( ! buf - > pages ) {
DRM_ERROR ( " failed to allocate pages. \n " ) ;
return - ENOMEM ;
}
buf - > kvaddr = dma_alloc_attrs ( dev - > dev , buf - > size ,
& buf - > dma_addr , GFP_KERNEL ,
& buf - > dma_attrs ) ;
if ( ! buf - > kvaddr ) {
DRM_ERROR ( " failed to allocate buffer. \n " ) ;
2013-07-03 17:09:21 +09:00
ret = - ENOMEM ;
goto err_free ;
2012-12-26 18:06:01 +09:00
}
start_addr = buf - > dma_addr ;
while ( i < nr_pages ) {
buf - > pages [ i ] = phys_to_page ( start_addr ) ;
start_addr + = PAGE_SIZE ;
i + + ;
}
} else {
buf - > pages = dma_alloc_attrs ( dev - > dev , buf - > size ,
& buf - > dma_addr , GFP_KERNEL ,
& buf - > dma_attrs ) ;
if ( ! buf - > pages ) {
DRM_ERROR ( " failed to allocate buffer. \n " ) ;
return - ENOMEM ;
}
2012-03-16 18:47:05 +09:00
}
2012-12-07 17:51:27 +09:00
buf - > sgt = drm_prime_pages_to_sg ( buf - > pages , nr_pages ) ;
2012-03-16 18:47:05 +09:00
if ( ! buf - > sgt ) {
2012-12-07 17:51:27 +09:00
DRM_ERROR ( " failed to get sg table. \n " ) ;
2012-10-20 07:53:42 -07:00
ret = - ENOMEM ;
goto err_free_attrs ;
2011-10-04 19:19:01 +09:00
}
2012-12-07 17:51:27 +09:00
DRM_DEBUG_KMS ( " dma_addr(0x%lx), size(0x%lx) \n " ,
2012-03-16 18:47:05 +09:00
( unsigned long ) buf - > dma_addr ,
buf - > size ) ;
return ret ;
2012-10-20 07:53:42 -07:00
err_free_attrs :
2012-12-07 17:51:27 +09:00
dma_free_attrs ( dev - > dev , buf - > size , buf - > pages ,
2012-10-20 07:53:42 -07:00
( dma_addr_t ) buf - > dma_addr , & buf - > dma_attrs ) ;
buf - > dma_addr = ( dma_addr_t ) NULL ;
2013-07-03 17:09:21 +09:00
err_free :
2012-12-26 18:06:01 +09:00
if ( ! is_drm_iommu_supported ( dev ) )
2013-07-03 17:09:19 +09:00
drm_free_large ( buf - > pages ) ;
2012-12-26 18:06:01 +09:00
2012-03-16 18:47:05 +09:00
return ret ;
2011-10-04 19:19:01 +09:00
}
static void lowlevel_buffer_deallocate ( struct drm_device * dev ,
2012-03-16 18:47:05 +09:00
unsigned int flags , struct exynos_drm_gem_buf * buf )
2011-10-04 19:19:01 +09:00
{
2012-03-16 18:47:05 +09:00
if ( ! buf - > dma_addr ) {
DRM_DEBUG_KMS ( " dma_addr is invalid. \n " ) ;
return ;
}
2012-12-07 17:51:27 +09:00
DRM_DEBUG_KMS ( " dma_addr(0x%lx), size(0x%lx) \n " ,
2012-03-16 18:47:05 +09:00
( unsigned long ) buf - > dma_addr ,
buf - > size ) ;
sg_free_table ( buf - > sgt ) ;
kfree ( buf - > sgt ) ;
buf - > sgt = NULL ;
2012-12-26 18:06:01 +09:00
if ( ! is_drm_iommu_supported ( dev ) ) {
dma_free_attrs ( dev - > dev , buf - > size , buf - > kvaddr ,
( dma_addr_t ) buf - > dma_addr , & buf - > dma_attrs ) ;
2013-07-03 17:09:19 +09:00
drm_free_large ( buf - > pages ) ;
2012-12-26 18:06:01 +09:00
} else
dma_free_attrs ( dev - > dev , buf - > size , buf - > pages ,
2012-10-20 07:53:42 -07:00
( dma_addr_t ) buf - > dma_addr , & buf - > dma_attrs ) ;
2012-12-26 18:06:01 +09:00
2012-03-16 18:47:05 +09:00
buf - > dma_addr = ( dma_addr_t ) NULL ;
2011-10-04 19:19:01 +09:00
}
2012-03-16 18:47:05 +09:00
struct exynos_drm_gem_buf * exynos_drm_init_buf ( struct drm_device * dev ,
unsigned int size )
2011-10-04 19:19:01 +09:00
{
2011-11-12 15:23:32 +09:00
struct exynos_drm_gem_buf * buffer ;
2011-10-04 19:19:01 +09:00
2011-11-12 15:23:32 +09:00
DRM_DEBUG_KMS ( " desired size = 0x%x \n " , size ) ;
2011-10-04 19:19:01 +09:00
2011-11-12 15:23:32 +09:00
buffer = kzalloc ( sizeof ( * buffer ) , GFP_KERNEL ) ;
2013-08-19 19:04:55 +09:00
if ( ! buffer )
2011-12-13 14:20:23 +09:00
return NULL ;
2011-10-04 19:19:01 +09:00
2011-11-12 15:23:32 +09:00
buffer - > size = size ;
return buffer ;
2011-10-04 19:19:01 +09:00
}
2012-03-16 18:47:05 +09:00
/*
 * Destroy a buffer descriptor created by exynos_drm_init_buf().
 * The backing memory must already have been released via
 * exynos_drm_free_buf(); only the descriptor itself is freed here.
 *
 * kfree(NULL) is a no-op, so a NULL @buffer is tolerated.  The old
 * "buffer = NULL;" after kfree() was a dead store to a by-value
 * parameter (invisible to the caller) and has been dropped.
 */
void exynos_drm_fini_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buffer)
{
	kfree(buffer);
}
2012-03-16 18:47:05 +09:00
/*
 * Allocate the backing memory region for @buf and fill in its
 * dma_addr/pages/sgt fields according to @flags (EXYNOS_BO_*).
 *
 * Returns 0 on success or a negative error code.  The low-level
 * helper's code is propagated directly instead of being collapsed
 * to a hard-coded -ENOMEM, so future error codes survive intact.
 */
int exynos_drm_alloc_buf(struct drm_device *dev,
		struct exynos_drm_gem_buf *buf, unsigned int flags)
{
	return lowlevel_buffer_allocate(dev, flags, buf);
}
/*
 * Release the memory region previously set up by exynos_drm_alloc_buf().
 * The descriptor itself remains valid and must be destroyed separately
 * with exynos_drm_fini_buf().
 */
void exynos_drm_free_buf(struct drm_device *dev,
		unsigned int flags, struct exynos_drm_gem_buf *buffer)
{
	lowlevel_buffer_deallocate(dev, flags, buffer);
}