// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-buf.h>
#include <linux/vmalloc.h>	/* vmap()/vunmap() in the PRIME helpers */

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
# include "mtk_drm_drv.h"
# include "mtk_drm_gem.h"
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}
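
/*
 * Create a GEM object backed by a write-combined DMA allocation. When
 * alloc_kmap is false, DMA_ATTR_NO_KERNEL_MAPPING is added so no kernel
 * virtual mapping is created and only the opaque cookie is kept.
 */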
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr,
			 size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
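
/*
 * Free a GEM object. Imported PRIME buffers are released through
 * drm_prime_gem_destroy(); buffers we allocated ourselves go back to the
 * DMA allocator with the same attributes they were allocated with.
 */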
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}
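
/*
 * Dumb-buffer creation: derive pitch and size from the requested
 * width/height/bpp, allocate a write-combined buffer without a kernel
 * mapping, and return a handle for it to userspace.
 */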
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an id in the idr table where the object is registered;
	 * the handle carries the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}
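
/*
 * Common mmap helper: let the DMA API map the backing allocation into the
 * given VMA, honoring the attributes the buffer was allocated with.
 */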
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so
	 * clear VM_PFNMAP flag that was set by drm_gem_mmap_obj()/
	 * drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
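
/*
 * mmap entry point for PRIME: set up the VMA with drm_gem_mmap_obj() and
 * then perform the actual mapping.
 */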
int mtk_drm_gem_mmap_buf(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return mtk_drm_gem_object_mmap(obj, vma);
}
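
/*
 * mmap entry point for the DRM device file: drm_gem_mmap() looks up the GEM
 * object from the fake offset and validates the request before we map it.
 */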
int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	obj = vma->vm_private_data;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	return mtk_drm_gem_object_mmap(obj, vma);
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}
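
/*
 * Import a PRIME buffer from its sg_table. The buffer must occupy a single
 * contiguous DMA range, so the import is rejected unless every entry follows
 * the previous one back-to-back.
 */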
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;
	struct scatterlist *s;
	unsigned int i;
	dma_addr_t expected;

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	expected = sg_dma_address(sg->sgl);
	for_each_sg(sg->sgl, s, sg->nents, i) {
		if (sg_dma_address(s) != expected) {
			DRM_ERROR("sg_table is not contiguous");
			ret = -EINVAL;
			goto err_gem_free;
		}
		expected = sg_dma_address(s) + sg_dma_len(s);
	}

	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;

err_gem_free:
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
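
/*
 * Map the buffer into kernel virtual address space for PRIME vmap. The page
 * array is built from a freshly allocated sg_table and mapped write-combined
 * to match the allocation attributes.
 */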
void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct sg_table *sgt;
	struct sg_page_iter iter;
	unsigned int npages;
	unsigned int i = 0;

	if (mtk_gem->kvaddr)
		return mtk_gem->kvaddr;

	sgt = mtk_gem_prime_get_sg_table(obj);
	if (IS_ERR(sgt))
		return NULL;

	npages = obj->size >> PAGE_SHIFT;
	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
	if (!mtk_gem->pages)
		goto out;

	for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
		/* Don't write past the end of the pages array. */
		if (i >= npages)
			break;
		mtk_gem->pages[i++] = sg_page_iter_page(&iter);
	}

	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
			       pgprot_writecombine(PAGE_KERNEL));

out:
	/*
	 * Free both the table's contents and the table itself, as
	 * mtk_gem_prime_get_sg_table() requires of its callers.
	 */
	sg_free_table(sgt);
	kfree(sgt);
	return mtk_gem->kvaddr;
}
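
/*
 * Undo mtk_drm_gem_prime_vmap(): drop the kernel mapping and free the page
 * array. Buffers that were never vmapped (no pages array) are left alone.
 */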
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);

	if (!mtk_gem->pages)
		return;

	vunmap(vaddr);
	mtk_gem->kvaddr = NULL;
	kfree(mtk_gem->pages);
}