// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/dma-buf.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"

static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma);

static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
	.free = mtk_drm_gem_free_object,
	.get_sg_table = mtk_gem_prime_get_sg_table,
	.vmap = mtk_drm_gem_prime_vmap,
	.vunmap = mtk_drm_gem_prime_vunmap,
	.mmap = mtk_drm_gem_object_mmap,
	.vm_ops = &drm_gem_cma_vm_ops,
};
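
/*
 * Allocate a zeroed mtk_drm_gem_obj and register its embedded
 * drm_gem_object with the DRM core. The requested size is rounded up to
 * a page multiple before initialization.
 */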
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct mtk_drm_gem_obj *mtk_gem_obj;
	int ret;

	size = round_up(size, PAGE_SIZE);

	mtk_gem_obj = kzalloc(sizeof(*mtk_gem_obj), GFP_KERNEL);
	if (!mtk_gem_obj)
		return ERR_PTR(-ENOMEM);

	mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;

	ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(mtk_gem_obj);
		return ERR_PTR(ret);
	}

	return mtk_gem_obj;
}
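
/*
 * Allocate a GEM object backed by DMA memory. The buffer is allocated
 * write-combined; unless @alloc_kmap is true, DMA_ATTR_NO_KERNEL_MAPPING
 * is set so no kernel virtual mapping is created for it.
 */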
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
					   size_t size, bool alloc_kmap)
{
	struct mtk_drm_private *priv = dev->dev_private;
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
					  &mtk_gem->dma_addr, GFP_KERNEL,
					  mtk_gem->dma_attrs);
	if (!mtk_gem->cookie) {
		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
		ret = -ENOMEM;
		goto err_gem_free;
	}

	if (alloc_kmap)
		mtk_gem->kvaddr = mtk_gem->cookie;

	DRM_DEBUG_DRIVER("cookie = %p dma_addr = %pad size = %zu\n",
			 mtk_gem->cookie, &mtk_gem->dma_addr,
			 size);

	return mtk_gem;

err_gem_free:
	drm_gem_object_release(obj);
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
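
/*
 * Free a GEM object. Imported PRIME buffers (mtk_gem->sg set by
 * mtk_gem_prime_import_sg_table()) are torn down through
 * drm_prime_gem_destroy(); locally allocated buffers release their DMA
 * memory directly.
 */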
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	if (mtk_gem->sg)
		drm_prime_gem_destroy(obj, mtk_gem->sg);
	else
		dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
			       mtk_gem->dma_addr, mtk_gem->dma_attrs);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(mtk_gem);
}
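
/*
 * Implement the DRM "dumb buffer" ioctl: derive pitch and size from the
 * requested width/height/bpp, allocate the buffer and return a handle to
 * it. For example, a 1920x1080 buffer at 32 bpp gives
 * pitch = 1920 * 32 / 8 = 7680 bytes and size = 7680 * 1080 bytes.
 */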
int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct mtk_drm_gem_obj *mtk_gem;
	int ret;

	args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	args->size = args->pitch * args->height;

	mtk_gem = mtk_drm_gem_create(dev, args->size, false);
	if (IS_ERR(mtk_gem))
		return PTR_ERR(mtk_gem);

	/*
	 * Allocate an ID in the IDR table, registering the object; the
	 * returned handle is the ID that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, &mtk_gem->base, &args->handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put(&mtk_gem->base);

	return 0;

err_handle_create:
	mtk_drm_gem_free_object(&mtk_gem->base);
	return ret;
}
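
/*
 * Map the buffer into userspace. Reached through
 * drm_gem_object_funcs.mmap for both plain GEM mmap and dma-buf mmap.
 */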
static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
				   struct vm_area_struct *vma)
{
	int ret;
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	/*
	 * dma_alloc_attrs() allocated a struct page table for mtk_gem, so clear
	 * the VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}

/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *mtk_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct mtk_drm_private *priv = obj->dev->dev_private;
	struct sg_table *sgt;
	int ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
				    mtk_gem->dma_addr, obj->size,
				    mtk_gem->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}
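
/*
 * Import a PRIME buffer from its scatter-gather table. This driver only
 * handles physically contiguous buffers, so reject sg_tables that do not
 * cover the whole dma-buf in one contiguous run.
 */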
struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct mtk_drm_gem_obj *mtk_gem;

	/* check if the entries in the sg_table are contiguous */
	if (drm_prime_get_contiguous_size(sg) < attach->dmabuf->size) {
		DRM_ERROR("sg_table is not contiguous");
		return ERR_PTR(-EINVAL);
	}

	mtk_gem = mtk_drm_gem_init(dev, attach->dmabuf->size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	mtk_gem->dma_addr = sg_dma_address(sg->sgl);
	mtk_gem->sg = sg;

	return &mtk_gem->base;
}
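
/*
 * Map the buffer into kernel address space for dma-buf vmap. If no
 * kernel mapping exists yet (the buffer was allocated with
 * DMA_ATTR_NO_KERNEL_MAPPING), build a page array from the sg_table and
 * vmap() it write-combined.
 */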
int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	struct sg_table *sgt = NULL;
	unsigned int npages;

	if (mtk_gem->kvaddr)
		goto out;

	sgt = mtk_gem_prime_get_sg_table(obj);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	npages = obj->size >> PAGE_SHIFT;
	mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
	if (!mtk_gem->pages) {
		sg_free_table(sgt);
		kfree(sgt);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages);

	mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
			       pgprot_writecombine(PAGE_KERNEL));
	if (!mtk_gem->kvaddr) {
		sg_free_table(sgt);
		kfree(sgt);
		kfree(mtk_gem->pages);
		mtk_gem->pages = NULL;
		return -ENOMEM;
	}

out:
	/*
	 * Per mtk_gem_prime_get_sg_table()'s contract, both the table's
	 * contents and the sg_table itself must be freed by the caller.
	 */
	if (sgt) {
		sg_free_table(sgt);
		kfree(sgt);
	}
	iosys_map_set_vaddr(map, mtk_gem->kvaddr);

	return 0;
}
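
/*
 * Tear down the kernel mapping created by mtk_drm_gem_prime_vmap().
 * Buffers that were never vmapped (no page array) are left alone.
 */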
void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj,
			      struct iosys_map *map)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);
	void *vaddr = map->vaddr;

	if (!mtk_gem->pages)
		return;

	vunmap(vaddr);
	mtk_gem->kvaddr = NULL;
	kfree(mtk_gem->pages);
	/* Clear the dangling pointer so a later vmap starts from scratch. */
	mtk_gem->pages = NULL;
}