// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Fuzhou Rockchip Electronics Co. Ltd
 * Author: Mark Yao <mark.yao@rock-chips.com>
 */

#include <linux/dma-buf.h>
#include <linux/iommu.h>

#include <drm/drm.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>

#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
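/*
 * IOMMU path: reserve a contiguous I/O virtual address range from the
 * driver's drm_mm allocator (under mm_lock) and map the object's
 * scatter-gather list into it with iommu_map_sg().  rk_obj->dma_addr is
 * the start of the reserved range and rk_obj->size the number of bytes
 * actually mapped.
 */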
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return 0;
}
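/*
 * Allocate shmem-backed pages for the object and build an sg_table for
 * them.  The DMA addresses are faked up from the physical addresses so
 * that dma_sync_sg_for_device() can be used to flush the CPU caches
 * before the pages are handed to the IOMMU.
 */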
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}
static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}
static int rockchip_gem_alloc_iommu(struct rockchip_gem_object *rk_obj,
				    bool alloc_kmap)
{
	int ret;

	ret = rockchip_gem_get_pages(rk_obj);
	if (ret < 0)
		return ret;

	ret = rockchip_gem_iommu_map(rk_obj);
	if (ret < 0)
		goto err_free;

	if (alloc_kmap) {
		rk_obj->kvaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
				      pgprot_writecombine(PAGE_KERNEL));
		if (!rk_obj->kvaddr) {
			DRM_ERROR("failed to vmap() buffer\n");
			ret = -ENOMEM;
			goto err_unmap;
		}
	}

	return 0;

err_unmap:
	rockchip_gem_iommu_unmap(rk_obj);
err_free:
	rockchip_gem_put_pages(rk_obj);

	return ret;
}
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}
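/*
 * Buffer allocation dispatch: with an IOMMU domain attached the object is
 * backed by individual pages mapped through the IOMMU; without one it
 * falls back to a physically contiguous DMA-API allocation (typically CMA
 * on these platforms).
 */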
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;

	if (private->domain)
		return rockchip_gem_alloc_iommu(rk_obj, alloc_kmap);
	else
		return rockchip_gem_alloc_dma(rk_obj, alloc_kmap);
}
static void rockchip_gem_free_iommu(struct rockchip_gem_object *rk_obj)
{
	vunmap(rk_obj->kvaddr);
	rockchip_gem_iommu_unmap(rk_obj);
	rockchip_gem_put_pages(rk_obj);
}
static void rockchip_gem_free_dma(struct rockchip_gem_object *rk_obj)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
		       rk_obj->dma_attrs);
}
static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
	if (rk_obj->pages)
		rockchip_gem_free_iommu(rk_obj);
	else
		rockchip_gem_free_dma(rk_obj);
}
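/*
 * mmap helpers: page-backed (IOMMU) objects insert their pages into the
 * VMA with vm_map_pages(), while contiguous DMA buffers are mapped with
 * dma_mmap_attrs().  The common rockchip_drm_gem_object_mmap() below
 * clears VM_PFNMAP first, since a struct page table exists for the object.
 */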
static int rockchip_drm_gem_object_mmap_iommu(struct drm_gem_object *obj,
					      struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	unsigned int count = obj->size >> PAGE_SHIFT;
	unsigned long user_count = vma_pages(vma);

	if (user_count == 0)
		return -ENXIO;

	return vm_map_pages(vma, rk_obj->pages, count);
}
static int rockchip_drm_gem_object_mmap_dma(struct drm_gem_object *obj,
					    struct vm_area_struct *vma)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;

	return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
			      obj->size, rk_obj->dma_attrs);
}
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
					struct vm_area_struct *vma)
{
	int ret;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	/*
	 * We allocated a struct page table for rk_obj, so clear
	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
	 */
	vma->vm_flags &= ~VM_PFNMAP;

	if (rk_obj->pages)
		ret = rockchip_drm_gem_object_mmap_iommu(obj, vma);
	else
		ret = rockchip_drm_gem_object_mmap_dma(obj, vma);

	if (ret)
		drm_gem_vm_close(vma);

	return ret;
}
int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	if (ret)
		return ret;

	return rockchip_drm_gem_object_mmap(obj, vma);
}
/* drm driver mmap file operations */
int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret)
		return ret;

	/*
	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
	 * whole buffer from the start.
	 */
	vma->vm_pgoff = 0;

	obj = vma->vm_private_data;

	return rockchip_drm_gem_object_mmap(obj, vma);
}
static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
{
	drm_gem_object_release(&rk_obj->base);
	kfree(rk_obj);
}
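/*
 * Object creation: rockchip_gem_alloc_object() rounds the size up to a
 * page multiple and initializes the base GEM object;
 * rockchip_gem_create_object() additionally allocates the backing
 * storage, optionally with a kernel mapping.
 */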
struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;

	size = round_up(size, PAGE_SIZE);

	rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
	if (!rk_obj)
		return ERR_PTR(-ENOMEM);

	obj = &rk_obj->base;

	drm_gem_object_init(drm, obj, size);

	return rk_obj;
}
struct rockchip_gem_object *
rockchip_gem_create_object(struct drm_device *drm, unsigned int size,
			   bool alloc_kmap)
{
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, size);
	if (IS_ERR(rk_obj))
		return rk_obj;

	ret = rockchip_gem_alloc_buf(rk_obj, alloc_kmap);
	if (ret)
		goto err_free_rk_obj;

	return rk_obj;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);

	return ERR_PTR(ret);
}
/*
 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
 * callback function
 */
void rockchip_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *drm = obj->dev;
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (obj->import_attach) {
		if (private->domain) {
			rockchip_gem_iommu_unmap(rk_obj);
		} else {
			dma_unmap_sg(drm->dev, rk_obj->sgt->sgl,
				     rk_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
		drm_prime_gem_destroy(obj, rk_obj->sgt);
	} else {
		rockchip_gem_free_buf(rk_obj);
	}

	rockchip_gem_release_object(rk_obj);
}
/*
 * rockchip_gem_create_with_handle - allocate an object with the given
 * size and create a gem handle on it
 *
 * returns a struct rockchip_gem_object* on success or ERR_PTR values
 * on failure.
 */
static struct rockchip_gem_object *
rockchip_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *drm, unsigned int size,
				unsigned int *handle)
{
	struct rockchip_gem_object *rk_obj;
	struct drm_gem_object *obj;
	int ret;

	rk_obj = rockchip_gem_create_object(drm, size, false);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	obj = &rk_obj->base;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle holds the id the user can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		goto err_handle_create;

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_put_unlocked(obj);

	return rk_obj;

err_handle_create:
	rockchip_gem_free_object(obj);

	return ERR_PTR(ret);
}
/*
 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
 * function
 *
 * This aligns the pitch and size arguments to the minimum required. Wrap
 * this into your own function if you need bigger alignment.
 */
int rockchip_gem_dumb_create(struct drm_file *file_priv,
			     struct drm_device *dev,
			     struct drm_mode_create_dumb *args)
{
	struct rockchip_gem_object *rk_obj;
	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);

	/*
	 * align to 64 bytes since Mali requires it.
	 *
	 * args->pitch and args->size may arrive uninitialized from userspace,
	 * so they are always recomputed here from width/bpp/height rather
	 * than trusted; a bogus incoming pitch could otherwise request an
	 * enormous allocation.
	 */
	args->pitch = ALIGN(min_pitch, 64);
	args->size = args->pitch * args->height;

	rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
						 &args->handle);

	return PTR_ERR_OR_ZERO(rk_obj);
}
/*
 * Allocate a sg_table for this GEM object.
 * Note: Both the table's contents, and the sg_table itself must be freed by
 * the caller.
 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
 */
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
	struct drm_device *drm = obj->dev;
	struct sg_table *sgt;
	int ret;

	if (rk_obj->pages)
		return drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
				    rk_obj->dma_addr, obj->size,
				    rk_obj->dma_attrs);
	if (ret) {
		DRM_ERROR("failed to allocate sgt, %d\n", ret);
		kfree(sgt);
		return ERR_PTR(ret);
	}

	return sgt;
}
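/*
 * Count how many bytes at the start of the mapped sg list are contiguous
 * in DMA address space.  Used below to verify that an imported buffer
 * maps to a single linear range when no IOMMU is available.
 */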
static unsigned long rockchip_sg_get_contiguous_size(struct sg_table *sgt,
						     int count)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sg(sgt->sgl, s, count, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected = sg_dma_address(s) + sg_dma_len(s);
		size += sg_dma_len(s);
	}

	return size;
}
static int
rockchip_gem_iommu_map_sg(struct drm_device *drm,
			  struct dma_buf_attachment *attach,
			  struct sg_table *sg,
			  struct rockchip_gem_object *rk_obj)
{
	rk_obj->sgt = sg;
	return rockchip_gem_iommu_map(rk_obj);
}
static int
rockchip_gem_dma_map_sg(struct drm_device *drm,
			struct dma_buf_attachment *attach,
			struct sg_table *sg,
			struct rockchip_gem_object *rk_obj)
{
	int count = dma_map_sg(drm->dev, sg->sgl, sg->nents,
			       DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	if (rockchip_sg_get_contiguous_size(sg, count) < attach->dmabuf->size) {
		DRM_ERROR("failed to map sg_table to contiguous linear address.\n");
		dma_unmap_sg(drm->dev, sg->sgl, sg->nents,
			     DMA_BIDIRECTIONAL);
		return -EINVAL;
	}

	rk_obj->dma_addr = sg_dma_address(sg->sgl);
	rk_obj->sgt = sg;
	return 0;
}
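/*
 * PRIME import: wrap the attachment's sg table in a new GEM object and
 * make it reachable by the device, either through the IOMMU or via
 * dma_map_sg(); the non-IOMMU path requires the mapping to be
 * DMA-contiguous.
 */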
struct drm_gem_object *
rockchip_gem_prime_import_sg_table(struct drm_device *drm,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sg)
{
	struct rockchip_drm_private *private = drm->dev_private;
	struct rockchip_gem_object *rk_obj;
	int ret;

	rk_obj = rockchip_gem_alloc_object(drm, attach->dmabuf->size);
	if (IS_ERR(rk_obj))
		return ERR_CAST(rk_obj);

	if (private->domain)
		ret = rockchip_gem_iommu_map_sg(drm, attach, sg, rk_obj);
	else
		ret = rockchip_gem_dma_map_sg(drm, attach, sg, rk_obj);

	if (ret < 0) {
		DRM_ERROR("failed to import sg table: %d\n", ret);
		goto err_free_rk_obj;
	}

	return &rk_obj->base;

err_free_rk_obj:
	rockchip_gem_release_object(rk_obj);
	return ERR_PTR(ret);
}
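/*
 * Kernel mapping for PRIME: page-backed objects get a fresh
 * write-combined vmap(), while DMA-allocated objects reuse the kernel
 * address returned by dma_alloc_attrs() unless the allocation was made
 * with DMA_ATTR_NO_KERNEL_MAPPING.
 */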
void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages)
		return vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
			    pgprot_writecombine(PAGE_KERNEL));

	if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return NULL;

	return rk_obj->kvaddr;
}
void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);

	if (rk_obj->pages) {
		vunmap(vaddr);
		return;
	}

	/* Nothing to do if allocated by DMA mapping API. */
}