/*
 * Copyright (C) 2017 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drm_mm.h>
# include "etnaviv_cmdbuf.h"
# include "etnaviv_gpu.h"
# include "etnaviv_mmu.h"
# include "etnaviv_perfmon.h"

#define SUBALLOC_SIZE SZ_256K
#define SUBALLOC_GRANULE SZ_4K
#define SUBALLOC_GRANULES (SUBALLOC_SIZE / SUBALLOC_GRANULE)
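
/*
 * Command buffers are suballocated from a single write-combined DMA buffer
 * of SUBALLOC_SIZE, split into SUBALLOC_GRANULE sized granules. Allocated
 * granules are tracked in granule_map; allocators that find the pool
 * exhausted sleep on free_event until a command buffer is freed.
 */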
struct etnaviv_cmdbuf_suballoc {
	/* suballocated dma buffer properties */
	struct etnaviv_gpu *gpu;
	void *vaddr;
	dma_addr_t paddr;

	/* GPU mapping */
	u32 iova;
	struct drm_mm_node vram_node; /* only used on MMUv2 */

	/* allocation management */
	struct mutex lock;
	DECLARE_BITMAP(granule_map, SUBALLOC_GRANULES);
	int free_space;
	wait_queue_head_t free_event;
};
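
/*
 * Allocate the write-combined backing memory for the suballocator and map
 * it into the GPU address space.
 */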
struct etnaviv_cmdbuf_suballoc *
etnaviv_cmdbuf_suballoc_new(struct etnaviv_gpu *gpu)
{
	struct etnaviv_cmdbuf_suballoc *suballoc;
	int ret;

	suballoc = kzalloc(sizeof(*suballoc), GFP_KERNEL);
	if (!suballoc)
		return ERR_PTR(-ENOMEM);

	suballoc->gpu = gpu;
	mutex_init(&suballoc->lock);
	init_waitqueue_head(&suballoc->free_event);

	suballoc->vaddr = dma_alloc_wc(gpu->dev, SUBALLOC_SIZE,
				       &suballoc->paddr, GFP_KERNEL);
	if (!suballoc->vaddr) {
		ret = -ENOMEM;
		goto free_suballoc;
	}

	ret = etnaviv_iommu_get_suballoc_va(gpu, suballoc->paddr,
					    &suballoc->vram_node, SUBALLOC_SIZE,
					    &suballoc->iova);
	if (ret)
		goto free_dma;

	return suballoc;

free_dma:
	dma_free_wc(gpu->dev, SUBALLOC_SIZE, suballoc->vaddr, suballoc->paddr);
free_suballoc:
	kfree(suballoc);

	return ERR_PTR(ret);
}
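
/*
 * Unmap the suballocator from the GPU address space and free the backing
 * DMA memory.
 */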
void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
{
	etnaviv_iommu_put_suballoc_va(suballoc->gpu, &suballoc->vram_node,
				      SUBALLOC_SIZE, suballoc->iova);
	dma_free_wc(suballoc->gpu->dev, SUBALLOC_SIZE, suballoc->vaddr,
		    suballoc->paddr);
	kfree(suballoc);
}
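
/*
 * Allocate a new command buffer of @size bytes, rounded up to a power-of-two
 * number of granules. If the suballocator is full, wait up to 10 seconds for
 * another command buffer to be freed before giving up.
 */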
struct etnaviv_cmdbuf *
etnaviv_cmdbuf_new(struct etnaviv_cmdbuf_suballoc *suballoc, u32 size,
		   size_t nr_bos, size_t nr_pmrs)
{
	struct etnaviv_cmdbuf *cmdbuf;
	struct etnaviv_perfmon_request *pmrs;
	size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
				 sizeof(*cmdbuf));
	int granule_offs, order, ret;

	cmdbuf = kzalloc(sz, GFP_KERNEL);
	if (!cmdbuf)
		return NULL;

	sz = sizeof(*pmrs) * nr_pmrs;
	pmrs = kzalloc(sz, GFP_KERNEL);
	if (!pmrs)
		goto out_free_cmdbuf;

	cmdbuf->pmrs = pmrs;
	cmdbuf->suballoc = suballoc;
	cmdbuf->size = size;

	order = order_base_2(ALIGN(size, SUBALLOC_GRANULE) / SUBALLOC_GRANULE);
retry:
	mutex_lock(&suballoc->lock);
	granule_offs = bitmap_find_free_region(suballoc->granule_map,
					       SUBALLOC_GRANULES, order);
	if (granule_offs < 0) {
		suballoc->free_space = 0;
		mutex_unlock(&suballoc->lock);
		ret = wait_event_interruptible_timeout(suballoc->free_event,
						       suballoc->free_space,
						       msecs_to_jiffies(10 * 1000));
		if (!ret) {
			dev_err(suballoc->gpu->dev,
				"Timeout waiting for cmdbuf space\n");
			kfree(cmdbuf->pmrs);
			kfree(cmdbuf);
			return NULL;
		}
		goto retry;
	}
	mutex_unlock(&suballoc->lock);

	cmdbuf->suballoc_offset = granule_offs * SUBALLOC_GRANULE;
	cmdbuf->vaddr = suballoc->vaddr + cmdbuf->suballoc_offset;

	return cmdbuf;

out_free_cmdbuf:
	kfree(cmdbuf);
	return NULL;
}
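
/*
 * Return the command buffer's granules to the suballocator and wake up
 * anyone waiting for free space.
 */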
void etnaviv_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
{
	struct etnaviv_cmdbuf_suballoc *suballoc = cmdbuf->suballoc;
	int order = order_base_2(ALIGN(cmdbuf->size, SUBALLOC_GRANULE) /
				 SUBALLOC_GRANULE);

	mutex_lock(&suballoc->lock);
	bitmap_release_region(suballoc->granule_map,
			      cmdbuf->suballoc_offset / SUBALLOC_GRANULE,
			      order);
	suballoc->free_space = 1;
	mutex_unlock(&suballoc->lock);
	wake_up_all(&suballoc->free_event);

	kfree(cmdbuf->pmrs);
	kfree(cmdbuf);
}
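
/* GPU (MMU) address of the command buffer inside the suballocated region */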
u32 etnaviv_cmdbuf_get_va(struct etnaviv_cmdbuf *buf)
{
	return buf->suballoc->iova + buf->suballoc_offset;
}
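
/* DMA (bus) address of the command buffer inside the suballocated region */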
dma_addr_t etnaviv_cmdbuf_get_pa(struct etnaviv_cmdbuf *buf)
{
	return buf->suballoc->paddr + buf->suballoc_offset;
}