// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

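/*
 * cb_map_mem() - map a CB's DMA buffer into the device MMU.
 * @ctx: context the CB belongs to.
 * @cb: the CB to map.
 *
 * Allocates device virtual addresses from the context's CB VA pool, one MMU
 * page at a time, and maps each page to the CB's bus address. On success the
 * CB is marked as MMU-mapped; on any failure the partial mappings and VA
 * allocations are rolled back.
 */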
static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_vm_va_block *va_block, *tmp;
	dma_addr_t bus_addr;
	u64 virt_addr;
	u32 page_size = prop->pmmu.page_size;
	s32 offset;
	int rc;

	if (!hdev->supports_cb_mapping) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because no VA range is allocated for CB mapping\n");
		return -EINVAL;
	}

	if (!hdev->mmu_enable) {
		dev_err_ratelimited(hdev->dev,
				"Cannot map CB because MMU is disabled\n");
		return -EINVAL;
	}

	INIT_LIST_HEAD(&cb->va_block_list);

	for (bus_addr = cb->bus_address;
			bus_addr < cb->bus_address + cb->size;
			bus_addr += page_size) {

		virt_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, page_size);
		if (!virt_addr) {
			dev_err(hdev->dev,
				"Failed to allocate device virtual address for CB\n");
			rc = -ENOMEM;
			goto err_va_pool_free;
		}

		va_block = kzalloc(sizeof(*va_block), GFP_KERNEL);
		if (!va_block) {
			rc = -ENOMEM;
			gen_pool_free(ctx->cb_va_pool, virt_addr, page_size);
			goto err_va_pool_free;
		}

		va_block->start = virt_addr;
		va_block->end = virt_addr + page_size;
		va_block->size = page_size;
		list_add_tail(&va_block->node, &cb->va_block_list);
	}

	mutex_lock(&ctx->mmu_lock);

	bus_addr = cb->bus_address;
	offset = 0;
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
				va_block->size, list_is_last(&va_block->node,
							&cb->va_block_list));
		if (rc) {
			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
				va_block->start);
			goto err_va_umap;
		}

		bus_addr += va_block->size;
		offset += va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	cb->is_mmu_mapped = true;

	return 0;

err_va_umap:
	list_for_each_entry(va_block, &cb->va_block_list, node) {
		if (offset <= 0)
			break;
		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				offset <= va_block->size);
		offset -= va_block->size;
	}

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

err_va_pool_free:
	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}

	return rc;
}

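/*
 * cb_unmap_mem() - undo cb_map_mem().
 * @ctx: context the CB belongs to.
 * @cb: the CB to unmap.
 *
 * Unmaps every VA block of the CB from the device MMU, invalidates the MMU
 * cache and returns the virtual addresses to the context's CB VA pool.
 */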
static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_vm_va_block *va_block, *tmp;

	mutex_lock(&ctx->mmu_lock);

	list_for_each_entry(va_block, &cb->va_block_list, node)
		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
				list_is_last(&va_block->node,
						&cb->va_block_list)))
			dev_warn_ratelimited(hdev->dev,
					"Failed to unmap CB's va 0x%llx\n",
					va_block->start);

	hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);

	mutex_unlock(&ctx->mmu_lock);

	list_for_each_entry_safe(va_block, tmp, &cb->va_block_list, node) {
		gen_pool_free(ctx->cb_va_pool, va_block->start, va_block->size);
		list_del(&va_block->node);
		kfree(va_block);
	}
}

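/*
 * cb_fini() - free a CB's backing memory and its descriptor.
 *
 * Internal CBs are returned to the device's internal CB pool; other CBs free
 * their DMA coherent buffer.
 */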
static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_internal)
		gen_pool_free(hdev->internal_cb_pool,
				(uintptr_t)cb->kernel_address, cb->size);
	else
		hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
				cb->kernel_address, cb->bus_address);

	kfree(cb);
}

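/* cb_do_release() - return a pool CB to the device CB pool, or free it */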
static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		spin_lock(&hdev->cb_pool_lock);
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}

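/*
 * cb_release() - kref release callback of a CB.
 *
 * Called when the last reference to the CB is dropped: removes the CB from
 * debugfs, unmaps it from the device MMU if it was mapped, releases the
 * context reference and finally frees or re-pools the CB.
 */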
static void cb_release(struct kref *ref)
{
	struct hl_device *hdev;
	struct hl_cb *cb;

	cb = container_of(ref, struct hl_cb, refcount);
	hdev = cb->hdev;

	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);

	cb_do_release(hdev, cb);
}

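/*
 * hl_cb_alloc() - allocate a CB descriptor and its backing memory.
 *
 * Internal CBs are carved out of the device's internal CB pool, while kernel
 * and user context CBs get DMA coherent memory. Kernel-context allocations
 * first try GFP_ATOMIC and fall back to GFP_KERNEL (see the comment below
 * about the latency-sensitive submission path).
 */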
static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
					int ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;
	u32 cb_offset;
	void *p;

	/*
	 * We use GFP_ATOMIC here because this function can be called from
	 * the latency-sensitive code path for command submission. Due to H/W
	 * limitations in some of the ASICs, the kernel must copy the user CB
	 * that is designated for an external queue and actually enqueue
	 * the kernel's copy. Hence, we must never sleep in this code section
	 * and must use GFP_ATOMIC for all memory allocations.
	 */
	if (ctx_id == HL_KERNEL_ASID_ID && !hdev->disabled)
		cb = kzalloc(sizeof(*cb), GFP_ATOMIC);

	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return NULL;

	if (internal_cb) {
		p = (void *) gen_pool_alloc(hdev->internal_cb_pool, cb_size);
		if (!p) {
			kfree(cb);
			return NULL;
		}

		cb_offset = p - hdev->internal_cb_pool_virt_addr;
		cb->is_internal = true;
		cb->bus_address = hdev->internal_cb_va_base + cb_offset;
	} else if (ctx_id == HL_KERNEL_ASID_ID) {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address, GFP_ATOMIC);
		if (!p)
			p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
					cb_size, &cb->bus_address, GFP_KERNEL);
	} else {
		p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
						&cb->bus_address,
						GFP_USER | __GFP_ZERO);
	}

	if (!p) {
		dev_err(hdev->dev,
			"failed to allocate %d of dma memory for CB\n",
			cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}

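/*
 * hl_cb_create() - create a new command buffer.
 * @hdev: habanalabs device structure.
 * @mgr: CB manager that will own the new handle.
 * @ctx: context the CB is created for.
 * @cb_size: requested CB size in bytes.
 * @internal_cb: true if the CB should come from the internal CB pool.
 * @map_cb: true if the CB should also be mapped to the device MMU.
 * @handle: returned opaque handle, already shifted for mmap usage.
 *
 * Reuses a CB from the device CB pool when possible (kernel context and a
 * small enough size), otherwise allocates a new one, optionally maps it to
 * the device MMU and publishes it through the manager's IDR.
 */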
int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
			bool map_cb, u64 *handle)
{
	struct hl_cb *cb;
	bool alloc_new_cb = true;
	int rc, ctx_id = ctx->asid;

	/*
	 * Can't use generic function to check this because of special case
	 * where we create a CB as part of the reset process
	 */
	if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
					(ctx_id != HL_KERNEL_ASID_ID))) {
		dev_warn_ratelimited(hdev->dev,
			"Device is disabled or in reset. Can't create new CBs\n");
		rc = -EBUSY;
		goto out_err;
	}

	if (cb_size > SZ_2M) {
		dev_err(hdev->dev, "CB size %d must be less than %d\n",
			cb_size, SZ_2M);
		rc = -EINVAL;
		goto out_err;
	}

	if (!internal_cb) {
		/* Minimum allocation must be PAGE SIZE */
		if (cb_size < PAGE_SIZE)
			cb_size = PAGE_SIZE;

		if (ctx_id == HL_KERNEL_ASID_ID &&
				cb_size <= hdev->asic_prop.cb_pool_cb_size) {

			spin_lock(&hdev->cb_pool_lock);
			if (!list_empty(&hdev->cb_pool)) {
				cb = list_first_entry(&hdev->cb_pool,
						typeof(*cb), pool_list);
				list_del(&cb->pool_list);
				spin_unlock(&hdev->cb_pool_lock);
				alloc_new_cb = false;
			} else {
				spin_unlock(&hdev->cb_pool_lock);
				dev_dbg(hdev->dev, "CB pool is empty\n");
			}
		}
	}

	if (alloc_new_cb) {
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
		if (!cb) {
			rc = -ENOMEM;
			goto out_err;
		}
	}

	cb->hdev = hdev;
	cb->ctx = ctx;
	hl_ctx_get(hdev, cb->ctx);

	if (map_cb) {
		if (ctx_id == HL_KERNEL_ASID_ID) {
			dev_err(hdev->dev,
				"CB mapping is not supported for kernel context\n");
			rc = -EINVAL;
			goto release_cb;
		}

		rc = cb_map_mem(ctx, cb);
		if (rc)
			goto release_cb;
	}

	spin_lock(&mgr->cb_lock);
	rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC);
	if (rc < 0)
		rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_KERNEL);
	spin_unlock(&mgr->cb_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n");
		goto unmap_mem;
	}

	cb->id = (u64) rc;

	kref_init(&cb->refcount);
	spin_lock_init(&cb->lock);

	/*
	 * idr is 32-bit so we can safely OR it with a mask that is above
	 * 32 bit
	 */
	*handle = cb->id | HL_MMAP_TYPE_CB;
	*handle <<= PAGE_SHIFT;

	hl_debugfs_add_cb(cb);

	return 0;

unmap_mem:
	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);
release_cb:
	hl_ctx_put(cb->ctx);
	cb_do_release(hdev, cb);
out_err:
	*handle = 0;

	return rc;
}

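/*
 * hl_cb_destroy() - destroy a CB by its user-visible handle.
 *
 * Shifts the handle back to the raw IDR value, removes the CB from the
 * manager's IDR and drops the creation reference. The memory itself is
 * released only when the last reference (e.g. an active mmap) goes away.
 */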
int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/*
	 * The handle was given to the user for mmap, so shift it back to the
	 * raw value allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (cb) {
		idr_remove(&mgr->cb_handles, handle);
		spin_unlock(&mgr->cb_lock);
		kref_put(&cb->refcount, cb_release);
	} else {
		spin_unlock(&mgr->cb_lock);
		dev_err(hdev->dev,
			"CB destroy failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
	}

	return rc;
}

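/* hl_cb_info() - report how many command submissions currently use the CB */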
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u64 cb_handle, u32 *usage_cnt)
{
	struct hl_cb *cb;
	u32 handle;
	int rc = 0;

	/* The CB handle was given to the user for mmap, so shift it back to
	 * the value that was allocated by the IDR module.
	 */
	cb_handle >>= PAGE_SHIFT;
	handle = (u32) cb_handle;

	spin_lock(&mgr->cb_lock);

	cb = idr_find(&mgr->cb_handles, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB info failed, no match to handle 0x%x\n", handle);
		rc = -EINVAL;
		goto out;
	}

	*usage_cnt = atomic_read(&cb->cs_cnt);

out:
	spin_unlock(&mgr->cb_lock);
	return rc;
}

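/* hl_cb_ioctl() - dispatcher for the CB IOCTL: create, destroy and info */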
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
{
	union hl_cb_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	u64 handle = 0;
	u32 usage_cnt = 0;
	int rc;

	if (!hl_device_operational(hdev, &status)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute CB IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->in.op) {
	case HL_CB_OP_CREATE:
		if (args->in.cb_size > HL_MAX_CB_SIZE) {
			dev_err(hdev->dev,
				"User requested CB size %d must be less than %d\n",
				args->in.cb_size, HL_MAX_CB_SIZE);
			rc = -EINVAL;
		} else {
			rc = hl_cb_create(hdev, &hpriv->cb_mgr, hpriv->ctx,
					args->in.cb_size, false,
					!!(args->in.flags & HL_CB_FLAGS_MAP),
					&handle);
		}

		memset(args, 0, sizeof(*args));
		args->out.cb_handle = handle;
		break;

	case HL_CB_OP_DESTROY:
		rc = hl_cb_destroy(hdev, &hpriv->cb_mgr,
					args->in.cb_handle);
		break;

	case HL_CB_OP_INFO:
		rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
				&usage_cnt);
		memset(args, 0, sizeof(*args));
		args->out.usage_cnt = usage_cnt;
		break;

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

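/*
 * cb_vm_close() - VMA close callback of a mmapped CB.
 *
 * Tracks partial unmaps; only when the entire mapping is gone is the CB
 * marked as not mmapped and its mmap reference dropped.
 */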
static void cb_vm_close(struct vm_area_struct *vma)
{
	struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data;
	long new_mmap_size;

	new_mmap_size = cb->mmap_size - (vma->vm_end - vma->vm_start);

	if (new_mmap_size > 0) {
		cb->mmap_size = new_mmap_size;
		return;
	}

	spin_lock(&cb->lock);
	cb->mmap = false;
	spin_unlock(&cb->lock);

	hl_cb_put(cb);
	vma->vm_private_data = NULL;
}

static const struct vm_operations_struct cb_vm_ops = {
	.close = cb_vm_close
};

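/*
 * hl_cb_mmap() - mmap a CB's kernel buffer to user-space.
 *
 * The CB handle arrives in vma->vm_pgoff. The requested size must match the
 * page-aligned CB size and a CB may be mmapped only once. A CB reference is
 * taken here and transferred to vma->vm_private_data, to be dropped by
 * cb_vm_close() when the mapping is fully torn down.
 */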
int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cb *cb;
	u32 handle, user_cb_size;
	int rc;

	/* We use the page offset to hold the idr and thus we need to clear
	 * it before doing the mmap itself
	 */
	handle = vma->vm_pgoff;
	vma->vm_pgoff = 0;

	/* reference was taken here */
	cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle);
	if (!cb) {
		dev_err(hdev->dev,
			"CB mmap failed, no match to handle 0x%x\n", handle);
		return -EINVAL;
	}

	/* Validation check */
	user_cb_size = vma->vm_end - vma->vm_start;
	if (user_cb_size != ALIGN(cb->size, PAGE_SIZE)) {
		dev_err(hdev->dev,
			"CB mmap failed, mmap size 0x%lx != 0x%x cb size\n",
			vma->vm_end - vma->vm_start, cb->size);
		rc = -EINVAL;
		goto put_cb;
	}

	if (!access_ok((void __user *) (uintptr_t) vma->vm_start,
							user_cb_size)) {
		dev_err(hdev->dev,
			"user pointer is invalid - 0x%lx\n",
			vma->vm_start);

		rc = -EINVAL;
		goto put_cb;
	}

	spin_lock(&cb->lock);

	if (cb->mmap) {
		dev_err(hdev->dev,
			"CB mmap failed, CB already mmaped to user\n");
		rc = -EINVAL;
		goto release_lock;
	}

	cb->mmap = true;

	spin_unlock(&cb->lock);

	vma->vm_ops = &cb_vm_ops;

	/*
	 * Note: We're transferring the cb reference to
	 * vma->vm_private_data here.
	 */

	vma->vm_private_data = cb;

	rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address,
					cb->bus_address, cb->size);
	if (rc) {
		spin_lock(&cb->lock);
		cb->mmap = false;
		goto release_lock;
	}

	cb->mmap_size = cb->size;
	vma->vm_pgoff = handle;

	return 0;

release_lock:
	spin_unlock(&cb->lock);
put_cb:
	hl_cb_put(cb);
	return rc;
}

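/* hl_cb_get() - find a CB by handle and take a reference, or return NULL */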
struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
			u32 handle)
{
	struct hl_cb *cb;

	spin_lock(&mgr->cb_lock);
	cb = idr_find(&mgr->cb_handles, handle);

	if (!cb) {
		spin_unlock(&mgr->cb_lock);
		dev_warn(hdev->dev,
			"CB get failed, no match to handle 0x%x\n", handle);
		return NULL;
	}

	kref_get(&cb->refcount);

	spin_unlock(&mgr->cb_lock);

	return cb;
}

void hl_cb_put(struct hl_cb *cb)
{
	kref_put(&cb->refcount, cb_release);
}

void hl_cb_mgr_init(struct hl_cb_mgr *mgr)
{
	spin_lock_init(&mgr->cb_lock);
	idr_init(&mgr->cb_handles);
}

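/*
 * hl_cb_mgr_fini() - release all CBs still tracked by a CB manager.
 *
 * A CB whose refcount does not drop to zero here is still held elsewhere,
 * so an error is printed for it.
 */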
void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr)
{
	struct hl_cb *cb;
	struct idr *idp;
	u32 id;

	idp = &mgr->cb_handles;

	idr_for_each_entry(idp, cb, id) {
		if (kref_put(&cb->refcount, cb_release) != 1)
			dev_err(hdev->dev,
				"CB %d for CTX ID %d is still alive\n",
				id, cb->ctx->asid);
	}

	idr_destroy(&mgr->cb_handles);
}

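/*
 * hl_cb_kernel_create() - create a CB for the kernel driver itself.
 *
 * Convenience wrapper around hl_cb_create() for the kernel context. Returns
 * a referenced CB on success and NULL on failure. Presumably the caller is
 * expected to release it with hl_cb_put() and hl_cb_destroy() when done.
 */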
struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
					bool internal_cb)
{
	u64 cb_handle;
	struct hl_cb *cb;
	int rc;

	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, hdev->kernel_ctx, cb_size,
				internal_cb, false, &cb_handle);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to allocate CB for the kernel driver %d\n", rc);
		return NULL;
	}

	cb_handle >>= PAGE_SHIFT;
	cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle);
	/* hl_cb_get should never fail here */
	if (!cb) {
		dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n",
				(u32) cb_handle);
		goto destroy_cb;
	}

	return cb;

destroy_cb:
	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT);

	return NULL;
}

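/* hl_cb_pool_init() - pre-allocate the device pool of kernel-context CBs */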
int hl_cb_pool_init(struct hl_device *hdev)
{
	struct hl_cb *cb;
	int i;

	INIT_LIST_HEAD(&hdev->cb_pool);
	spin_lock_init(&hdev->cb_pool_lock);

	for (i = 0; i < hdev->asic_prop.cb_pool_cb_cnt; i++) {
		cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size,
				HL_KERNEL_ASID_ID, false);
		if (cb) {
			cb->is_pool = true;
			list_add(&cb->pool_list, &hdev->cb_pool);
		} else {
			hl_cb_pool_fini(hdev);
			return -ENOMEM;
		}
	}

	return 0;
}

int hl_cb_pool_fini(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}

	return 0;
}

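/*
 * hl_cb_va_pool_init() - create the per-context gen pool of device virtual
 * addresses used for mapping CBs to the device MMU. A no-op when the ASIC
 * does not support CB mapping.
 */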
int hl_cb_va_pool_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	if (!hdev->supports_cb_mapping)
		return 0;

	ctx->cb_va_pool = gen_pool_create(__ffs(prop->pmmu.page_size), -1);
	if (!ctx->cb_va_pool) {
		dev_err(hdev->dev,
			"Failed to create VA gen pool for CB mapping\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(ctx->cb_va_pool, prop->cb_va_start_addr,
			prop->cb_va_end_addr - prop->cb_va_start_addr, -1);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to add memory to VA gen pool for CB mapping\n");
		goto err_pool_destroy;
	}

	return 0;

err_pool_destroy:
	gen_pool_destroy(ctx->cb_va_pool);

	return rc;
}

void hl_cb_va_pool_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (!hdev->supports_cb_mapping)
		return;

	gen_pool_destroy(ctx->cb_va_pool);
}