// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
*/
# include "habanalabs.h"
# include <linux/slab.h>
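
/*
 * hl_ctx_fini - release the resources held by a context
 *
 * @ctx: pointer to the context structure
 *
 * Drops the fences of the pending CS. For a user context, also tears down
 * the VM context, frees the ASID and, if this is the compute context of a
 * device that is in debug mode, disables debug mode. The kernel context
 * only tears down its MMU context.
 */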
static void hl_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int i;

	/*
	 * If we arrived here, there are no jobs waiting for this context
	 * on its queues so we can safely remove it.
	 * This is because for each CS we increment the ref count, and for
	 * every CS that finished we decrement it, so we won't arrive at
	 * this function unless the ref count is 0.
	 */

	for (i = 0; i < HL_MAX_PENDING_CS; i++)
		dma_fence_put(ctx->cs_pending[i]);

	if (ctx->asid != HL_KERNEL_ASID_ID) {
		/* The engines are stopped as there is no executing CS, but the
		 * Coresight might be still working by accessing addresses
		 * related to the stopped engines. Hence stop it explicitly.
		 * Stop only if this is the compute context, as there can be
		 * only one compute context.
		 */
		if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
			hl_device_set_debug_mode(hdev, false);

		hl_vm_ctx_fini(ctx);
		hl_asid_free(hdev, ctx->asid);
	} else {
		hl_mmu_ctx_fini(ctx);
	}
}
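
/*
 * hl_ctx_do_release - final release of a context
 *
 * @ref: pointer to the refcount kref embedded in the context
 *
 * Called by kref_put() when the context's refcount reaches zero. Finalizes
 * the context, drops the reference it holds on its hpriv (if any) and frees
 * the context structure.
 */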
void hl_ctx_do_release(struct kref *ref)
{
	struct hl_ctx *ctx;

	ctx = container_of(ref, struct hl_ctx, refcount);

	hl_ctx_fini(ctx);

	if (ctx->hpriv)
		hl_hpriv_put(ctx->hpriv);

	kfree(ctx);
}
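
/*
 * hl_ctx_create - create a user context
 *
 * @hdev: pointer to device structure
 * @hpriv: pointer to the private data of the calling process
 *
 * Allocates a context, registers it in the process's context manager IDR
 * and initializes it. On success, the context holds a reference on hpriv.
 * Returns 0 on success, a negative error code otherwise.
 */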
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
	struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
	struct hl_ctx *ctx;
	int rc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		rc = -ENOMEM;
		goto out_err;
	}

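	/* Handles are allocated starting from 1 with no upper bound; on
	 * success, idr_alloc() returns the new handle.
	 */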
	mutex_lock(&mgr->ctx_lock);
	rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	mutex_unlock(&mgr->ctx_lock);

	if (rc < 0) {
		dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
		goto free_ctx;
	}

	ctx->handle = rc;

	rc = hl_ctx_init(hdev, ctx, false);
	if (rc)
		goto remove_from_idr;

	hl_hpriv_get(hpriv);
	ctx->hpriv = hpriv;

	/* TODO: remove for multiple contexts per process */
	hpriv->ctx = ctx;

	/* TODO: remove the following line for multiple process support */
	hdev->compute_ctx = ctx;

	return 0;

remove_from_idr:
	mutex_lock(&mgr->ctx_lock);
	idr_remove(&mgr->ctx_handles, ctx->handle);
	mutex_unlock(&mgr->ctx_lock);
free_ctx:
	kfree(ctx);
out_err:
	return rc;
}
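
/*
 * hl_ctx_free - drop a reference on a context
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 *
 * If this was the last reference, the context is released. Otherwise, some
 * CS of this context are still executing, so a warning is printed and the
 * context will be released only when its last fence is put.
 */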
void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
{
	if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
		return;

	dev_warn(hdev->dev,
		"Context %d closed or terminated but its CS are executing\n",
		ctx->asid);
}
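
/*
 * hl_ctx_init - initialize a context structure
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 * @is_kernel_ctx: true if this is the kernel driver's context
 *
 * The kernel context gets the reserved ASID (HL_KERNEL_ASID_ID) and only an
 * MMU context, while a user context gets a freshly allocated ASID and a full
 * VM context.
 */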
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
	int rc = 0;

	ctx->hdev = hdev;

	kref_init(&ctx->refcount);

	ctx->cs_sequence = 1;
	spin_lock_init(&ctx->cs_lock);
	atomic_set(&ctx->thread_ctx_switch_token, 1);
	ctx->thread_ctx_switch_wait_token = 0;

	if (is_kernel_ctx) {
		ctx->asid = HL_KERNEL_ASID_ID; /* Kernel driver gets ASID 0 */
		rc = hl_mmu_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mmu ctx module\n");
			goto mem_ctx_err;
		}
	} else {
		ctx->asid = hl_asid_alloc(hdev);
		if (!ctx->asid) {
			dev_err(hdev->dev, "No free ASID, failed to create context\n");
			return -ENOMEM;
		}

		rc = hl_vm_ctx_init(ctx);
		if (rc) {
			dev_err(hdev->dev, "Failed to init mem ctx module\n");
			rc = -ENOMEM;
			goto mem_ctx_err;
		}
	}

	return 0;

mem_ctx_err:
	if (ctx->asid != HL_KERNEL_ASID_ID)
		hl_asid_free(hdev, ctx->asid);

	return rc;
}
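
/*
 * hl_ctx_get - increment the context's refcount
 *
 * @hdev: pointer to device structure
 * @ctx: pointer to the context structure
 */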
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx)
{
	kref_get(&ctx->refcount);
}
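
/*
 * hl_ctx_put - decrement the context's refcount
 *
 * @ctx: pointer to the context structure
 *
 * Returns 1 if this was the last reference and the context was released,
 * 0 otherwise.
 */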
int hl_ctx_put(struct hl_ctx *ctx)
{
	return kref_put(&ctx->refcount, hl_ctx_do_release);
}
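
/*
 * hl_ctx_get_fence - get the fence of a CS by its sequence number
 *
 * @ctx: pointer to the context structure
 * @seq: sequence number of the CS to wait on
 *
 * Returns a reference to the CS fence, NULL if the CS already completed and
 * its fence was recycled from the cs_pending array, or ERR_PTR(-EINVAL) if
 * no CS with this sequence number was submitted yet.
 */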
struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq)
{
	struct hl_device *hdev = ctx->hdev;
	struct dma_fence *fence;

	spin_lock(&ctx->cs_lock);

	if (seq >= ctx->cs_sequence) {
		dev_notice_ratelimited(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return ERR_PTR(-EINVAL);
	}

	if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) {
		dev_dbg(hdev->dev,
			"Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
			seq, ctx->cs_sequence);
		spin_unlock(&ctx->cs_lock);
		return NULL;
	}

	fence = dma_fence_get(
			ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]);
	spin_unlock(&ctx->cs_lock);

	return fence;
}

/*
 * hl_ctx_mgr_init - initialize the context manager
 *
 * @mgr: pointer to context manager structure
 *
 * This manager is an object inside the hpriv object of the user process.
 * The function is called when a user process opens the FD.
 */
void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
{
	mutex_init(&mgr->ctx_lock);
	idr_init(&mgr->ctx_handles);
}

/*
 * hl_ctx_mgr_fini - finalize the context manager
 *
 * @hdev: pointer to device structure
 * @mgr: pointer to context manager structure
 *
 * This function goes over all the contexts in the manager and frees them.
 * It is called when a process closes the FD.
 */
void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
{
	struct hl_ctx *ctx;
	struct idr *idp;
	u32 id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id)
		hl_ctx_free(hdev, ctx);

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->ctx_lock);
}