// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>

/**
 * struct hl_eqe_work - This structure is used to schedule work of EQ
 *                      entry and cpucp_reset event
 *
 * @eq_work: workqueue object to run when EQ entry is received
 * @hdev: pointer to device structure
 * @eq_entry: copy of the EQ entry
 */
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};

/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0.
 */
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}

/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0.
 */
static inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}
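
/**
 * irq_handle_eqe - work function that processes a copied EQ entry
 *
 * @work: embedded work_struct of the hl_eqe_work item
 *
 * Dispatch the copied EQ entry to the ASIC-specific handler and free the
 * work item that was allocated in the EQ interrupt handler.
 */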
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}

/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 *
 */
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];

		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

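		/*
		 * If the entry carries a valid shadow index, schedule the
		 * corresponding job's completion work on this CQ's workqueue.
		 */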
		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
		}

		atomic_inc(&queue->ci);

		/* Clear CQ entry ready bit */
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);

		/* Increment free slots */
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}
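
/**
 * handle_user_cq - complete all fences pending on a user interrupt
 *
 * @hdev: pointer to device structure
 * @user_cq: pointer to the user interrupt whose wait list is processed
 *
 * Walk the interrupt's wait list under its spinlock and signal completion
 * of every pending user fence.
 */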
static void handle_user_cq(struct hl_device *hdev,
			struct hl_user_interrupt *user_cq)
{
	struct hl_user_pending_interrupt *pend;

	spin_lock(&user_cq->wait_list_lock);
	list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node)
		complete_all(&pend->fence.completion);
	spin_unlock(&user_cq->wait_list_lock);
}

/**
 * hl_irq_handler_user_cq - irq handler for user completion queues
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
{
	struct hl_user_interrupt *user_cq = arg;
	struct hl_device *hdev = user_cq->hdev;

	dev_dbg(hdev->dev,
		"got user completion interrupt id %u",
		user_cq->interrupt_id);

	/* Handle user cq interrupts registered on all interrupts */
	handle_user_cq(hdev, &hdev->common_user_interrupt);

	/* Handle user cq interrupts registered on this specific interrupt */
	handle_user_cq(hdev, user_cq);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_default - default irq handler
 *
 * @irq: irq number
 * @arg: pointer to user interrupt structure
 *
 */
irqreturn_t hl_irq_handler_default(int irq, void *arg)
{
	struct hl_user_interrupt *user_interrupt = arg;
	struct hl_device *hdev = user_interrupt->hdev;
	u32 interrupt_id = user_interrupt->interrupt_id;

	dev_err(hdev->dev,
		"got invalid user interrupt %u",
		interrupt_id);

	return IRQ_HANDLED;
}

/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 *
 */
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;

	eq_base = eq->kernel_address;

	while (1) {
		bool entry_ready =
			((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
				EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);

		if (!entry_ready)
			break;

		eq_entry = &eq_base[eq->ci];

		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
				irq);
			goto skip_irq;
		}

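		/*
		 * This runs in hard IRQ context, so the work item must be
		 * allocated with GFP_ATOMIC; the entry is copied here and
		 * handled later from the eq_wq workqueue.
		 */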
		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
		/* Clear EQ entry ready bit */
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
						~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}

/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: The H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields
 * Returns 0 on success
 */
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}

/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory
 */
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
						 q->kernel_address,
						 q->bus_address);
}
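
/**
 * hl_cq_reset - reset a completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Reset the queue indices and clear its memory so stale entries written by
 * the H/W before it was halted are not processed again.
 */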
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}

/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields
 * Returns 0 on success
 */
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;

	return 0;
}

/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory
 */
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_EQ_SIZE_IN_BYTES,
					q->kernel_address);
}
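
/**
 * hl_eq_reset - reset an event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Reset the queue's ci and clear its memory so stale entries written by the
 * H/W before it was halted are not processed again.
 */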
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;

	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted and therefore
	 * we need to clean the actual queues so we won't process old entries
	 * when the device is operational again
	 */

	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}