// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define IPC_MAX_RX_MSG	128

struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;
	struct vpu_jsm_msg jsm;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC,
		 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)",
		 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
		 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
			      struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
	u32 *payload = (u32 *)&jsm_msg->payload;

	ivpu_dbg(vdev, JSM,
		 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
		 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
		 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
		 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		      struct vpu_jsm_msg *jsm_msg)
{
	ipc_hdr->status = IVPU_IPC_HDR_FREE;
	if (jsm_msg)
		jsm_msg->status = VPU_JSM_MSG_FREE;
	wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	ivpu_bo_free(ipc->mem_rx);
	ivpu_bo_free(ipc->mem_tx);
}
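
/*
 * Allocate a TX slot from the gen_pool backed by mem_tx and fill in both the
 * IPC header and the JSM message. The slot is expected to have been released
 * by the firmware (status FREE); a warning is printed if it was not.
 */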
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_tx_buf *tx_buf;
	u32 tx_buf_vpu_addr;
	u32 jsm_vpu_addr;

	tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
	if (!tx_buf_vpu_addr) {
		ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n",
				     sizeof(*tx_buf));
		return -ENOMEM;
	}

	tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
	if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
		gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
		return -EIO;
	}

	jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

	if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
		ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
				      tx_buf_vpu_addr);

	if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
		ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
				      jsm_vpu_addr);

	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->ipc.data_addr = jsm_vpu_addr;
	/* TODO: Set data_size to actual JSM message size, not union of all messages */
	tx_buf->ipc.data_size = sizeof(*req);
	tx_buf->ipc.channel = cons->channel;
	tx_buf->ipc.src_node = 0;
	tx_buf->ipc.dst_node = 1;
	tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
	tx_buf->jsm.type = req->type;
	tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
	tx_buf->jsm.payload = req->payload;
	req->request_id = atomic_inc_return(&ipc->request_id);
	tx_buf->jsm.request_id = req->request_id;
	cons->request_id = req->request_id;
	wmb(); /* Flush WC buffers for IPC, JSM msgs */

	cons->tx_vpu_addr = tx_buf_vpu_addr;

	ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
	ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

	return 0;
}

static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	if (vpu_addr)
		gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_ipc_tx_set(vdev, vpu_addr);
}
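
/*
 * Queue a received message: consumers with an rx_callback are placed on the
 * global cb_msg_list (processed by the IRQ thread handler), all others go to
 * the consumer's rx_msg_list and its waitqueue is woken up.
 */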
static void
ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg;

	lockdep_assert_held(&ipc->cons_lock);
	lockdep_assert_irqs_disabled();

	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
	if (!rx_msg) {
		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		return;
	}

	atomic_inc(&ipc->rx_msg_count);

	rx_msg->ipc_hdr = ipc_hdr;
	rx_msg->jsm_msg = jsm_msg;
	rx_msg->callback = cons->rx_callback;

	if (rx_msg->callback) {
		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
	} else {
		spin_lock(&cons->rx_lock);
		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
}

static void
ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
{
	list_del(&rx_msg->link);
	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
	atomic_dec(&vdev->ipc->rx_msg_count);
	kfree(rx_msg);
}

void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	INIT_LIST_HEAD(&cons->link);
	cons->channel = channel;
	cons->tx_vpu_addr = 0;
	cons->request_id = 0;
	cons->aborted = false;
	cons->rx_callback = rx_callback;
	spin_lock_init(&cons->rx_lock);
	INIT_LIST_HEAD(&cons->rx_msg_list);
	init_waitqueue_head(&cons->rx_msg_wq);

	spin_lock_irq(&ipc->cons_lock);
	list_add_tail(&cons->link, &ipc->cons_list);
	spin_unlock_irq(&ipc->cons_lock);
}

void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	spin_lock_irq(&ipc->cons_lock);
	list_del(&cons->link);
	spin_unlock_irq(&ipc->cons_lock);

	spin_lock_irq(&cons->rx_lock);
	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);

	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}
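
/* Send a JSM message on the consumer's channel; fails with -EAGAIN when IPC is disabled. */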
int ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	mutex_lock(&ipc->lock);

	if (!ipc->on) {
		ret = -EAGAIN;
		goto unlock;
	}

	ret = ivpu_ipc_tx_prepare(vdev, cons, req);
	if (ret)
		goto unlock;

	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);

unlock:
	mutex_unlock(&ipc->lock);
	return ret;
}

static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	bool ret;

	spin_lock_irq(&cons->rx_lock);
	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ret;
}
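
/*
 * Wait for a response on the consumer's RX list. Returns -ETIMEDOUT on timeout,
 * -ECANCELED if the consumer was aborted and -EBADMSG if the JSM result is not
 * VPU_JSM_STATUS_SUCCESS. Only synchronous (callback-less) consumers may call this.
 */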
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf,
		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
	struct ivpu_ipc_rx_msg *rx_msg;
	int wait_ret, ret = 0;

	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
		return -EINVAL;

	wait_ret = wait_event_timeout(cons->rx_msg_wq,
				      ivpu_ipc_rx_need_wakeup(cons),
				      msecs_to_jiffies(timeout_ms));

	if (wait_ret == 0)
		return -ETIMEDOUT;

	spin_lock_irq(&cons->rx_lock);
	if (cons->aborted) {
		spin_unlock_irq(&cons->rx_lock);
		return -ECANCELED;
	}
	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
	if (!rx_msg) {
		spin_unlock_irq(&cons->rx_lock);
		return -EAGAIN;
	}

	if (ipc_buf)
		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
	if (rx_msg->jsm_msg) {
		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));

		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
			ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
			ret = -EBADMSG;
		}

		if (jsm_msg)
			memcpy(jsm_msg, rx_msg->jsm_msg, size);
	}

	ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);
	return ret;
}

static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			       enum vpu_ipc_msg_type expected_resp_type,
			       struct vpu_jsm_msg *resp, u32 channel,
			       unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret);
		goto consumer_del;
	}

	if (resp->type != expected_resp_type) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADE;
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}
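
/*
 * Send/receive variant used when the device is known to be powered up
 * (a runtime-suspended device triggers a WARN). On timeout, an engine
 * heartbeat query is sent to check whether the firmware is still alive;
 * if the heartbeat also times out, recovery is triggered.
 */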
int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
				 u32 channel, unsigned long timeout_ms)
{
	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg hb_resp;
	int ret, hb_ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));

	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
	if (ret != -ETIMEDOUT)
		return ret;

	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
						vdev->timeout.jsm);
	if (hb_ret == -ETIMEDOUT)
		ivpu_pm_trigger_recovery(vdev, "IPC timeout");

	return ret;
}
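
/* Runtime-PM-aware wrapper around ivpu_ipc_send_receive_active(). */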
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);

	ivpu_rpm_put(vdev);
	return ret;
}

static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	if (cons->channel != ipc_hdr->channel)
		return false;

	if (!jsm_msg || jsm_msg->request_id == cons->request_id)
		return true;

	return false;
}
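
/*
 * IRQ handler: drains the hardware RX FIFO, validates each message and
 * dispatches it to the first matching consumer. Messages queued for callback
 * consumers are deferred to the IRQ thread via the IRQ source FIFO.
 */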
void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	unsigned long flags;
	bool dispatched;
	u32 vpu_addr;

	/*
	 * The driver needs to purge all messages from the IPC FIFO to clear the
	 * IPC interrupt. Unless the FIFO is drained to 0, no further IPC
	 * interrupts are generated.
	 */
	while (ivpu_hw_ipc_rx_count_get(vdev)) {
		vpu_addr = ivpu_hw_ipc_rx_addr_get(vdev);
		if (vpu_addr == REG_IO_ERROR) {
			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
			return;
		}

		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
		if (!ipc_hdr) {
			ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
			continue;
		}
		ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

		jsm_msg = NULL;
		if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
			jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
			if (!jsm_msg) {
				ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
						      ipc_hdr->data_addr);
				ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
				continue;
			}
			ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
		}

		if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
			ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
					      IPC_MAX_RX_MSG);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
			continue;
		}

		dispatched = false;
		spin_lock_irqsave(&ipc->cons_lock, flags);
		list_for_each_entry(cons, &ipc->cons_list, link) {
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ipc->cons_lock, flags);

		if (!dispatched) {
			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		}
	}

	if (!list_empty(&ipc->cb_msg_list))
		if (!kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC))
			ivpu_err_ratelimited(vdev, "IRQ FIFO full\n");
}
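
/*
 * IRQ thread: splices the pending callback messages off cb_msg_list and
 * invokes each consumer's rx_callback outside of hard IRQ context.
 */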
void ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;
	struct list_head cb_msg_list;

	INIT_LIST_HEAD(&cb_msg_list);

	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	}
}
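
/*
 * Allocate the TX/RX buffers (write-combined, mappable), set up the gen_pool
 * used for TX slot allocation and initialize the IPC bookkeeping structures.
 */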
int ivpu_ipc_init(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_tx) {
		ivpu_err(vdev, "Failed to allocate mem_tx\n");
		return -ENOMEM;
	}

	ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_rx) {
		ivpu_err(vdev, "Failed to allocate mem_rx\n");
		ret = -ENOMEM;
		goto err_free_tx;
	}

	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
					  -1, "TX_IPC_JSM");
	if (IS_ERR(ipc->mm_tx)) {
		ret = PTR_ERR(ipc->mm_tx);
		ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
		goto err_free_rx;
	}

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret) {
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
		goto err_free_rx;
	}

	spin_lock_init(&ipc->cons_lock);
	INIT_LIST_HEAD(&ipc->cons_list);
	INIT_LIST_HEAD(&ipc->cb_msg_list);

	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
		goto err_free_rx;
	}

	ivpu_ipc_reset(vdev);
	return 0;

err_free_rx:
	ivpu_bo_free(ipc->mem_rx);
err_free_tx:
	ivpu_bo_free(ipc->mem_tx);
	return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	drm_WARN_ON(&vdev->drm, ipc->on);
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);

	ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	ipc->on = true;
	mutex_unlock(&ipc->lock);
}
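
/*
 * Stop accepting new IPC messages, abort synchronous consumers and drop any
 * messages still queued on their RX lists, then wake up all waiters.
 */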
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons, *c;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));

	mutex_lock(&ipc->lock);
	ipc->on = false;
	mutex_unlock(&ipc->lock);

	spin_lock_irq(&ipc->cons_lock);
	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
		spin_lock(&cons->rx_lock);
		if (!cons->rx_callback)
			cons->aborted = true;
		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
			ivpu_ipc_rx_msg_del(vdev, rx_msg);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
	spin_unlock_irq(&ipc->cons_lock);

	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}

void ivpu_ipc_reset(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	drm_WARN_ON(&vdev->drm, ipc->on);

	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
	wmb(); /* Flush WC buffers for TX and RX rings */

	mutex_unlock(&ipc->lock);
}