// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>

#include "dev.h"
#include "fence.h"
#include "intr.h"

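/*
 * Insert a fence into a syncpoint's fence list, keeping the list sorted by
 * threshold in wrapping (signed-difference) order so that the earliest
 * threshold is always at the head.
 */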
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					  struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

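/*
 * Program the syncpoint interrupt threshold to the earliest pending fence,
 * or disable the syncpoint interrupt if no fences are waiting.
 */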
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

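/*
 * Add a fence to its syncpoint's fence list and reprogram the interrupt
 * threshold. The caller is expected to hold the fence list lock, hence the
 * _locked suffix.
 */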
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

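/*
 * Remove a fence from its syncpoint's fence list, if it is still queued.
 * Returns true if the fence was removed, false if it was no longer on the
 * list (e.g. because it has already been dequeued for signalling).
 */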
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

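/*
 * Process a syncpoint interrupt: read the current syncpoint value, signal
 * every fence whose threshold has been reached, and reprogram the interrupt
 * for the next pending threshold (if any).
 */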
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

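/*
 * Initialize the interrupt mutex and each syncpoint's fence list and lock.
 */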
int host1x_intr_init(struct host1x *host)
{
	unsigned int id;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	return 0;
}

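/*
 * Nothing to tear down here; syncpoint interrupts are disabled via
 * host1x_intr_stop().
 */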
void host1x_intr_deinit(struct host1x *host)
{
}

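/*
 * Set up host-side sync handling; the host1x clock rate is passed down as
 * clocks per microsecond (DIV_ROUND_UP(hz, 1000000)).
 */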
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);

	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}

	mutex_unlock(&host->intr_mutex);
}

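/*
 * Disable all syncpoint interrupts.
 */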
void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}