// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Channel
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "../channel.h"
#include "../dev.h"
#include "../intr.h"
#include "../job.h"

#define TRACE_MAX_LENGTH 128U
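
/*
 * Write the contents of a gather to ftrace, for debugging. This only does
 * anything when cmdbuf tracing (host1x_debug_trace_cmdbuf) is enabled and
 * the buffer can be mapped into the kernel's address space.
 */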
static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
			       u32 offset, u32 words)
{
	struct device *dev = cdma_to_channel(cdma)->dev;
	void *mem = NULL;

	if (host1x_debug_trace_cmdbuf)
		mem = host1x_bo_mmap(bo);

	if (mem) {
		u32 i;
		/*
		 * Write in batches of 128 words, as there seems to be a
		 * limit on how much can be output to ftrace at once.
		 */
		for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
			u32 num_words = min(words - i, TRACE_MAX_LENGTH);

			/*
			 * Advance by the per-batch offset rather than
			 * accumulating it, so each batch traces the right
			 * range of the buffer.
			 */
			trace_host1x_cdma_push_gather(dev_name(dev), bo,
						      num_words,
						      offset + i * sizeof(u32),
						      mem);
		}

		host1x_bo_munmap(bo, mem);
	}
}
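
/*
 * Push a syncpoint wait into the channel's command stream, followed by a
 * SETCLASS back to next_class. On HOST1X_HW >= 2 this loads the 32-bit
 * payload register and the 32-bit wait register in a single wide push, so
 * the full threshold can be waited on; older hardware falls back to the
 * legacy WAIT_SYNCPT method, which has no overflow mitigation yet (see the
 * TODO below).
 */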
static void submit_wait(struct host1x_cdma *cdma, u32 id, u32 threshold,
			u32 next_class)
{
#if HOST1X_HW >= 2
	host1x_cdma_push_wide(cdma,
		host1x_opcode_setclass(
			HOST1X_CLASS_HOST1X,
			HOST1X_UCLASS_LOAD_SYNCPT_PAYLOAD_32,
			/* WAIT_SYNCPT_32 is at SYNCPT_PAYLOAD_32+2 */
			BIT(0) | BIT(2)
		),
		threshold,
		id,
		host1x_opcode_setclass(next_class, 0, 0)
	);
#else
	/* TODO add waitchk or use waitbases or other mitigation */
	host1x_cdma_push(cdma,
		host1x_opcode_setclass(
			HOST1X_CLASS_HOST1X,
			host1x_uclass_wait_syncpt_r(),
			BIT(0)
		),
		host1x_class_host_wait_syncpt(id, threshold)
	);
	host1x_cdma_push(cdma,
		host1x_opcode_setclass(next_class, 0, 0),
		HOST1X_OPCODE_NOP
	);
#endif
}
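
/*
 * Walk the job's command list and push each entry to CDMA: waits are
 * emitted via submit_wait() (relative waits are rebased onto the job's
 * syncpoint base), gathers are emitted as GATHER opcodes. Gather addresses
 * above 32 bits require the wide gather opcode, which only exists on
 * HOST1X_HW >= 6.
 */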
static void submit_gathers(struct host1x_job *job, u32 job_syncpt_base)
{
	struct host1x_cdma *cdma = &job->channel->cdma;
#if HOST1X_HW < 6
	struct device *dev = job->channel->dev;
#endif
	unsigned int i;
	u32 threshold;

	for (i = 0; i < job->num_cmds; i++) {
		struct host1x_job_cmd *cmd = &job->cmds[i];

		if (cmd->is_wait) {
			if (cmd->wait.relative)
				threshold = job_syncpt_base + cmd->wait.threshold;
			else
				threshold = cmd->wait.threshold;

			submit_wait(cdma, cmd->wait.id, threshold, cmd->wait.next_class);
		} else {
			struct host1x_job_gather *g = &cmd->gather;
			dma_addr_t addr = g->base + g->offset;
			u32 op2, op3;

			op2 = lower_32_bits(addr);
			op3 = upper_32_bits(addr);

			trace_write_gather(cdma, g->bo, g->offset, g->words);

			if (op3 != 0) {
#if HOST1X_HW >= 6
				u32 op1 = host1x_opcode_gather_wide(g->words);
				u32 op4 = HOST1X_OPCODE_NOP;

				host1x_cdma_push_wide(cdma, op1, op2, op3, op4);
#else
				dev_err(dev, "invalid gather for push buffer %pad\n",
					&addr);
				continue;
#endif
			} else {
				u32 op1 = host1x_opcode_gather(g->words);

				host1x_cdma_push(cdma, op1, op2);
			}
		}
	}
}
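
/*
 * Copy the current maximum value of the job's syncpoint into its wait base
 * register, so that subsequent relative waits see a consistent starting
 * point.
 */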
static inline void synchronize_syncpt_base(struct host1x_job *job)
{
	struct host1x_syncpt *sp = job->syncpt;
	unsigned int id;
	u32 value;

	value = host1x_syncpt_read_max(sp);
	id = sp->base->id;

	host1x_cdma_push(&job->channel->cdma,
			 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
						HOST1X_UCLASS_LOAD_SYNCPT_BASE, 1),
			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_BASE_INDX_F(id) |
			 HOST1X_UCLASS_LOAD_SYNCPT_BASE_VALUE_F(value));
}
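
/*
 * Program the channel's memory client stream ID from the parent device's
 * IOMMU fwspec, falling back to 0x7f (presumably the bypass stream ID on
 * these SoCs) when no IOMMU translation is configured. Only HOST1X_HW >= 6
 * has per-channel stream ID registers.
 */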
static void host1x_channel_set_streamid(struct host1x_channel *channel)
{
#if HOST1X_HW >= 6
	u32 sid = 0x7f;
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *spec = dev_iommu_fwspec_get(channel->dev->parent);

	if (spec)
		sid = spec->ids[0] & 0xffff;
#endif

	host1x_ch_writel(channel, sid, HOST1X_CHANNEL_SMMU_STREAMID);
#endif
}
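
/*
 * Submit a job to the channel: take the submit lock, program the stream
 * ID, open a CDMA pushbuffer segment, optionally serialize against the
 * previous job, reserve the job's syncpoint increments, push the gathers
 * and finally schedule a SUBMIT_COMPLETE interrupt action at the job's
 * final syncpoint value.
 */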
static int channel_submit(struct host1x_job *job)
{
	struct host1x_channel *ch = job->channel;
	struct host1x_syncpt *sp = job->syncpt;
	u32 user_syncpt_incrs = job->syncpt_incrs;
	u32 prev_max = 0;
	u32 syncval;
	int err;
	struct host1x_waitlist *completed_waiter = NULL;
	struct host1x *host = dev_get_drvdata(ch->dev->parent);

	trace_host1x_channel_submit(dev_name(ch->dev),
				    job->num_cmds, job->num_relocs,
				    job->syncpt->id, job->syncpt_incrs);

	/* before error checks, return current max */
	prev_max = job->syncpt_end = host1x_syncpt_read_max(sp);

	/* get submit lock */
	err = mutex_lock_interruptible(&ch->submitlock);
	if (err)
		goto error;

	completed_waiter = kzalloc(sizeof(*completed_waiter), GFP_KERNEL);
	if (!completed_waiter) {
		mutex_unlock(&ch->submitlock);
		err = -ENOMEM;
		goto error;
	}

	host1x_channel_set_streamid(ch);

	/* begin a CDMA submit */
	err = host1x_cdma_begin(&ch->cdma, job);
	if (err) {
		mutex_unlock(&ch->submitlock);
		goto error;
	}

	if (job->serialize) {
		/*
		 * Force serialization by inserting a host wait for the
		 * previous job to finish before this one can commence.
		 */
		host1x_cdma_push(&ch->cdma,
				 host1x_opcode_setclass(HOST1X_CLASS_HOST1X,
					host1x_uclass_wait_syncpt_r(), 1),
				 host1x_class_host_wait_syncpt(job->syncpt->id,
					host1x_syncpt_read_max(sp)));
	}

	/* Synchronize base register to allow using it for relative waiting */
	if (sp->base)
		synchronize_syncpt_base(job);

	syncval = host1x_syncpt_incr_max(sp, user_syncpt_incrs);

	host1x_hw_syncpt_assign_to_channel(host, sp, ch);

	job->syncpt_end = syncval;

	/* add a setclass for modules that require it */
	if (job->class)
		host1x_cdma_push(&ch->cdma,
				 host1x_opcode_setclass(job->class, 0, 0),
				 HOST1X_OPCODE_NOP);

	submit_gathers(job, syncval - user_syncpt_incrs);

	/* end CDMA submit & stash pinned hMems into sync queue */
	host1x_cdma_end(&ch->cdma, job);

	trace_host1x_channel_submitted(dev_name(ch->dev), prev_max, syncval);

	/* schedule a submit complete interrupt */
	err = host1x_intr_add_action(host, sp, syncval,
				     HOST1X_INTR_ACTION_SUBMIT_COMPLETE, ch,
				     completed_waiter, &job->waiter);
	completed_waiter = NULL;
	WARN(err, "Failed to set submit complete interrupt");

	mutex_unlock(&ch->submitlock);

	return 0;

error:
	kfree(completed_waiter);
	return err;
}
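
/*
 * Enable the hardware gather filter for this channel, which keeps
 * privileged opcodes in userspace-supplied gather buffers from being
 * executed. On HOST1X_HW >= 6 this lives in the hypervisor register space
 * (and is skipped when those registers are unavailable); on HOST1X_HW >= 4
 * it is a per-channel CHANNELCTRL bit. Older hardware has no such filter.
 */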
static void enable_gather_filter(struct host1x *host,
				 struct host1x_channel *ch)
{
#if HOST1X_HW >= 6
	u32 val;

	if (!host->hv_regs)
		return;

	val = host1x_hypervisor_readl(
		host, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
	val |= BIT(ch->id % 32);
	host1x_hypervisor_writel(
		host, val, HOST1X_HV_CH_KERNEL_FILTER_GBUFFER(ch->id / 32));
#elif HOST1X_HW >= 4
	host1x_ch_writel(ch,
			 HOST1X_CHANNEL_CHANNELCTRL_KERNEL_FILTER_GBUFFER(1),
			 HOST1X_CHANNEL_CHANNELCTRL);
#endif
}
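
/*
 * Map the channel's register sub-range and enable the gather filter for
 * it; the per-channel register stride is 0x4000 bytes before HOST1X_HW 6
 * and 0x100 bytes from HOST1X_HW 6 onwards.
 */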
static int host1x_channel_init(struct host1x_channel *ch, struct host1x *dev,
			       unsigned int index)
{
#if HOST1X_HW < 6
	ch->regs = dev->regs + index * 0x4000;
#else
	ch->regs = dev->regs + index * 0x100;
#endif

	enable_gather_filter(dev, ch);

	return 0;
}

static const struct host1x_channel_ops host1x_channel_ops = {
	.init = host1x_channel_init,
	.submit = channel_submit,
};