2019-06-03 07:44:50 +02:00
// SPDX-License-Identifier: GPL-2.0-only
2016-03-15 15:35:08 -04:00
/*
* Copyright ( C ) 2013 - 2016 Red Hat
* Author : Rob Clark < robdclark @ gmail . com >
*/
2016-10-25 13:00:45 +01:00
# include <linux/dma-fence.h>
2016-03-15 17:22:13 -04:00
2016-03-15 15:35:08 -04:00
# include "msm_drv.h"
# include "msm_fence.h"
2023-03-08 07:53:03 -08:00
# include "msm_gpu.h"
static struct msm_gpu * fctx2gpu ( struct msm_fence_context * fctx )
{
struct msm_drm_private * priv = fctx - > dev - > dev_private ;
return priv - > gpu ;
}
static enum hrtimer_restart deadline_timer ( struct hrtimer * t )
{
struct msm_fence_context * fctx = container_of ( t ,
struct msm_fence_context , deadline_timer ) ;
kthread_queue_work ( fctx2gpu ( fctx ) - > worker , & fctx - > deadline_work ) ;
return HRTIMER_NORESTART ;
}
static void deadline_work ( struct kthread_work * work )
{
struct msm_fence_context * fctx = container_of ( work ,
struct msm_fence_context , deadline_work ) ;
/* If deadline fence has already passed, nothing to do: */
if ( msm_fence_completed ( fctx , fctx - > next_deadline_fence ) )
return ;
msm_devfreq_boost ( fctx2gpu ( fctx ) , 2 ) ;
}
2016-03-15 15:35:08 -04:00
2016-03-15 17:22:13 -04:00
struct msm_fence_context *
2021-07-26 07:43:57 -07:00
msm_fence_context_alloc ( struct drm_device * dev , volatile uint32_t * fenceptr ,
const char * name )
2016-03-15 15:35:08 -04:00
{
2016-03-15 17:22:13 -04:00
struct msm_fence_context * fctx ;
2022-04-11 14:58:38 -07:00
static int index = 0 ;
2016-03-15 17:22:13 -04:00
fctx = kzalloc ( sizeof ( * fctx ) , GFP_KERNEL ) ;
if ( ! fctx )
return ERR_PTR ( - ENOMEM ) ;
fctx - > dev = dev ;
2023-01-18 04:01:52 +02:00
strscpy ( fctx - > name , name , sizeof ( fctx - > name ) ) ;
2016-10-25 13:00:45 +01:00
fctx - > context = dma_fence_context_alloc ( 1 ) ;
2022-04-11 14:58:38 -07:00
fctx - > index = index + + ;
2021-07-26 07:43:57 -07:00
fctx - > fenceptr = fenceptr ;
2016-03-15 18:26:28 -04:00
spin_lock_init ( & fctx - > spinlock ) ;
2016-03-15 17:22:13 -04:00
2022-06-15 09:24:35 -07:00
/*
* Start out close to the 32 b fence rollover point , so we can
* catch bugs with fence comparisons .
*/
fctx - > last_fence = 0xffffff00 ;
fctx - > completed_fence = fctx - > last_fence ;
* fctx - > fenceptr = fctx - > last_fence ;
2023-03-08 07:53:03 -08:00
hrtimer_init ( & fctx - > deadline_timer , CLOCK_MONOTONIC , HRTIMER_MODE_ABS ) ;
fctx - > deadline_timer . function = deadline_timer ;
kthread_init_work ( & fctx - > deadline_work , deadline_work ) ;
fctx - > next_deadline = ktime_get ( ) ;
2016-03-15 17:22:13 -04:00
return fctx ;
2016-03-15 15:35:08 -04:00
}
2016-03-15 17:22:13 -04:00
/* Free a context allocated with msm_fence_context_alloc(). */
void msm_fence_context_free(struct msm_fence_context *fctx)
{
	kfree(fctx);
}
2022-04-11 14:58:38 -07:00
/*
 * Has @fence signaled?  The signed-difference comparison is rollover-safe
 * for seqnos within 2^31 of each other.
 */
bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
	/*
	 * Note: Check completed_fence first, as fenceptr is in a write-combine
	 * mapping, so it will be more expensive to read.
	 */
	if ((int32_t)(fctx->completed_fence - fence) >= 0)
		return true;

	return (int32_t)(*fctx->fenceptr - fence) >= 0;
}
2016-03-15 15:35:08 -04:00
2022-06-18 09:11:18 -07:00
/* called from irq handler and workqueue (in recover path) */
2016-03-15 17:22:13 -04:00
void msm_update_fence ( struct msm_fence_context * fctx , uint32_t fence )
2016-03-15 15:35:08 -04:00
{
2022-06-18 09:11:18 -07:00
unsigned long flags ;
spin_lock_irqsave ( & fctx - > spinlock , flags ) ;
2022-06-15 09:24:35 -07:00
if ( fence_after ( fence , fctx - > completed_fence ) )
fctx - > completed_fence = fence ;
2023-03-08 07:53:03 -08:00
if ( msm_fence_completed ( fctx , fctx - > next_deadline_fence ) )
hrtimer_cancel ( & fctx - > deadline_timer ) ;
2022-06-18 09:11:18 -07:00
spin_unlock_irqrestore ( & fctx - > spinlock , flags ) ;
2016-03-15 15:35:08 -04:00
}
2016-03-15 18:26:28 -04:00
/*
 * A single fence on a timeline: wraps the dma_fence with a backpointer
 * to the msm_fence_context it was allocated from.
 */
struct msm_fence {
	struct dma_fence base;          /* must be first, see to_msm_fence() */
	struct msm_fence_context *fctx; /* owning timeline */
};
2016-10-25 13:00:45 +01:00
static inline struct msm_fence * to_msm_fence ( struct dma_fence * fence )
2016-03-15 18:26:28 -04:00
{
return container_of ( fence , struct msm_fence , base ) ;
}
2016-10-25 13:00:45 +01:00
/* dma_fence_ops::get_driver_name */
static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
	return "msm";
}
2016-10-25 13:00:45 +01:00
static const char * msm_fence_get_timeline_name ( struct dma_fence * fence )
2016-03-15 18:26:28 -04:00
{
struct msm_fence * f = to_msm_fence ( fence ) ;
return f - > fctx - > name ;
}
2016-10-25 13:00:45 +01:00
static bool msm_fence_signaled ( struct dma_fence * fence )
2016-03-15 18:26:28 -04:00
{
struct msm_fence * f = to_msm_fence ( fence ) ;
2022-04-11 14:58:38 -07:00
return msm_fence_completed ( f - > fctx , f - > base . seqno ) ;
2016-03-15 18:26:28 -04:00
}
2023-03-08 07:53:03 -08:00
/*
 * dma_fence_ops::set_deadline - hint that @fence should signal by @deadline.
 * Arms a boost timer (or queues the boost work immediately) so devfreq can
 * raise GPU clocks ahead of the deadline.
 */
static void msm_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	struct msm_fence *f = to_msm_fence(fence);
	struct msm_fence_context *fctx = f->fctx;
	unsigned long flags;
	ktime_t now;

	spin_lock_irqsave(&fctx->spinlock, flags);
	now = ktime_get();

	/*
	 * Re-arm only if the currently tracked deadline is stale (already in
	 * the past) or the new deadline is sooner than the tracked one:
	 */
	if (ktime_after(now, fctx->next_deadline) ||
			ktime_before(deadline, fctx->next_deadline)) {
		fctx->next_deadline = deadline;
		/*
		 * Track the highest seqno with a pending deadline, so
		 * msm_update_fence() knows when the timer can be cancelled:
		 */
		fctx->next_deadline_fence =
			max(fctx->next_deadline_fence, (uint32_t)fence->seqno);

		/*
		 * Set timer to trigger boost 3ms before deadline, or
		 * if we are already less than 3ms before the deadline
		 * schedule boost work immediately.
		 */
		deadline = ktime_sub(deadline, ms_to_ktime(3));

		if (ktime_after(now, deadline)) {
			kthread_queue_work(fctx2gpu(fctx)->worker,
					&fctx->deadline_work);
		} else {
			hrtimer_start(&fctx->deadline_timer, deadline,
					HRTIMER_MODE_ABS);
		}
	}

	spin_unlock_irqrestore(&fctx->spinlock, flags);
}
2016-10-25 13:00:45 +01:00
/* dma_fence backend for msm hw fences */
static const struct dma_fence_ops msm_fence_ops = {
	.get_driver_name = msm_fence_get_driver_name,
	.get_timeline_name = msm_fence_get_timeline_name,
	.signaled = msm_fence_signaled,
	.set_deadline = msm_fence_set_deadline,
};
2016-10-25 13:00:45 +01:00
struct dma_fence *
2023-03-20 07:43:23 -07:00
msm_fence_alloc ( void )
2016-03-15 18:26:28 -04:00
{
struct msm_fence * f ;
f = kzalloc ( sizeof ( * f ) , GFP_KERNEL ) ;
if ( ! f )
return ERR_PTR ( - ENOMEM ) ;
2023-03-20 07:43:23 -07:00
return & f - > base ;
}
void
msm_fence_init ( struct dma_fence * fence , struct msm_fence_context * fctx )
{
struct msm_fence * f = to_msm_fence ( fence ) ;
2016-03-15 18:26:28 -04:00
f - > fctx = fctx ;
2016-10-25 13:00:45 +01:00
dma_fence_init ( & f - > base , & msm_fence_ops , & fctx - > spinlock ,
fctx - > context , + + fctx - > last_fence ) ;
2016-03-15 18:26:28 -04:00
}