/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}
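
/**
 * drm_sched_fence_scheduled - signal the scheduled part of the fence
 *
 * @fence: scheduler fence to signal
 *
 * Signals &drm_sched_fence.scheduled and traces whether the fence had
 * already been signaled.
 */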
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->scheduled);

	if (!ret)
		DMA_FENCE_TRACE(&fence->scheduled,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->scheduled,
				"was already signaled\n");
}
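
/**
 * drm_sched_fence_finished - signal the finished part of the fence
 *
 * @fence: scheduler fence to signal
 *
 * Signals &drm_sched_fence.finished and traces whether the fence had
 * already been signaled.
 */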
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	int ret = dma_fence_signal(&fence->finished);

	if (!ret)
		DMA_FENCE_TRACE(&fence->finished,
				"signaled from irq context\n");
	else
		DMA_FENCE_TRACE(&fence->finished,
				"was already signaled\n");
}

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}

/**
 * drm_sched_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void drm_sched_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback run when the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};
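
/**
 * to_drm_sched_fence - cast a dma_fence to a drm_sched_fence
 *
 * @f: fence to look at
 *
 * Returns the containing &struct drm_sched_fence when @f is either its
 * scheduled or its finished fence, NULL otherwise.
 */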
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
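
/**
 * drm_sched_fence_create - create a new scheduler fence
 *
 * @entity: scheduler entity this fence belongs to
 * @owner: opaque pointer stored as the fence owner
 *
 * Allocates a &struct drm_sched_fence from the slab cache and initializes
 * its scheduled and finished fences on the entity's fence contexts.
 *
 * Returns the new fence, or NULL if the allocation fails.
 */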
struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
					       void *owner)
{
	struct drm_sched_fence *fence = NULL;
	unsigned seq;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	fence->sched = entity->rq->sched;
	spin_lock_init(&fence->lock);

	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);

	return fence;
}

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);
MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");