/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

static struct kmem_cache *sched_fence_slab;

static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = kmem_cache_create(
		"drm_sched_fence", sizeof(struct drm_sched_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

static void __exit drm_sched_fence_slab_fini(void)
{
	/* Wait for all pending call_rcu() free callbacks before destroying the slab. */
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

/* Signal the scheduled fence: the job left the entity queue and is about to
 * run on the hardware.
 */
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->scheduled);
}

/* Signal the finished fence: the hardware has completed the job. */
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
	dma_fence_signal(&fence->finished);
}

static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}

static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
	/* This function should not be called if the fence has been initialized. */
	if (!WARN_ON_ONCE(fence->sched))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback that fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU schedules freeing up the fence.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
};
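
/*
 * Note on lifetime (descriptive comment derived from the code above): both
 * fences are embedded in one slab-allocated struct drm_sched_fence. Releasing
 * the finished fence only drops the extra reference it holds on the scheduled
 * fence; once the scheduled fence's refcount reaches zero the whole object is
 * freed after an RCU grace period via drm_sched_fence_free_rcu().
 */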

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);
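
/*
 * Example (illustrative sketch, not part of the scheduler API): because
 * to_drm_sched_fence() returns NULL for foreign fences, a driver holding an
 * opaque dma_fence can safely test whether it came from the scheduler. The
 * helper below is hypothetical:
 *
 *	static bool fence_is_from_scheduler(struct dma_fence *f)
 *	{
 *		return to_drm_sched_fence(f) != NULL;
 *	}
 */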

/*
 * drm_sched_fence_alloc() below is deliberately split from
 * drm_sched_fence_init(): drm_sched_job_init() allocates the fence early,
 * before the point of no return, while drm_sched_job_arm() later performs
 * the actual initialization (see the commit "drm/sched: Split
 * drm_sched_job_init").
 */
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
					      void *owner)
{
	struct drm_sched_fence *fence = NULL;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	spin_lock_init(&fence->lock);

	return fence;
}

void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}
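
/*
 * Illustrative sketch of the intended alloc/init split (assumed caller code,
 * based on the drm_sched_job_init()/drm_sched_job_arm() design described
 * above):
 *
 *	job->s_fence = drm_sched_fence_alloc(entity, owner);
 *	if (!job->s_fence)
 *		return -ENOMEM;
 *
 *	// on an error path before the job is armed, only free the allocation:
 *	drm_sched_fence_free(job->s_fence);
 *
 *	// past the point of no return, arm the job and initialize both
 *	// fences; from here on refcounting handles the lifetime:
 *	drm_sched_fence_init(job->s_fence, entity);
 */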

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");