// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_sched_job.h"

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_trace.h"
#include "xe_vm.h"
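
/*
 * Job slabs: a non-parallel job carries a single batch address, while a
 * parallel job reserves room for one batch address per hardware engine
 * instance (XE_HW_ENGINE_MAX_INSTANCE).
 */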
static struct kmem_cache *xe_sched_job_slab;
static struct kmem_cache *xe_sched_job_parallel_slab;

int __init xe_sched_job_module_init(void)
{
	xe_sched_job_slab =
		kmem_cache_create("xe_sched_job",
				  sizeof(struct xe_sched_job) +
				  sizeof(u64), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!xe_sched_job_slab)
		return -ENOMEM;

	xe_sched_job_parallel_slab =
		kmem_cache_create("xe_sched_job_parallel",
				  sizeof(struct xe_sched_job) +
				  sizeof(u64) *
				  XE_HW_ENGINE_MAX_INSTANCE, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!xe_sched_job_parallel_slab) {
		kmem_cache_destroy(xe_sched_job_slab);
		return -ENOMEM;
	}

	return 0;
}

void xe_sched_job_module_exit(void)
{
	kmem_cache_destroy(xe_sched_job_slab);
	kmem_cache_destroy(xe_sched_job_parallel_slab);
}

static struct xe_sched_job *job_alloc(bool parallel)
{
	return kmem_cache_zalloc(parallel ? xe_sched_job_parallel_slab :
				 xe_sched_job_slab, GFP_KERNEL);
}
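
/**
 * xe_sched_job_is_migration - Whether jobs on an exec queue are migration jobs
 * @q: the exec queue
 *
 * Return: true if @q is bound to a migration VM, false otherwise.
 */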
bool xe_sched_job_is_migration(struct xe_exec_queue *q)
{
	return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
}

static void job_free(struct xe_sched_job *job)
{
	struct xe_exec_queue *q = job->q;
	bool is_migration = xe_sched_job_is_migration(q);

	kmem_cache_free(xe_exec_queue_is_parallel(job->q) || is_migration ?
			xe_sched_job_parallel_slab : xe_sched_job_slab, job);
}

static struct xe_device *job_to_xe(struct xe_sched_job *job)
{
	return gt_to_xe(job->q->gt);
}
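
/**
 * xe_sched_job_create - Create an XE schedule job
 * @q: exec queue to submit the job on
 * @batch_addr: array of batch buffer addresses, one per logical ring of @q
 *              (two entries for migration jobs)
 *
 * Allocate the job from the slab matching the queue type, take a reference on
 * @q, initialize the base DRM scheduler job, and create the seqno fence backing
 * the job (a dma-fence array spanning all rings for parallel queues).
 *
 * Return: pointer to the new job or an ERR_PTR() on failure.
 */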
struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
					 u64 *batch_addr)
{
	struct xe_sched_job *job;
	struct dma_fence **fences;
	bool is_migration = xe_sched_job_is_migration(q);
	int err;
	int i, j;
	u32 width;

	/* only a kernel context can submit a vm-less job */
	XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));

	/* Migration and kernel engines have their own locking */
	if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
		lockdep_assert_held(&q->vm->lock);
		if (!xe_vm_no_dma_fences(q->vm))
			xe_vm_assert_held(q->vm);
	}

	job = job_alloc(xe_exec_queue_is_parallel(q) || is_migration);
	if (!job)
		return ERR_PTR(-ENOMEM);

	job->q = q;
	kref_init(&job->refcount);
	xe_exec_queue_get(job->q);

	err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
	if (err)
		goto err_free;

	if (!xe_exec_queue_is_parallel(q)) {
		job->fence = xe_lrc_create_seqno_fence(q->lrc);
		if (IS_ERR(job->fence)) {
			err = PTR_ERR(job->fence);
			goto err_sched_job;
		}
	} else {
		struct dma_fence_array *cf;

		fences = kmalloc_array(q->width, sizeof(*fences), GFP_KERNEL);
		if (!fences) {
			err = -ENOMEM;
			goto err_sched_job;
		}

		for (j = 0; j < q->width; ++j) {
			fences[j] = xe_lrc_create_seqno_fence(q->lrc + j);
			if (IS_ERR(fences[j])) {
				err = PTR_ERR(fences[j]);
				goto err_fences;
			}
		}

		cf = dma_fence_array_create(q->width, fences,
					    q->parallel.composite_fence_ctx,
					    q->parallel.composite_fence_seqno++,
					    false);
		if (!cf) {
			--q->parallel.composite_fence_seqno;
			err = -ENOMEM;
			goto err_fences;
		}

		/* Sanity check */
		for (j = 0; j < q->width; ++j)
			xe_assert(job_to_xe(job), cf->base.seqno == fences[j]->seqno);

		job->fence = &cf->base;
	}

	width = q->width;
	if (is_migration)
		width = 2;

	for (i = 0; i < width; ++i)
		job->batch_addr[i] = batch_addr[i];

	/* All other jobs require a VM to be open which has a ref */
	if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
		xe_device_mem_access_get(job_to_xe(job));
	xe_device_assert_mem_access(job_to_xe(job));

	trace_xe_sched_job_create(job);
	return job;

err_fences:
	for (j = j - 1; j >= 0; --j) {
		--q->lrc[j].fence_ctx.next_seqno;
		dma_fence_put(fences[j]);
	}
	kfree(fences);
err_sched_job:
	drm_sched_job_cleanup(&job->drm);
err_free:
	xe_exec_queue_put(q);
	job_free(job);
	return ERR_PTR(err);
}

/**
 * xe_sched_job_destroy - Destroy XE schedule job
 * @ref: reference to XE schedule job
 *
 * Called when ref == 0, drop a reference to the job's xe_exec_queue + fence,
 * cleanup base DRM schedule job, and free memory for XE schedule job.
 */
void xe_sched_job_destroy(struct kref *ref)
{
	struct xe_sched_job *job =
		container_of(ref, struct xe_sched_job, refcount);

	if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
		xe_device_mem_access_put(job_to_xe(job));
	xe_exec_queue_put(job->q);
	dma_fence_put(job->fence);
	drm_sched_job_cleanup(&job->drm);
	job_free(job);
}
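
/**
 * xe_sched_job_set_error - Propagate an error to a job's fence(s)
 * @job: the job to mark as failed
 * @error: negative errno to set on the fence
 *
 * Set @error on the job's fence and, for parallel jobs backed by a dma-fence
 * array, on every unsignaled child fence, then enable software signaling and
 * kick the exec queue's hardware fence IRQ handler.
 */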
void xe_sched_job_set_error(struct xe_sched_job *job, int error)
{
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags))
		return;

	dma_fence_set_error(job->fence, error);

	if (dma_fence_is_array(job->fence)) {
		struct dma_fence_array *array =
			to_dma_fence_array(job->fence);
		struct dma_fence **child = array->fences;
		unsigned int nchild = array->num_fences;

		do {
			struct dma_fence *current_fence = *child++;

			if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				     &current_fence->flags))
				continue;
			dma_fence_set_error(current_fence, error);
		} while (--nchild);
	}

	trace_xe_sched_job_set_error(job);

	dma_fence_enable_sw_signaling(job->fence);
	xe_hw_fence_irq_run(job->q->fence_irq);
}
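
/**
 * xe_sched_job_started - Whether the GPU has started executing a job
 * @job: the job to check
 *
 * Return: true once the job's seqno is no longer ahead of the LRC start seqno,
 * i.e. the ring has begun processing the job.
 */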
bool xe_sched_job_started(struct xe_sched_job *job)
{
	struct xe_lrc *lrc = job->q->lrc;

	return !__dma_fence_is_later(xe_sched_job_seqno(job),
				     xe_lrc_start_seqno(lrc),
				     job->fence->ops);
}
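
/**
 * xe_sched_job_completed - Whether the GPU has finished executing a job
 * @job: the job to check
 *
 * Return: true once the job's seqno is no longer ahead of the seqno written
 * back by the LRC, i.e. the hardware has completed the job.
 */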
bool xe_sched_job_completed(struct xe_sched_job *job)
{
	struct xe_lrc *lrc = job->q->lrc;

	/*
	 * Can safely check just LRC[0] seqno as that is last seqno written when
	 * parallel handshake is done.
	 */
	return !__dma_fence_is_later(xe_sched_job_seqno(job), xe_lrc_seqno(lrc),
				     job->fence->ops);
}

void xe_sched_job_arm(struct xe_sched_job *job)
{
	drm_sched_job_arm(&job->drm);
}
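
/**
 * xe_sched_job_push - Push a job to the DRM scheduler entity
 * @job: the job to push
 *
 * An extra job reference is held across drm_sched_entity_push_job() so the job
 * cannot be freed while it is being traced and pushed.
 */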
void xe_sched_job_push(struct xe_sched_job *job)
{
	xe_sched_job_get(job);
	trace_xe_sched_job_exec(job);
	drm_sched_entity_push_job(&job->drm);
	xe_sched_job_put(job);
}