// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_exec_queue.h"

#include <linux/nospec.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/xe_drm.h>

#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_fence.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_migrate.h"
#include "xe_pm.h"
#include "xe_ring_ops_types.h"
#include "xe_trace.h"
#include "xe_vm.h"

static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
						    struct xe_vm *vm,
						    u32 logical_mask,
						    u16 width, struct xe_hw_engine *hwe,
						    u32 flags)
{
	struct xe_exec_queue *q;
	struct xe_gt *gt = hwe->gt;
	int err;
	int i;

	q = kzalloc(sizeof(*q) + sizeof(struct xe_lrc) * width, GFP_KERNEL);
	if (!q)
		return ERR_PTR(-ENOMEM);

	kref_init(&q->refcount);
	q->flags = flags;
	q->hwe = hwe;
	q->gt = gt;
	if (vm)
		q->vm = xe_vm_get(vm);
	q->class = hwe->class;
	q->width = width;
	q->logical_mask = logical_mask;
	q->fence_irq = &gt->fence_irq[hwe->class];
	q->ring_ops = gt->ring_ops[hwe->class];
	q->ops = gt->exec_queue_ops;
	INIT_LIST_HEAD(&q->persistent.link);
	INIT_LIST_HEAD(&q->compute.link);
	INIT_LIST_HEAD(&q->multi_gt_link);

	/* FIXME: Wire up to configurable default value */
	q->sched_props.timeslice_us = 1 * 1000;
	q->sched_props.preempt_timeout_us = 640 * 1000;

	if (xe_exec_queue_is_parallel(q)) {
		q->parallel.composite_fence_ctx = dma_fence_context_alloc(1);
		q->parallel.composite_fence_seqno = XE_FENCE_INITIAL_SEQNO;
	}
	if (q->flags & EXEC_QUEUE_FLAG_VM) {
		q->bind.fence_ctx = dma_fence_context_alloc(1);
		q->bind.fence_seqno = XE_FENCE_INITIAL_SEQNO;
	}

	for (i = 0; i < width; ++i) {
		err = xe_lrc_init(q->lrc + i, hwe, q, vm, SZ_16K);
		if (err)
			goto err_lrc;
	}

	err = q->ops->init(q);
	if (err)
		goto err_lrc;

	/*
	 * Normally the user vm holds an rpm ref to keep the device
	 * awake, and the context holds a ref for the vm, however for
	 * some engines we use the kernel's migrate vm underneath which
	 * offers no such rpm ref. Make sure we keep a ref here, so we
	 * can perform GuC CT actions when needed. Caller is expected to
	 * have already grabbed the rpm ref outside any sensitive locks.
	 */
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));

	return q;

err_lrc:
	for (i = i - 1; i >= 0; --i)
		xe_lrc_finish(q->lrc + i);
	kfree(q);
	return ERR_PTR(err);
}
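
/**
 * xe_exec_queue_create() - Create an exec queue
 * @xe: Xe device
 * @vm: VM the queue runs against, or NULL
 * @logical_mask: Mask of logical engine instances the queue may be placed on
 * @width: Number of engines per submission (parallel width)
 * @hwe: Hardware engine backing the queue
 * @flags: EXEC_QUEUE_FLAG_* flags
 *
 * Takes the VM lock, if a VM is supplied, around the internal allocation and
 * initialization.
 *
 * Return: Pointer to the new exec queue on success, ERR_PTR() on failure.
 */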
struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
					   u32 logical_mask, u16 width,
					   struct xe_hw_engine *hwe, u32 flags)
{
	struct ww_acquire_ctx ww;
	struct xe_exec_queue *q;
	int err;

	if (vm) {
		err = xe_vm_lock(vm, &ww, 0, true);
		if (err)
			return ERR_PTR(err);
	}
	q = __xe_exec_queue_create(xe, vm, logical_mask, width, hwe, flags);
	if (vm)
		xe_vm_unlock(vm, &ww);

	return q;
}
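
/**
 * xe_exec_queue_create_class() - Create an exec queue for an engine class
 * @xe: Xe device
 * @gt: GT to pick engines from
 * @vm: VM the queue runs against, or NULL
 * @class: Engine class to place the queue on
 * @flags: EXEC_QUEUE_FLAG_* flags
 *
 * Builds a logical mask from every non-reserved engine of @class on @gt and
 * creates a width-1 exec queue on the first matching engine.
 *
 * Return: Pointer to the new exec queue, ERR_PTR(-ENODEV) if no engine of
 * @class is available, or another ERR_PTR() on failure.
 */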
struct xe_exec_queue *xe_exec_queue_create_class(struct xe_device *xe, struct xe_gt *gt,
						 struct xe_vm *vm,
						 enum xe_engine_class class, u32 flags)
{
	struct xe_hw_engine *hwe, *hwe0 = NULL;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class == class) {
			logical_mask |= BIT(hwe->logical_instance);
			if (!hwe0)
				hwe0 = hwe;
		}
	}

	if (!logical_mask)
		return ERR_PTR(-ENODEV);

	return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags);
}
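
/**
 * xe_exec_queue_destroy() - Final exec queue destruction
 * @ref: The queue's reference count, which has dropped to zero
 *
 * kref release callback: drops the references held on queues linked through
 * the multi-GT list (unless this queue is itself a bind-engine child) and
 * asks the backend to finish the queue.
 */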
void xe_exec_queue_destroy(struct kref *ref)
{
	struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount);
	struct xe_exec_queue *eq, *next;

	if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) {
		list_for_each_entry_safe(eq, next, &q->multi_gt_list,
					 multi_gt_link)
			xe_exec_queue_put(eq);
	}

	q->ops->fini(q);
}
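
/**
 * xe_exec_queue_fini() - Free an exec queue's resources
 * @q: The exec queue
 *
 * Finishes the queue's LRCs, drops the VM reference and the memory-access
 * (rpm) reference taken for VM bind queues, then frees the queue itself.
 */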
void xe_exec_queue_fini(struct xe_exec_queue *q)
{
	int i;

	for (i = 0; i < q->width; ++i)
		xe_lrc_finish(q->lrc + i);
	if (q->vm)
		xe_vm_put(q->vm);
	if (q->flags & EXEC_QUEUE_FLAG_VM)
		xe_device_mem_access_put(gt_to_xe(q->gt));

	kfree(q);
}
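
/**
 * xe_exec_queue_lookup() - Look up an exec queue by user-visible id
 * @xef: Xe file the queue belongs to
 * @id: Exec queue id returned at creation time
 *
 * Return: Reference-counted exec queue pointer, or NULL if no queue with that
 * id exists for @xef. The caller must drop the reference with
 * xe_exec_queue_put().
 */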
struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
{
	struct xe_exec_queue *q;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_load(&xef->exec_queue.xa, id);
	if (q)
		xe_exec_queue_get(q);
	mutex_unlock(&xef->exec_queue.lock);

	return q;
}
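
/**
 * xe_exec_queue_device_get_max_priority() - Max priority the caller may set
 * @xe: Xe device
 *
 * Return: XE_EXEC_QUEUE_PRIORITY_HIGH if the caller has CAP_SYS_NICE,
 * XE_EXEC_QUEUE_PRIORITY_NORMAL otherwise.
 */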
enum xe_exec_queue_priority
xe_exec_queue_device_get_max_priority(struct xe_device *xe)
{
	return capable(CAP_SYS_NICE) ? XE_EXEC_QUEUE_PRIORITY_HIGH :
				       XE_EXEC_QUEUE_PRIORITY_NORMAL;
}

static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q,
				   u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, value > XE_EXEC_QUEUE_PRIORITY_HIGH))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, value > xe_exec_queue_device_get_max_priority(xe)))
		return -EPERM;

	return q->ops->set_priority(q, value);
}

static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q,
				    u64 value, bool create)
{
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return q->ops->set_timeslice(q, value);
}

static int exec_queue_set_preemption_timeout(struct xe_device *xe,
					     struct xe_exec_queue *q, u64 value,
					     bool create)
{
	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return q->ops->set_preempt_timeout(q, value);
}

static int exec_queue_set_compute_mode(struct xe_device *xe, struct xe_exec_queue *q,
				       u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_VM))
		return -EINVAL;

	if (value) {
		struct xe_vm *vm = q->vm;
		int err;

		if (XE_IOCTL_DBG(xe, xe_vm_in_fault_mode(vm)))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, !xe_vm_in_compute_mode(vm)))
			return -EOPNOTSUPP;

		if (XE_IOCTL_DBG(xe, q->width != 1))
			return -EINVAL;

		q->compute.context = dma_fence_context_alloc(1);
		spin_lock_init(&q->compute.lock);

		err = xe_vm_add_compute_exec_queue(vm, q);
		if (XE_IOCTL_DBG(xe, err))
			return err;

		q->flags |= EXEC_QUEUE_FLAG_COMPUTE_MODE;
		q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;
	}

	return 0;
}

static int exec_queue_set_persistence(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))
		return -EINVAL;

	if (value)
		q->flags |= EXEC_QUEUE_FLAG_PERSISTENT;
	else
		q->flags &= ~EXEC_QUEUE_FLAG_PERSISTENT;

	return 0;
}

static int exec_queue_set_job_timeout(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (!capable(CAP_SYS_NICE))
		return -EPERM;

	return q->ops->set_job_timeout(q, value);
}

static int exec_queue_set_acc_trigger(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
		return -EINVAL;

	q->usm.acc_trigger = value;

	return 0;
}

static int exec_queue_set_acc_notify(struct xe_device *xe, struct xe_exec_queue *q,
				     u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
		return -EINVAL;

	q->usm.acc_notify = value;

	return 0;
}

static int exec_queue_set_acc_granularity(struct xe_device *xe, struct xe_exec_queue *q,
					  u64 value, bool create)
{
	if (XE_IOCTL_DBG(xe, !create))
		return -EINVAL;

	if (XE_IOCTL_DBG(xe, !xe->info.supports_usm))
		return -EINVAL;

	q->usm.acc_granularity = value;

	return 0;
}

typedef int (*xe_exec_queue_set_property_fn)(struct xe_device *xe,
					     struct xe_exec_queue *q,
					     u64 value, bool create);

static const xe_exec_queue_set_property_fn exec_queue_set_property_funcs[] = {
	[XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY] = exec_queue_set_priority,
	[XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE] = exec_queue_set_timeslice,
	[XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT] = exec_queue_set_preemption_timeout,
	[XE_EXEC_QUEUE_SET_PROPERTY_COMPUTE_MODE] = exec_queue_set_compute_mode,
	[XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE] = exec_queue_set_persistence,
	[XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT] = exec_queue_set_job_timeout,
	[XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER] = exec_queue_set_acc_trigger,
	[XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY] = exec_queue_set_acc_notify,
	[XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY] = exec_queue_set_acc_granularity,
};

static int exec_queue_user_ext_set_property(struct xe_device *xe,
					    struct xe_exec_queue *q,
					    u64 extension,
					    bool create)
{
	u64 __user *address = u64_to_user_ptr(extension);
	struct drm_xe_ext_exec_queue_set_property ext;
	int err;
	u32 idx;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs)) ||
	    XE_IOCTL_DBG(xe, ext.pad))
		return -EINVAL;

	idx = array_index_nospec(ext.property, ARRAY_SIZE(exec_queue_set_property_funcs));
	return exec_queue_set_property_funcs[idx](xe, q, ext.value, create);
}

typedef int (*xe_exec_queue_user_extension_fn)(struct xe_device *xe,
					       struct xe_exec_queue *q,
					       u64 extension,
					       bool create);

static const xe_exec_queue_user_extension_fn exec_queue_user_extension_funcs[] = {
	[XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY] = exec_queue_user_ext_set_property,
};

#define MAX_USER_EXTENSIONS	16
static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
				      u64 extensions, int ext_number, bool create)
{
	u64 __user *address = u64_to_user_ptr(extensions);
	struct xe_user_extension ext;
	int err;
	u32 idx;

	if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
		return -E2BIG;

	err = __copy_from_user(&ext, address, sizeof(ext));
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, ext.pad) ||
	    XE_IOCTL_DBG(xe, ext.name >=
			 ARRAY_SIZE(exec_queue_user_extension_funcs)))
		return -EINVAL;

	idx = array_index_nospec(ext.name,
				 ARRAY_SIZE(exec_queue_user_extension_funcs));
	err = exec_queue_user_extension_funcs[idx](xe, q, extensions, create);
	if (XE_IOCTL_DBG(xe, err))
		return err;

	if (ext.next_extension)
		return exec_queue_user_extensions(xe, q, ext.next_extension,
						  ++ext_number, create);

	return 0;
}

static const enum xe_engine_class user_to_xe_engine_class[] = {
	[DRM_XE_ENGINE_CLASS_RENDER] = XE_ENGINE_CLASS_RENDER,
	[DRM_XE_ENGINE_CLASS_COPY] = XE_ENGINE_CLASS_COPY,
	[DRM_XE_ENGINE_CLASS_VIDEO_DECODE] = XE_ENGINE_CLASS_VIDEO_DECODE,
	[DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE] = XE_ENGINE_CLASS_VIDEO_ENHANCE,
	[DRM_XE_ENGINE_CLASS_COMPUTE] = XE_ENGINE_CLASS_COMPUTE,
};

static struct xe_hw_engine *
find_hw_engine(struct xe_device *xe,
	       struct drm_xe_engine_class_instance eci)
{
	u32 idx;

	if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class))
		return NULL;

	if (eci.gt_id >= xe->info.gt_count)
		return NULL;

	idx = array_index_nospec(eci.engine_class,
				 ARRAY_SIZE(user_to_xe_engine_class));

	return xe_gt_hw_engine(xe_device_get_gt(xe, eci.gt_id),
			       user_to_xe_engine_class[idx],
			       eci.engine_instance, true);
}

static u32 bind_exec_queue_logical_mask(struct xe_device *xe, struct xe_gt *gt,
					struct drm_xe_engine_class_instance *eci,
					u16 width, u16 num_placements)
{
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;
	u32 logical_mask = 0;

	if (XE_IOCTL_DBG(xe, width != 1))
		return 0;

	if (XE_IOCTL_DBG(xe, num_placements != 1))
		return 0;

	if (XE_IOCTL_DBG(xe, eci[0].engine_instance != 0))
		return 0;

	eci[0].engine_class = DRM_XE_ENGINE_CLASS_COPY;

	for_each_hw_engine(hwe, gt, id) {
		if (xe_hw_engine_is_reserved(hwe))
			continue;

		if (hwe->class ==
		    user_to_xe_engine_class[DRM_XE_ENGINE_CLASS_COPY])
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

static u32 calc_validate_logical_mask(struct xe_device *xe, struct xe_gt *gt,
				      struct drm_xe_engine_class_instance *eci,
				      u16 width, u16 num_placements)
{
	int len = width * num_placements;
	int i, j, n;
	u16 class;
	u16 gt_id;
	u32 return_mask = 0, prev_mask;

	if (XE_IOCTL_DBG(xe, !xe_device_guc_submission_enabled(xe) &&
			 len > 1))
		return 0;

	for (i = 0; i < width; ++i) {
		u32 current_mask = 0;

		for (j = 0; j < num_placements; ++j) {
			struct xe_hw_engine *hwe;

			n = j * width + i;

			hwe = find_hw_engine(xe, eci[n]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return 0;

			if (XE_IOCTL_DBG(xe, xe_hw_engine_is_reserved(hwe)))
				return 0;

			if (XE_IOCTL_DBG(xe, n && eci[n].gt_id != gt_id) ||
			    XE_IOCTL_DBG(xe, n && eci[n].engine_class != class))
				return 0;

			class = eci[n].engine_class;
			gt_id = eci[n].gt_id;

			if (width == 1 || !i)
				return_mask |= BIT(eci[n].engine_instance);
			current_mask |= BIT(eci[n].engine_instance);
		}

		/* Parallel submissions must be logically contiguous */
		if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1))
			return 0;

		prev_mask = current_mask;
	}

	return return_mask;
}
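
/**
 * xe_exec_queue_create_ioctl() - Userspace entry point for exec queue creation
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_create
 * @file: DRM file
 *
 * Validates the requested placement, creates the exec queue (or, for the
 * VM_BIND engine class, a chain of per-GT bind queues), applies any user
 * extensions and returns the new queue id to userspace.
 *
 * Return: 0 on success, negative error code on failure.
 */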
int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_create *args = data;
	struct drm_xe_engine_class_instance eci[XE_HW_ENGINE_MAX_INSTANCE];
	struct drm_xe_engine_class_instance __user *user_eci =
		u64_to_user_ptr(args->instances);
	struct xe_hw_engine *hwe;
	struct xe_vm *vm, *migrate_vm;
	struct xe_gt *gt;
	struct xe_exec_queue *q = NULL;
	u32 logical_mask;
	u32 id;
	u32 len;
	int err;

	if (XE_IOCTL_DBG(xe, args->flags) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	len = args->width * args->num_placements;
	if (XE_IOCTL_DBG(xe, !len || len > XE_HW_ENGINE_MAX_INSTANCE))
		return -EINVAL;

	err = __copy_from_user(eci, user_eci,
			       sizeof(struct drm_xe_engine_class_instance) *
			       len);
	if (XE_IOCTL_DBG(xe, err))
		return -EFAULT;

	if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count))
		return -EINVAL;

	if (eci[0].engine_class == DRM_XE_ENGINE_CLASS_VM_BIND) {
		for_each_gt(gt, xe, id) {
			struct xe_exec_queue *new;

			if (xe_gt_is_media_type(gt))
				continue;

			eci[0].gt_id = gt->info.id;
			logical_mask = bind_exec_queue_logical_mask(xe, gt, eci,
								    args->width,
								    args->num_placements);
			if (XE_IOCTL_DBG(xe, !logical_mask))
				return -EINVAL;

			hwe = find_hw_engine(xe, eci[0]);
			if (XE_IOCTL_DBG(xe, !hwe))
				return -EINVAL;

			/* The migration vm doesn't hold rpm ref */
			xe_device_mem_access_get(xe);

			migrate_vm = xe_migrate_get_vm(gt_to_tile(gt)->migrate);
			new = xe_exec_queue_create(xe, migrate_vm, logical_mask,
						   args->width, hwe,
						   EXEC_QUEUE_FLAG_PERSISTENT |
						   EXEC_QUEUE_FLAG_VM |
						   (id ?
						    EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD :
						    0));

			xe_device_mem_access_put(xe); /* now held by engine */

			xe_vm_put(migrate_vm);
			if (IS_ERR(new)) {
				err = PTR_ERR(new);
				if (q)
					goto put_exec_queue;
				return err;
			}
			if (id == 0)
				q = new;
			else
				list_add_tail(&new->multi_gt_list,
					      &q->multi_gt_link);
		}
	} else {
		gt = xe_device_get_gt(xe, eci[0].gt_id);
		logical_mask = calc_validate_logical_mask(xe, gt, eci,
							  args->width,
							  args->num_placements);
		if (XE_IOCTL_DBG(xe, !logical_mask))
			return -EINVAL;

		hwe = find_hw_engine(xe, eci[0]);
		if (XE_IOCTL_DBG(xe, !hwe))
			return -EINVAL;

		vm = xe_vm_lookup(xef, args->vm_id);
		if (XE_IOCTL_DBG(xe, !vm))
			return -ENOENT;

		err = down_read_interruptible(&vm->lock);
		if (err) {
			xe_vm_put(vm);
			return err;
		}

		if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) {
			up_read(&vm->lock);
			xe_vm_put(vm);
			return -ENOENT;
		}

		q = xe_exec_queue_create(xe, vm, logical_mask,
					 args->width, hwe,
					 xe_vm_no_dma_fences(vm) ? 0 :
					 EXEC_QUEUE_FLAG_PERSISTENT);
		up_read(&vm->lock);
		xe_vm_put(vm);
		if (IS_ERR(q))
			return PTR_ERR(q);
	}

	if (args->extensions) {
		err = exec_queue_user_extensions(xe, q, args->extensions, 0, true);
		if (XE_IOCTL_DBG(xe, err))
			goto put_exec_queue;
	}

	if (XE_IOCTL_DBG(xe, q->vm && xe_vm_in_compute_mode(q->vm) !=
			 !!(q->flags & EXEC_QUEUE_FLAG_COMPUTE_MODE))) {
		err = -EOPNOTSUPP;
		goto put_exec_queue;
	}

	q->persistent.xef = xef;

	mutex_lock(&xef->exec_queue.lock);
	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
	mutex_unlock(&xef->exec_queue.lock);
	if (err)
		goto put_exec_queue;

	args->exec_queue_id = id;

	return 0;

put_exec_queue:
	xe_exec_queue_kill(q);
	xe_exec_queue_put(q);
	return err;
}
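
/**
 * xe_exec_queue_get_property_ioctl() - Query an exec queue property
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_get_property
 * @file: DRM file
 *
 * Currently only reports whether the queue has been banned.
 *
 * Return: 0 on success, negative error code on failure.
 */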
int xe_exec_queue_get_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_get_property *args = data;
	struct xe_exec_queue *q;
	int ret;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	switch (args->property) {
	case XE_EXEC_QUEUE_GET_PROPERTY_BAN:
		args->value = !!(q->flags & EXEC_QUEUE_FLAG_BANNED);
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	xe_exec_queue_put(q);

	return ret;
}

static void exec_queue_kill_compute(struct xe_exec_queue *q)
{
	if (!xe_vm_in_compute_mode(q->vm))
		return;

	down_write(&q->vm->lock);
	list_del(&q->compute.link);
	--q->vm->preempt.num_exec_queues;
	if (q->compute.pfence) {
		dma_fence_enable_sw_signaling(q->compute.pfence);
		dma_fence_put(q->compute.pfence);
		q->compute.pfence = NULL;
	}
	up_write(&q->vm->lock);
}

/**
 * xe_exec_queue_is_lr() - Whether an exec_queue is long-running
 * @q: The exec_queue
 *
 * Return: True if the exec_queue is long-running, false otherwise.
 */
bool xe_exec_queue_is_lr(struct xe_exec_queue *q)
{
	return q->vm && xe_vm_no_dma_fences(q->vm) &&
		!(q->flags & EXEC_QUEUE_FLAG_VM);
}

static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q)
{
	return q->lrc->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc) - 1;
}

/**
 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
 * @q: The exec_queue
 *
 * Return: True if the exec_queue's ring is full, false otherwise.
 */
bool xe_exec_queue_ring_full(struct xe_exec_queue *q)
{
	struct xe_lrc *lrc = q->lrc;
	s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;

	return xe_exec_queue_num_job_inflight(q) >= max_job;
}

/**
 * xe_exec_queue_is_idle() - Whether an exec_queue is idle.
 * @q: The exec_queue
 *
 * FIXME: Need to determine what to use as the short-lived
 * timeline lock for the exec_queues, so that the return value
 * of this function becomes more than just an advisory
 * snapshot in time. The timeline lock must protect the
 * seqno from racing submissions on the same exec_queue.
 * Typically vm->resv, but user-created timeline locks use the migrate vm
 * and never grab the migrate vm->resv so we have a race there.
 *
 * Return: True if the exec_queue is idle, false otherwise.
 */
bool xe_exec_queue_is_idle(struct xe_exec_queue *q)
{
	if (XE_WARN_ON(xe_exec_queue_is_parallel(q)))
		return false;

	return xe_lrc_seqno(&q->lrc[0]) ==
		q->lrc[0].fence_ctx.next_seqno - 1;
}
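
/**
 * xe_exec_queue_kill() - Kill an exec queue and its multi-GT siblings
 * @q: The exec queue
 *
 * Asks the backend to kill @q and every queue linked on its multi-GT list,
 * removing each from its VM's compute-mode bookkeeping where applicable.
 */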
void xe_exec_queue_kill(struct xe_exec_queue *q)
{
	struct xe_exec_queue *eq = q, *next;

	list_for_each_entry_safe(eq, next, &eq->multi_gt_list,
				 multi_gt_link) {
		q->ops->kill(eq);
		exec_queue_kill_compute(eq);
	}

	q->ops->kill(q);
	exec_queue_kill_compute(q);
}
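
/**
 * xe_exec_queue_destroy_ioctl() - Userspace entry point for exec queue destruction
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_destroy
 * @file: DRM file
 *
 * Removes the queue from the file's xarray, kills it unless it was created
 * persistent (persistent queues are handed over to the device), and drops the
 * file's reference.
 *
 * Return: 0 on success, negative error code on failure.
 */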
int xe_exec_queue_destroy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_destroy *args = data;
	struct xe_exec_queue *q;

	if (XE_IOCTL_DBG(xe, args->pad) ||
	    XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	mutex_lock(&xef->exec_queue.lock);
	q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id);
	mutex_unlock(&xef->exec_queue.lock);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (!(q->flags & EXEC_QUEUE_FLAG_PERSISTENT))
		xe_exec_queue_kill(q);
	else
		xe_device_add_persistent_exec_queues(xe, q);

	trace_xe_exec_queue_close(q);
	xe_exec_queue_put(q);

	return 0;
}
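
/**
 * xe_exec_queue_set_property_ioctl() - Set an exec queue property
 * @dev: DRM device
 * @data: Pointer to struct drm_xe_exec_queue_set_property
 * @file: DRM file
 *
 * Dispatches to the matching entry in exec_queue_set_property_funcs[] and
 * then processes any chained user extensions.
 *
 * Return: 0 on success, negative error code on failure.
 */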
int xe_exec_queue_set_property_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = to_xe_file(file);
	struct drm_xe_exec_queue_set_property *args = data;
	struct xe_exec_queue *q;
	int ret;
	u32 idx;

	if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1]))
		return -EINVAL;

	q = xe_exec_queue_lookup(xef, args->exec_queue_id);
	if (XE_IOCTL_DBG(xe, !q))
		return -ENOENT;

	if (XE_IOCTL_DBG(xe, args->property >=
			 ARRAY_SIZE(exec_queue_set_property_funcs))) {
		ret = -EINVAL;
		goto out;
	}

	idx = array_index_nospec(args->property,
				 ARRAY_SIZE(exec_queue_set_property_funcs));
	ret = exec_queue_set_property_funcs[idx](xe, q, args->value, false);
	if (XE_IOCTL_DBG(xe, ret))
		goto out;

	if (args->extensions)
		ret = exec_queue_user_extensions(xe, q, args->extensions, 0,
						 false);
out:
	xe_exec_queue_put(q);

	return ret;
}