/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *
 */
# include "i915_drv.h"
2016-10-20 17:15:03 +08:00
# include "gvt.h"
2016-05-01 02:48:25 -04:00
# define _EL_OFFSET_STATUS 0x234
# define _EL_OFFSET_STATUS_BUF 0x370
# define _EL_OFFSET_STATUS_PTR 0x3A0
2020-03-04 11:23:06 +08:00
# define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
2016-05-01 02:48:25 -04:00
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
		((a)->lrca == (b)->lrca))

static int context_switch_events[] = {
	[RCS0]  = RCS_AS_CONTEXT_SWITCH,
	[BCS0]  = BCS_AS_CONTEXT_SWITCH,
	[VCS0]  = VCS_AS_CONTEXT_SWITCH,
	[VCS1]  = VCS2_AS_CONTEXT_SWITCH,
	[VECS0] = VECS_AS_CONTEXT_SWITCH,
};

static int to_context_switch_event(const struct intel_engine_cs *engine)
{
	if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
		return -EINVAL;

	return context_switch_events[engine->id];
}
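
/*
 * Promote the pending ELSP write to be the running slot. If a context
 * was in flight, execution resumes from ctx[0] of the newly promoted
 * slot; otherwise the running context stays NULL.
 */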
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
{
	gvt_dbg_el("[before] running slot %d/context %x pending slot %d\n",
		   execlist->running_slot ?
		   execlist->running_slot->index : -1,
		   execlist->running_context ?
		   execlist->running_context->context_id : 0,
		   execlist->pending_slot ?
		   execlist->pending_slot->index : -1);

	execlist->running_slot = execlist->pending_slot;
	execlist->pending_slot = NULL;
	execlist->running_context = execlist->running_context ?
		&execlist->running_slot->ctx[0] : NULL;

	gvt_dbg_el("[after] running slot %d/context %x pending slot %d\n",
		   execlist->running_slot ?
		   execlist->running_slot->index : -1,
		   execlist->running_context ?
		   execlist->running_context->context_id : 0,
		   execlist->pending_slot ?
		   execlist->pending_slot->index : -1);
}
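
/*
 * Refresh the vGPU's virtual EXECLIST_STATUS register so that the
 * active/valid bits, write pointer and context ID the guest reads back
 * match the emulated slot state.
 */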
static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *desc = execlist->running_context;
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct execlist_status_format status;
	u32 status_reg =
		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (running) {
		status.current_execlist_pointer = !!running->index;
		status.execlist_write_pointer = !!!running->index;
		status.execlist_0_active = status.execlist_0_valid =
			!!!(running->index);
		status.execlist_1_active = status.execlist_1_valid =
			!!(running->index);
	} else {
		status.context_id = 0;
		status.execlist_0_active = status.execlist_0_valid = 0;
		status.execlist_1_active = status.execlist_1_valid = 0;
	}

	status.context_id = desc ? desc->context_id : 0;
	status.execlist_queue_full = !!(pending);

	vgpu_vreg(vgpu, status_reg) = status.ldw;
	vgpu_vreg(vgpu, status_reg + 4) = status.udw;

	gvt_dbg_el("vgpu%d: status reg offset %x ldw %x udw %x\n",
		   vgpu->id, status_reg, status.ldw, status.udw);
}
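
/*
 * Append one context-status event to the virtual CSB: advance the write
 * pointer (entries 0..5; 0x7 is the reset value), store the event in the
 * vreg buffer, mirror both the event and the write pointer into the
 * guest's HWSP, and finally raise the context-switch interrupt unless
 * the caller defers it.
 */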
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
			       struct execlist_context_status_format *status,
			       bool trigger_interrupt_later)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 write_pointer;
	u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
	unsigned long hwsp_gpa;

	ctx_status_ptr_reg =
		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
	ctx_status_buf_reg =
		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);

	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);

	write_pointer = ctx_status_ptr.write_ptr;

	if (write_pointer == 0x7)
		write_pointer = 0;
	else {
		++write_pointer;
		write_pointer %= 0x6;
	}

	offset = ctx_status_buf_reg + write_pointer * 8;

	vgpu_vreg(vgpu, offset) = status->ldw;
	vgpu_vreg(vgpu, offset + 4) = status->udw;

	ctx_status_ptr.write_ptr = write_pointer;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;

	/* Update the CSB and CSB write pointer in HWSP */
	hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
					 vgpu->hws_pga[execlist->engine->id]);
	if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
		intel_gvt_hypervisor_write_gpa(vgpu,
			hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
			status, 8);
		intel_gvt_hypervisor_write_gpa(vgpu,
			hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
			&write_pointer, 4);
	}

	gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
		   vgpu->id, write_pointer, offset, status->ldw, status->udw);

	if (trigger_interrupt_later)
		return;

	intel_vgpu_trigger_virtual_event(vgpu,
					 to_context_switch_event(execlist->engine));
}
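
/*
 * Emulate the schedule-out of a guest context: report an element switch
 * when ctx0 finishes with a valid ctx1 behind it, or a context-complete
 * plus active-to-idle when the last element finishes; if a pending slot
 * exists it is promoted and an idle-to-active event follows.
 */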
static int emulate_execlist_ctx_schedule_out(
		struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format *ctx)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
	struct execlist_ctx_descriptor_format *ctx1 = &running->ctx[1];
	struct execlist_context_status_format status;

	memset(&status, 0, sizeof(status));

	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);

	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
		gvt_vgpu_err("schedule out context is not running context, "
			     "ctx id %x running ctx id %x\n",
			     ctx->context_id,
			     execlist->running_context->context_id);
		return -EINVAL;
	}

	/* ctx1 is valid, ctx0/ctx is scheduled-out -> element switch */
	if (valid_context(ctx1) && same_context(ctx0, ctx)) {
		gvt_dbg_el("ctx 1 valid, ctx/ctx 0 is scheduled-out\n");

		execlist->running_context = ctx1;

		emulate_execlist_status(execlist);

		status.context_complete = status.element_switch = 1;
		status.context_id = ctx->context_id;

		emulate_csb_update(execlist, &status, false);
	/*
	 * ctx1 is not valid, ctx == ctx0
	 * ctx1 is valid, ctx1 == ctx
	 *	--> last element is finished
	 * emulate:
	 *	active-to-idle if there is *no* pending execlist
	 *	context-complete if there *is* pending execlist
	 */
	} else if ((!valid_context(ctx1) && same_context(ctx0, ctx))
		   || (valid_context(ctx1) && same_context(ctx1, ctx))) {
		gvt_dbg_el("need to switch virtual execlist slot\n");

		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.context_complete = status.active_to_idle = 1;
		status.context_id = ctx->context_id;

		if (!pending) {
			emulate_csb_update(execlist, &status, false);
		} else {
			emulate_csb_update(execlist, &status, true);

			memset(&status, 0, sizeof(status));

			status.idle_to_active = 1;
			status.context_id = 0;

			emulate_csb_update(execlist, &status, false);
		}
	} else {
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
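
/*
 * Pick the slot the guest's next ELSP write should land in, based on the
 * virtual status register's write pointer; NULL when both slots are busy.
 */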
static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
		struct intel_vgpu_execlist *execlist)
{
	struct intel_vgpu *vgpu = execlist->vgpu;
	u32 status_reg =
		execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
	struct execlist_status_format status;

	status.ldw = vgpu_vreg(vgpu, status_reg);
	status.udw = vgpu_vreg(vgpu, status_reg + 4);

	if (status.execlist_queue_full) {
		gvt_vgpu_err("virtual execlist slots are full\n");
		return NULL;
	}

	return &execlist->slot[status.execlist_write_pointer];
}
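
/*
 * Emulate a guest ELSP submission: with no running slot this is a plain
 * idle-to-active; when the new ctx[0] continues what is already running,
 * report lite-restore + preempted; otherwise park the write as the
 * pending slot until the running slot drains.
 */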
static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist,
		struct execlist_ctx_descriptor_format ctx[2])
{
	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
	struct intel_vgpu_execlist_slot *slot =
		get_next_execlist_slot(execlist);
	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
	struct execlist_context_status_format status;
	struct intel_vgpu *vgpu = execlist->vgpu;

	gvt_dbg_el("emulate schedule-in\n");

	if (!slot) {
		gvt_vgpu_err("no available execlist slot\n");
		return -EINVAL;
	}

	memset(&status, 0, sizeof(status));
	memset(slot->ctx, 0, sizeof(slot->ctx));

	slot->ctx[0] = ctx[0];
	slot->ctx[1] = ctx[1];

	gvt_dbg_el("alloc slot index %d ctx 0 %x ctx 1 %x\n",
		   slot->index, ctx[0].context_id,
		   ctx[1].context_id);

	/*
	 * no running execlist, make this write bundle as running execlist
	 * -> idle-to-active
	 */
	if (!running) {
		gvt_dbg_el("no current running execlist\n");

		execlist->running_slot = slot;
		execlist->pending_slot = NULL;
		execlist->running_context = &slot->ctx[0];

		gvt_dbg_el("running slot index %d running context %x\n",
			   execlist->running_slot->index,
			   execlist->running_context->context_id);

		emulate_execlist_status(execlist);

		status.idle_to_active = 1;
		status.context_id = 0;

		emulate_csb_update(execlist, &status, false);
		return 0;
	}

	ctx0 = &running->ctx[0];
	ctx1 = &running->ctx[1];

	gvt_dbg_el("current running slot index %d ctx 0 %x ctx 1 %x\n",
		   running->index, ctx0->context_id, ctx1->context_id);

	/*
	 * already has a running execlist
	 *	a. running ctx1 is valid,
	 *	   ctx0 is finished, and running ctx1 == new execlist ctx[0]
	 *	b. running ctx1 is not valid,
	 *	   ctx0 == new execlist ctx[0]
	 * ----> lite-restore + preempted
	 */
	if ((valid_context(ctx1) && same_context(ctx1, &slot->ctx[0]) &&
	     /* condition a */
	     (!same_context(ctx0, execlist->running_context))) ||
	    (!valid_context(ctx1) &&
	     same_context(ctx0, &slot->ctx[0]))) { /* condition b */
		gvt_dbg_el("need to switch virtual execlist slot\n");

		execlist->pending_slot = slot;
		switch_virtual_execlist_slot(execlist);

		emulate_execlist_status(execlist);

		status.lite_restore = status.preempted = 1;
		status.context_id = ctx[0].context_id;

		emulate_csb_update(execlist, &status, false);
	} else {
		gvt_dbg_el("emulate as pending slot\n");
		/*
		 * otherwise
		 * --> emulate pending execlist exists but no preemption case
		 */
		execlist->pending_slot = slot;
		emulate_execlist_status(execlist);
	}

	return 0;
}

#define get_desc_from_elsp_dwords(ed, i) \
	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
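
/*
 * Workload prepare hook: if this workload heads a new ELSP submission,
 * replay the guest's schedule-in against the virtual execlist state
 * before the workload is dispatched.
 */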
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct execlist_ctx_descriptor_format ctx[2];
	int ret;

	if (!workload->emulate_schedule_in)
		return 0;

	ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
	ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);

	ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
					   ctx);
	if (ret) {
		gvt_vgpu_err("fail to emulate execlist schedule in\n");
		return ret;
	}
	return 0;
}
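
/*
 * Workload complete hook: unless the workload failed, the engine is
 * being reset, or the next queued workload lite-restores the same
 * context, emulate the schedule-out of this workload's context.
 */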
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_execlist *execlist =
		&s->execlist[workload->engine->id];
	struct intel_vgpu_workload *next_workload;
	struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
	bool lite_restore = false;
	int ret = 0;

	gvt_dbg_el("complete workload %p status %d\n",
		   workload, workload->status);

	if (workload->status || vgpu->resetting_eng & workload->engine->mask)
		goto out;

	if (!list_empty(workload_q_head(vgpu, workload->engine))) {
		struct execlist_ctx_descriptor_format *this_desc, *next_desc;

		next_workload = container_of(next,
				struct intel_vgpu_workload, list);
		this_desc = &workload->ctx_desc;
		next_desc = &next_workload->ctx_desc;

		lite_restore = same_context(this_desc, next_desc);
	}

	if (lite_restore) {
		gvt_dbg_el("next context == current - no schedule-out\n");
		goto out;
	}

	ret = emulate_execlist_ctx_schedule_out(execlist, &workload->ctx_desc);
out:
	return ret;
}
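
/*
 * Turn one ELSP context descriptor into a vGPU workload and queue it;
 * emulate_schedule_in marks the workload that should replay the guest's
 * ELSP write when it is prepared.
 */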
static int submit_context(struct intel_vgpu *vgpu,
			  const struct intel_engine_cs *engine,
			  struct execlist_ctx_descriptor_format *desc,
			  bool emulate_schedule_in)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_workload *workload = NULL;

	workload = intel_vgpu_create_workload(vgpu, engine, desc);
	if (IS_ERR(workload))
		return PTR_ERR(workload);

	workload->prepare = prepare_execlist_workload;
	workload->complete = complete_execlist_workload;
	workload->emulate_schedule_in = emulate_schedule_in;

	if (emulate_schedule_in)
		workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;

	gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
		   emulate_schedule_in);

	intel_vgpu_queue_workload(workload);
	return 0;
}
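
/*
 * Handle a guest ELSP write: desc0 must be valid and every valid
 * descriptor must use privileged (PPGTT) access; a workload is then
 * submitted per valid descriptor, with schedule-in emulated on the first.
 */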
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
			       const struct intel_engine_cs *engine)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
	struct execlist_ctx_descriptor_format *desc[2];
	int i, ret;

	desc[0] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 0);
	desc[1] = get_desc_from_elsp_dwords(&execlist->elsp_dwords, 1);

	if (!desc[0]->valid) {
		gvt_vgpu_err("invalid elsp submission, desc0 is invalid\n");
		goto inv_desc;
	}

	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i]->valid)
			continue;
		if (!desc[i]->privilege_access) {
			gvt_vgpu_err("unexpected GGTT elsp submission\n");
			goto inv_desc;
		}
	}

	/* submit workload */
	for (i = 0; i < ARRAY_SIZE(desc); i++) {
		if (!desc[i]->valid)
			continue;
		ret = submit_context(vgpu, engine, desc[i], i == 0);
		if (ret) {
			gvt_vgpu_err("failed to submit desc %d\n", i);
			return ret;
		}
	}

	return 0;

inv_desc:
	gvt_vgpu_err("descriptors content: desc0 %08x %08x desc1 %08x %08x\n",
		     desc[0]->udw, desc[0]->ldw, desc[1]->udw, desc[1]->ldw);
	return -EINVAL;
}
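
/*
 * Reset one engine's virtual execlist: both slots empty and the CSB
 * status pointer back at its hardware reset value (write_ptr == 0x7).
 */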
static void init_vgpu_execlist(struct intel_vgpu *vgpu,
			       const struct intel_engine_cs *engine)
{
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
	struct execlist_context_status_pointer_format ctx_status_ptr;
	u32 ctx_status_ptr_reg;

	memset(execlist, 0, sizeof(*execlist));

	execlist->vgpu = vgpu;
	execlist->engine = engine;
	execlist->slot[0].index = 0;
	execlist->slot[1].index = 1;

	ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
	ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
	ctx_status_ptr.read_ptr = 0;
	ctx_status_ptr.write_ptr = 0x7;
	vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}

static void clean_execlist(struct intel_vgpu *vgpu,
			   intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	struct intel_vgpu_submission *s = &vgpu->submission;
	intel_engine_mask_t tmp;

	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) {
		kfree(s->ring_scan_buffer[engine->id]);
		s->ring_scan_buffer[engine->id] = NULL;
		s->ring_scan_buffer_size[engine->id] = 0;
	}
}

static void reset_execlist(struct intel_vgpu *vgpu,
			   intel_engine_mask_t engine_mask)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;

	for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
		init_vgpu_execlist(vgpu, engine);
}

static int init_execlist(struct intel_vgpu *vgpu,
			 intel_engine_mask_t engine_mask)
{
	reset_execlist(vgpu, engine_mask);
	return 0;
}

const struct intel_vgpu_submission_ops intel_vgpu_execlist_submission_ops = {
	.name = "execlist",
	.init = init_execlist,
	.reset = reset_execlist,
	.clean = clean_execlist,
};