/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
*
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 * Contributors:
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
*
*/

#ifndef _GVT_SCHEDULER_H_
#define _GVT_SCHEDULER_H_

#include "gt/intel_engine_types.h"

#include "execlist.h"
#include "interrupt.h"
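
/*
 * Device-wide workload scheduler state: one instance per GVT device,
 * shared by all vGPUs running on it.
 */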
struct intel_gvt_workload_scheduler {
	struct intel_vgpu *current_vgpu;
	struct intel_vgpu *next_vgpu;
	struct intel_vgpu_workload *current_workload[I915_NUM_ENGINES];
	bool need_reschedule;

	spinlock_t mmio_context_lock;
	/* can be NULL when owner is host */
	struct intel_vgpu *engine_owner[I915_NUM_ENGINES];

	wait_queue_head_t workload_complete_wq;
	struct task_struct *thread[I915_NUM_ENGINES];
	wait_queue_head_t waitq[I915_NUM_ENGINES];

	void *sched_data;
	const struct intel_gvt_sched_policy_ops *sched_ops;
};
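
/*
 * Illustrative sketch only (not part of this header): a scheduling policy
 * exposes an intel_gvt_sched_policy_ops table (see sched_policy.h for the
 * real definition) and parks its private bookkeeping behind sched_data.
 * The "init" hook and the example_* names below are assumptions:
 *
 *	static int example_sched_init(struct intel_gvt *gvt)
 *	{
 *		struct example_sched_data *data;
 *
 *		data = kzalloc(sizeof(*data), GFP_KERNEL);
 *		if (!data)
 *			return -ENOMEM;
 *		gvt->scheduler.sched_data = data;
 *		return 0;
 *	}
 */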

#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
	struct drm_i915_gem_object *obj;
	unsigned long guest_gma;
	unsigned long shadow_gma;
	void *shadow_va;
	u32 size;
};

#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
	unsigned long guest_gma;
	unsigned long shadow_gma;
	unsigned valid;
};

struct intel_shadow_wa_ctx {
	struct shadow_indirect_ctx indirect_ctx;
	struct shadow_per_ctx per_ctx;
};
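
/*
 * Sketch of how the masks above decode the raw INDIRECT_CTX and
 * BB_PER_CTX_PTR values read from the guest ring context (assumed usage,
 * for illustration; the size field is counted in cachelines):
 *
 *	wa_ctx->indirect_ctx.guest_gma = indirect_ctx & INDIRECT_CTX_ADDR_MASK;
 *	wa_ctx->indirect_ctx.size = (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
 *				    CACHELINE_BYTES;
 *	wa_ctx->per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK;
 *	wa_ctx->per_ctx.valid = per_ctx & 1;
 */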

struct intel_vgpu_workload {
	struct intel_vgpu *vgpu;
	const struct intel_engine_cs *engine;
	struct i915_request *req;
	/* has this workload been dispatched to i915? */
	bool dispatched;
	bool shadow;	/* has the guest request been shadowed yet? */
	int status;

	struct intel_vgpu_mm *shadow_mm;
	struct list_head lri_shadow_mm; /* for PPGTT load cmd */

	/* different submission models may need different handlers */
	int (*prepare)(struct intel_vgpu_workload *);
	int (*complete)(struct intel_vgpu_workload *);
	struct list_head list;

	DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
	void *shadow_ring_buffer_va;

	/* execlist context information */
	struct execlist_ctx_descriptor_format ctx_desc;
	struct execlist_ring_context *ring_context;
	unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
	unsigned long guest_rb_head;
	bool restore_inhibit;
	struct intel_vgpu_elsp_dwords elsp_dwords;
	bool emulate_schedule_in;
	atomic_t shadow_ctx_active;
	wait_queue_head_t shadow_ctx_status_wq;
	u64 ring_context_gpa;

	/* shadow batch buffer */
	struct list_head shadow_bb;
	struct intel_shadow_wa_ctx wa_ctx;

	/* OA registers */
	u32 oactxctrl;
	u32 flex_mmio[7];
};
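
/*
 * One shadowed copy of a guest batch buffer, linked into
 * intel_vgpu_workload.shadow_bb through @list; bb_start_cmd_va points at
 * the MI_BATCH_BUFFER_START command in the shadow ring buffer so the
 * batch address can be patched when the workload is dispatched.
 */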
struct intel_vgpu_shadow_bb {
	struct list_head list;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void *va;
	u32 *bb_start_cmd_va;
	unsigned long bb_offset;
	bool ppgtt;
};

#define workload_q_head(vgpu, e) \
	(&(vgpu)->submission.workload_q_head[(e)->id])
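
/*
 * Usage sketch (illustration only; assumes the caller holds whatever lock
 * protects the per-vGPU submission state). Workloads are linked through
 * intel_vgpu_workload.list:
 *
 *	struct intel_vgpu_workload *pos;
 *
 *	list_for_each_entry(pos, workload_q_head(vgpu, engine), list)
 *		inspect_workload(pos);
 *
 * where inspect_workload() is a hypothetical caller-side helper.
 */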

void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt);

void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);

int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);

void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
				 intel_engine_mask_t engine_mask);

void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);

int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
				     intel_engine_mask_t engine_mask,
				     unsigned int interface);

extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;

struct intel_vgpu_workload *
intel_vgpu_create_workload(struct intel_vgpu *vgpu,
			   const struct intel_engine_cs *engine,
			   struct execlist_ctx_descriptor_format *desc);

void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
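
/*
 * Typical lifecycle, shown only to illustrate how the declarations above
 * fit together (error handling elided):
 *
 *	workload = intel_vgpu_create_workload(vgpu, engine, desc);
 *	if (IS_ERR(workload))
 *		return PTR_ERR(workload);
 *	intel_vgpu_queue_workload(workload);
 *
 * The per-engine scheduler thread then dispatches the workload to i915 via
 * workload->prepare(), and once the i915 request completes,
 * workload->complete() runs and the workload is released with
 * intel_vgpu_destroy_workload().
 */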

void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
				intel_engine_mask_t engine_mask);

#endif