/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Anhua Xu
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"

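/* Return true if any engine of this vGPU has a workload queued. */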
static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
{
	enum intel_engine_id i;
	struct intel_engine_cs *engine;

	for_each_engine(engine, vgpu->gvt->dev_priv, i) {
		if (!list_empty(workload_q_head(vgpu, i)))
			return true;
	}

	return false;
}

/* Give a vGPU higher scheduling priority for 2 seconds after it starts */
#define GVT_SCHED_VGPU_PRI_TIME  2

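/* Per-vGPU scheduling state used by the time-based scheduler (TBS). */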
struct vgpu_sched_data {
	struct list_head lru_list;
	struct intel_vgpu *vgpu;
	bool active;
	bool pri_sched;
	ktime_t pri_time;

	ktime_t sched_in_time;
	ktime_t sched_time;
	ktime_t left_ts;
	ktime_t allocated_ts;

	struct vgpu_sched_ctl sched_ctl;
};

struct gvt_sched_data {
	struct intel_gvt *gvt;
	struct hrtimer timer;
	unsigned long period;
	struct list_head lru_runq_head;
	ktime_t expire_time;
};

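/* Account the time since the vGPU was scheduled in: charge it against the
 * vGPU's remaining timeslice and accumulate it into its total sched_time.
 * The idle vGPU is never accounted.
 */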
static void vgpu_update_timeslice(struct intel_vgpu *vgpu, ktime_t cur_time)
{
	ktime_t delta_ts;
	struct vgpu_sched_data *vgpu_data;

	if (!vgpu || vgpu == vgpu->gvt->idle_vgpu)
		return;

	vgpu_data = vgpu->sched_data;
	delta_ts = ktime_sub(cur_time, vgpu_data->sched_in_time);
	vgpu_data->sched_time = ktime_add(vgpu_data->sched_time, delta_ts);
	vgpu_data->left_ts = ktime_sub(vgpu_data->left_ts, delta_ts);
	vgpu_data->sched_in_time = cur_time;
}

#define GVT_TS_BALANCE_PERIOD_MS 100
#define GVT_TS_BALANCE_STAGE_NUM 10

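/* Redistribute timeslices across the vGPUs on the run queue. At stage 0 each
 * vGPU gets a fresh, weight-proportional share of the 100ms balance period;
 * in the other stages the allocated share is added on top of whatever is
 * left over (or owed) from previous stages.
 */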
static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct list_head *pos;
	static uint64_t stage_check;
	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

	/* The timeslice accumulation is reset at stage 0, where it is
	 * reallocated without carrying over the previous debt.
	 */
	if (stage == 0) {
		int total_weight = 0;
		ktime_t fair_timeslice;

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			total_weight += vgpu_data->sched_ctl.weight;
		}

		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
		}
	} else {
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

			/* The timeslice for the next 100ms should add the
			 * left/debt slice of previous stages.
			 */
			vgpu_data->left_ts += vgpu_data->allocated_ts;
		}
	}
}

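/* Switch the scheduler from current_vgpu to next_vgpu once every engine has
 * completed its current workload, then wake up the workload dispatch threads.
 */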
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	enum intel_engine_id i;
	struct intel_engine_cs *engine;
	struct vgpu_sched_data *vgpu_data;
	ktime_t cur_time;

	/* no need to schedule if next_vgpu is the same as current_vgpu,
	 * let the scheduler choose next_vgpu again by setting it to NULL.
	 */
	if (scheduler->next_vgpu == scheduler->current_vgpu) {
		scheduler->next_vgpu = NULL;
		return;
	}

	/*
	 * after the flag is set, workload dispatch thread will
	 * stop dispatching workload for current vgpu
	 */
	scheduler->need_reschedule = true;

	/* still have uncompleted workload? */
	for_each_engine(engine, gvt->dev_priv, i) {
		if (scheduler->current_workload[i])
			return;
	}

	cur_time = ktime_get();
	vgpu_update_timeslice(scheduler->current_vgpu, cur_time);
	vgpu_data = scheduler->next_vgpu->sched_data;
	vgpu_data->sched_in_time = cur_time;

	/* switch current vgpu */
	scheduler->current_vgpu = scheduler->next_vgpu;
	scheduler->next_vgpu = NULL;

	scheduler->need_reschedule = false;

	/* wake up workload dispatch thread */
	for_each_engine(engine, gvt->dev_priv, i)
		wake_up(&scheduler->waitq[i]);
}

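/* Pick the first vGPU on the LRU run queue that has pending workloads and
 * either is still inside its start-priority window or has timeslice left.
 */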
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;
	struct list_head *head = &sched_data->lru_runq_head;
	struct list_head *pos;

	/* search a vgpu with pending workload */
	list_for_each(pos, head) {

		vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
			continue;

		if (vgpu_data->pri_sched) {
			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
				vgpu = vgpu_data->vgpu;
				break;
			} else
				vgpu_data->pri_sched = false;
		}

		/* Return the vGPU only if it has time slice left */
		if (vgpu_data->left_ts > 0) {
			vgpu = vgpu_data->vgpu;
			break;
		}
	}

	return vgpu;
}

/* in nanosecond */
#define GVT_DEFAULT_TIME_SLICE 1000000

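/* Core of the time-based scheduler: pick the next vGPU to run (or the idle
 * vGPU if nobody is ready), rotate it to the tail of the LRU run queue unless
 * it is still in its start-priority window, and try to switch to it.
 */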
static void tbs_sched_func(struct gvt_sched_data *sched_data)
{
	struct intel_gvt *gvt = sched_data->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct vgpu_sched_data *vgpu_data;
	struct intel_vgpu *vgpu = NULL;

	/* no active vgpu or has already had a target */
	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
		goto out;

	vgpu = find_busy_vgpu(sched_data);
	if (vgpu) {
		scheduler->next_vgpu = vgpu;
		vgpu_data = vgpu->sched_data;
		if (!vgpu_data->pri_sched) {
			/* Move the last used vGPU to the tail of lru_list */
			list_del_init(&vgpu_data->lru_list);
			list_add_tail(&vgpu_data->lru_list,
				      &sched_data->lru_runq_head);
		}
	} else {
		scheduler->next_vgpu = gvt->idle_vgpu;
	}
out:
	if (scheduler->next_vgpu)
		try_to_schedule_next_vgpu(gvt);
}

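/* Handle a scheduling request: rebalance timeslices when the balance period
 * has expired, update the current vGPU's accounting and run the TBS
 * scheduling function.
 */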
void intel_gvt_schedule(struct intel_gvt *gvt)
{
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
	ktime_t cur_time;

	mutex_lock(&gvt->sched_lock);
	cur_time = ktime_get();

	if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
				(void *)&gvt->service_request)) {
		if (cur_time >= sched_data->expire_time) {
			gvt_balance_timeslice(sched_data);
			sched_data->expire_time = ktime_add_ms(
				cur_time, GVT_TS_BALANCE_PERIOD_MS);
		}
	}
	clear_bit(INTEL_GVT_REQUEST_EVENT_SCHED, (void *)&gvt->service_request);

	vgpu_update_timeslice(gvt->scheduler.current_vgpu, cur_time);
	tbs_sched_func(sched_data);

	mutex_unlock(&gvt->sched_lock);
}

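/* Periodic hrtimer callback: request a scheduling pass and re-arm the timer
 * for the next period.
 */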
static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
{
	struct gvt_sched_data *data;

	data = container_of(timer_data, struct gvt_sched_data, timer);

	intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);

	hrtimer_add_expires_ns(&data->timer, data->period);

	return HRTIMER_RESTART;
}

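/* Allocate the global TBS data, initialize the LRU run queue and set up the
 * scheduling hrtimer; the timer is started later, when the first vGPU is
 * put on the run queue.
 */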
static int tbs_sched_init(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->lru_runq_head);
	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	data->timer.function = tbs_timer_fn;
	data->period = GVT_DEFAULT_TIME_SLICE;
	data->gvt = gvt;

	scheduler->sched_data = data;

	return 0;
}

static void tbs_sched_clean(struct intel_gvt *gvt)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&gvt->scheduler;
	struct gvt_sched_data *data = scheduler->sched_data;

	hrtimer_cancel(&data->timer);

	kfree(data);
	scheduler->sched_data = NULL;
}

static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->sched_ctl.weight = vgpu->sched_ctl.weight;
	data->vgpu = vgpu;
	INIT_LIST_HEAD(&data->lru_list);

	vgpu->sched_data = data;

	return 0;
}

static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;

	kfree(vgpu->sched_data);
	vgpu->sched_data = NULL;

	/* this vgpu id has been removed */
	if (idr_is_empty(&gvt->vgpu_idr))
		hrtimer_cancel(&sched_data->timer);
}

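/* Put a vGPU on the LRU run queue with a GVT_SCHED_VGPU_PRI_TIME-second
 * priority window, and start the scheduling timer if it is not yet running.
 */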
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	ktime_t now;

	if (!list_empty(&vgpu_data->lru_list))
		return;

	now = ktime_get();
	vgpu_data->pri_time = ktime_add(now,
					ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	vgpu_data->pri_sched = true;

	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);

	if (!hrtimer_active(&sched_data->timer))
		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
			sched_data->period), HRTIMER_MODE_ABS);
	vgpu_data->active = true;
}

static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	list_del_init(&vgpu_data->lru_list);
	vgpu_data->active = false;
}

static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
	.init = tbs_sched_init,
	.clean = tbs_sched_clean,
	.init_vgpu = tbs_sched_init_vgpu,
	.clean_vgpu = tbs_sched_clean_vgpu,
	.start_schedule = tbs_sched_start_schedule,
	.stop_schedule = tbs_sched_stop_schedule,
};

int intel_gvt_init_sched_policy(struct intel_gvt *gvt)
{
	int ret;

	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops = &tbs_schedule_ops;
	ret = gvt->scheduler.sched_ops->init(gvt);
	mutex_unlock(&gvt->sched_lock);

	return ret;
}

void intel_gvt_clean_sched_policy(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	gvt->scheduler.sched_ops->clean(gvt);
	mutex_unlock(&gvt->sched_lock);
}

/* For the per-vGPU scheduler policy there are two pieces of per-vGPU data:
 * sched_data and sched_ctl. We treat them as part of the global scheduler,
 * so they are protected by gvt->sched_lock. Callers decide for themselves
 * whether vgpu_lock also needs to be held outside.
 */
int intel_vgpu_init_sched_policy(struct intel_vgpu *vgpu)
{
	int ret;

	mutex_lock(&vgpu->gvt->sched_lock);
	ret = vgpu->gvt->scheduler.sched_ops->init_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);

	return ret;
}

void intel_vgpu_clean_sched_policy(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->sched_lock);
	vgpu->gvt->scheduler.sched_ops->clean_vgpu(vgpu);
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_vgpu_start_schedule(struct intel_vgpu *vgpu)
{
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;

	mutex_lock(&vgpu->gvt->sched_lock);
	if (!vgpu_data->active) {
		gvt_dbg_core("vgpu%d: start schedule\n", vgpu->id);
		vgpu->gvt->scheduler.sched_ops->start_schedule(vgpu);
	}
	mutex_unlock(&vgpu->gvt->sched_lock);
}

void intel_gvt_kick_schedule(struct intel_gvt *gvt)
{
	mutex_lock(&gvt->sched_lock);
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
	mutex_unlock(&gvt->sched_lock);
}

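/* Stop scheduling a vGPU: drop it from the run queue, clear it as current or
 * next vGPU, and switch the MMIO context of any engine it still owns back to
 * the host.
 */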
void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
	struct intel_gvt_workload_scheduler *scheduler =
		&vgpu->gvt->scheduler;
	int ring_id;
	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	if (!vgpu_data->active)
		return;

	gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id);

	mutex_lock(&vgpu->gvt->sched_lock);
	scheduler->sched_ops->stop_schedule(vgpu);

	if (scheduler->next_vgpu == vgpu)
		scheduler->next_vgpu = NULL;

	if (scheduler->current_vgpu == vgpu) {
		/* stop workload dispatching */
		scheduler->need_reschedule = true;
		scheduler->current_vgpu = NULL;
	}

	intel_runtime_pm_get(dev_priv);
	spin_lock_bh(&scheduler->mmio_context_lock);
	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
		if (scheduler->engine_owner[ring_id] == vgpu) {
			intel_gvt_switch_mmio(vgpu, NULL, ring_id);
			scheduler->engine_owner[ring_id] = NULL;
		}
	}
	spin_unlock_bh(&scheduler->mmio_context_lock);
	intel_runtime_pm_put(dev_priv);
	mutex_unlock(&vgpu->gvt->sched_lock);
}