// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"

#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"

/**
 * DOC: GuC
 *
 * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is
 * designed to offload some of the functionality usually performed by the host
 * driver; currently the main operations it can take care of are:
 *
 * - Authentication of the HuC, which is required to fully enable HuC usage.
 * - Low latency graphics context scheduling (a.k.a. GuC submission).
 * - GT Power management.
 *
 * The enable_guc module parameter can be used to select which of those
 * operations to enable within GuC. Note that not all the operations are
 * supported on all gen9+ platforms.
 *
 * Enabling the GuC is not mandatory and therefore the firmware is only loaded
 * if at least one of the operations is selected. However, not loading the GuC
 * might result in the loss of some features that do require the GuC (currently
 * just the HuC, but more are expected to land in the future).
 */

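/*
 * Hedged usage note, not authoritative (the real definitions live in
 * i915_params.h and the module parameter documentation): enable_guc is
 * commonly treated as a bitmask, with one bit selecting GuC submission and
 * another selecting HuC loading. For example, a user wanting HuC
 * authentication without GuC submission would typically load the module as:
 *
 *	modprobe i915 enable_guc=2
 */
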
void intel_guc_notify(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        /*
         * On Gen11+, the value written to the register is passed as a payload
         * to the FW. However, the FW currently treats all values the same way
         * (H2G interrupt), so we can just write the value that the HW expects
         * on older gens.
         */
        intel_uncore_write(gt->uncore, guc->notify_reg, GUC_SEND_TRIGGER);
}

static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
        GEM_BUG_ON(!guc->send_regs.base);
        GEM_BUG_ON(!guc->send_regs.count);
        GEM_BUG_ON(i >= guc->send_regs.count);

        return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        enum forcewake_domains fw_domains = 0;
        unsigned int i;

        if (INTEL_GEN(gt->i915) >= 11) {
                guc->send_regs.base =
                        i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
                guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
        } else {
                guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
                guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
                BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
        }

        for (i = 0; i < guc->send_regs.count; i++) {
                fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
                                        guc_send_reg(guc, i),
                                        FW_REG_READ | FW_REG_WRITE);
        }
        guc->send_regs.fw_domains = fw_domains;
}

static void gen9_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
        spin_unlock_irq(&gt->irq_lock);
}

static void gen9_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        if (!guc->interrupts.enabled) {
                WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) &
                             gt->pm_guc_events);
                guc->interrupts.enabled = true;
                gen6_gt_pm_enable_irq(gt, gt->pm_guc_events);
        }
        spin_unlock_irq(&gt->irq_lock);
}

static void gen9_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        assert_rpm_wakelock_held(&gt->i915->runtime_pm);

        spin_lock_irq(&gt->irq_lock);
        guc->interrupts.enabled = false;

        gen6_gt_pm_disable_irq(gt, gt->pm_guc_events);

        spin_unlock_irq(&gt->irq_lock);
        intel_synchronize_irq(gt->i915);

        gen9_reset_guc_interrupts(guc);
}

static void gen11_reset_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        gen11_gt_reset_one_iir(gt, 0, GEN11_GUC);
        spin_unlock_irq(&gt->irq_lock);
}

static void gen11_enable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        if (!guc->interrupts.enabled) {
                u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);

                WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC));
                intel_uncore_write(gt->uncore,
                                   GEN11_GUC_SG_INTR_ENABLE, events);
                intel_uncore_write(gt->uncore,
                                   GEN11_GUC_SG_INTR_MASK, ~events);
                guc->interrupts.enabled = true;
        }
        spin_unlock_irq(&gt->irq_lock);
}

static void gen11_disable_guc_interrupts(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        spin_lock_irq(&gt->irq_lock);
        guc->interrupts.enabled = false;

        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0);
        intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0);

        spin_unlock_irq(&gt->irq_lock);
        intel_synchronize_irq(gt->i915);

        gen11_reset_guc_interrupts(guc);
}

void intel_guc_init_early(struct intel_guc *guc)
{
        struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

        intel_guc_fw_init_early(guc);
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);
        intel_guc_submission_init_early(guc);

        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
        if (INTEL_GEN(i915) >= 11) {
                guc->notify_reg = GEN11_GUC_HOST_INTERRUPT;
                guc->interrupts.reset = gen11_reset_guc_interrupts;
                guc->interrupts.enable = gen11_enable_guc_interrupts;
                guc->interrupts.disable = gen11_disable_guc_interrupts;
        } else {
                guc->notify_reg = GUC_SEND_INTERRUPT;
                guc->interrupts.reset = gen9_reset_guc_interrupts;
                guc->interrupts.enable = gen9_enable_guc_interrupts;
                guc->interrupts.disable = gen9_disable_guc_interrupts;
        }
}

static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
        u32 level = intel_guc_log_get_level(&guc->log);
        u32 flags = 0;

        if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
                flags |= GUC_LOG_DISABLED;
        else
                flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
                         GUC_LOG_VERBOSITY_SHIFT;

        return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (!intel_guc_is_submission_supported(guc))
                flags |= GUC_CTL_DISABLE_SCHEDULER;

        return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
        u32 flags = 0;

        if (intel_guc_is_submission_supported(guc)) {
                u32 ctxnum, base;

                base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
                ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

                base >>= PAGE_SHIFT;
                flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
                         (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
        }
        return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
        u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
        u32 flags;

        #if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
        #define UNIT SZ_1M
        #define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
        #else
        #define UNIT SZ_4K
        #define FLAG 0
        #endif

        BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!DPC_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
        BUILD_BUG_ON(!ISR_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

        BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
        BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
        BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

        flags = GUC_LOG_VALID |
                GUC_LOG_NOTIFY_ON_HALF_FULL |
                FLAG |
                ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
                ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
                ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
                (offset << GUC_LOG_BUF_ADDR_SHIFT);

        #undef UNIT
        #undef FLAG

        return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
        u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
        u32 flags = ads << GUC_ADS_ADDR_SHIFT;

        return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
        u32 *params = guc->params;
        int i;

        BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

        params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
        params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
        params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
        params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
        params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block out to the hardware. The firmware reads
 * these values on startup and they cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int i;

        /*
         * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
         * they are power context saved so it's ok to release forcewake
         * when we are done here and take it again at xfer time.
         */
        intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

        intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

        for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
                intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

        intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}

int intel_guc_init(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);
        int ret;

        ret = intel_uc_fw_init(&guc->fw);
        if (ret)
                goto err_fetch;

        ret = intel_guc_log_create(&guc->log);
        if (ret)
                goto err_fw;

        ret = intel_guc_ads_create(guc);
        if (ret)
                goto err_log;
        GEM_BUG_ON(!guc->ads_vma);

        ret = intel_guc_ct_init(&guc->ct);
        if (ret)
                goto err_ads;

        if (intel_guc_is_submission_supported(guc)) {
                /*
                 * This is stuff we need to have available at fw load time
                 * if we are planning to enable submission later
                 */
                ret = intel_guc_submission_init(guc);
                if (ret)
                        goto err_ct;
        }

        /* now that everything is perma-pinned, initialize the parameters */
        guc_init_params(guc);

        /* We need to notify the guc whenever we change the GGTT */
        i915_ggtt_enable_guc(gt->ggtt);

        return 0;

err_ct:
        intel_guc_ct_fini(&guc->ct);
err_ads:
        intel_guc_ads_destroy(guc);
err_log:
        intel_guc_log_destroy(&guc->log);
err_fw:
        intel_uc_fw_fini(&guc->fw);
err_fetch:
        intel_uc_fw_cleanup_fetch(&guc->fw);
        DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
        return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
        struct intel_gt *gt = guc_to_gt(guc);

        if (!intel_uc_fw_is_available(&guc->fw))
                return;

        i915_ggtt_disable_guc(gt->ggtt);

        if (intel_guc_is_submission_supported(guc))
                intel_guc_submission_fini(guc);

        intel_guc_ct_fini(&guc->ct);

        intel_guc_ads_destroy(guc);
        intel_guc_log_destroy(&guc->log);
        intel_uc_fw_fini(&guc->fw);
        intel_uc_fw_cleanup_fetch(&guc->fw);

        intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
                        u32 *response_buf, u32 response_buf_size)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        u32 status;
        int i;
        int ret;

        GEM_BUG_ON(!len);
        GEM_BUG_ON(len > guc->send_regs.count);

        /* We expect only action code */
        GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

        /* If CT is available, we expect to use MMIO only during init/fini */
        GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
                   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

        mutex_lock(&guc->send_mutex);
        intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

        for (i = 0; i < len; i++)
                intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

        intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

        intel_guc_notify(guc);

        /*
         * No GuC command should ever take longer than 10ms.
         * Fast commands should still complete in 10us.
         */
        ret = __intel_wait_for_register_fw(uncore,
                                           guc_send_reg(guc, 0),
                                           INTEL_GUC_MSG_TYPE_MASK,
                                           INTEL_GUC_MSG_TYPE_RESPONSE <<
                                           INTEL_GUC_MSG_TYPE_SHIFT,
                                           10, 10, &status);
        /* If GuC explicitly returned an error, convert it to -EIO */
        if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
                ret = -EIO;

        if (ret) {
                DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
                          action[0], ret, status);
                goto out;
        }

        if (response_buf) {
                int count = min(response_buf_size, guc->send_regs.count - 1);

                for (i = 0; i < count; i++)
                        response_buf[i] = intel_uncore_read(uncore,
                                                            guc_send_reg(guc, i + 1));
        }

        /* Use data from the GuC response as our return value */
        ret = INTEL_GUC_MSG_TO_DATA(status);

out:
        intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
        mutex_unlock(&guc->send_mutex);

        return ret;
}

int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
                                       const u32 *payload, u32 len)
{
        u32 msg;

        if (unlikely(!len))
                return -EPROTO;

        /* Make sure to handle only enabled messages */
        msg = payload[0] & guc->msg_enabled_mask;

        if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
                   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
                intel_guc_log_handle_flush_event(&guc->log);

        return 0;
}

int intel_guc_sample_forcewake(struct intel_guc *guc)
{
        struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
        u32 action[2];

        action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,cnl */
        if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
                action[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
                action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return: non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
        u32 action[] = {
                INTEL_GUC_ACTION_AUTHENTICATE_HUC,
                rsa_offset
        };

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc: the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int ret;
        u32 status;
        u32 action[] = {
                INTEL_GUC_ACTION_ENTER_S_STATE,
                GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
        };

        /*
         * If GuC communication is enabled but submission is not supported,
         * we do not need to suspend the GuC.
         */
        if (!intel_guc_submission_is_enabled(guc))
                return 0;

        /*
         * The ENTER_S_STATE action queues the save/restore operation in GuC FW
         * and then returns, so waiting on the H2G is not enough to guarantee
         * GuC is done. When all the processing is done, GuC writes
         * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
         * on that. Note that GuC does not ensure that the value in the register
         * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
         * in progress so we need to take care of that ourselves as well.
         */

        intel_uncore_write(uncore, SOFT_SCRATCH(14),
                           INTEL_GUC_SLEEP_STATE_INVALID_MASK);

        ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
        if (ret)
                return ret;

        ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
                                        INTEL_GUC_SLEEP_STATE_INVALID_MASK,
                                        0, 0, 10, &status);
        if (ret)
                return ret;

        if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
                DRM_ERROR("GuC failed to change sleep state. "
                          "action=0x%x, err=%u\n",
                          action[0], status);
                return -EIO;
        }

        return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc: intel_guc structure
 * @engine: engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
                           struct intel_engine_cs *engine)
{
        /* XXX: to be implemented with submission interface rework */

        return -ENODEV;
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc: the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
        u32 action[] = {
                INTEL_GUC_ACTION_EXIT_S_STATE,
                GUC_POWER_D0,
        };

        /*
         * If GuC communication is enabled but submission is not supported,
         * we do not need to resume the GuC but we do need to enable the
         * GuC communication on resume (above).
         */
        if (!intel_guc_submission_is_enabled(guc))
                return 0;

        return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Memory Management
 *
 * GuC can't allocate any memory for its own usage, so all the allocations must
 * be handled by the host driver. GuC accesses the memory via the GGTT, with the
 * exception of the top and bottom parts of the 4GB address space, which are
 * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM)
 * or other parts of the HW. The driver must take care not to place objects that
 * the GuC is going to access in these reserved ranges. The layout of the GuC
 * address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address  +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^   |                    |
 *     |       |   |                    |
 *     |      GuC  |        GuC         |
 *     |     WOPCM |       WOPCM        |
 *     |      Size |                    |
 *     |       |   |                    |
 *     v       v   |                    |
 *     +=======+===> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

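/*
 * A minimal, hedged sketch of the invariant described above: any object the
 * GuC will access must land in [ggtt_pin_bias, GUC_GGTT_TOP). The in-tree
 * helper intel_guc_ggtt_offset() (see intel_guc.h) performs this kind of
 * check; the simplified form below may differ in detail from the real
 * implementation.
 *
 *	u32 offset = i915_ggtt_offset(vma);
 *
 *	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
 *	GEM_BUG_ON(offset + vma->size > GUC_GGTT_TOP);
 *
 *	return offset;
 */
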
/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return: A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
        struct intel_gt *gt = guc_to_gt(guc);
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        u64 flags;
        int ret;

        obj = i915_gem_object_create_shmem(gt->i915, size);
        if (IS_ERR(obj))
                return ERR_CAST(obj);

        vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
        if (IS_ERR(vma))
                goto err;

        flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
        ret = i915_vma_pin(vma, 0, 0, flags);
        if (ret) {
                vma = ERR_PTR(ret);
                goto err;
        }

        return i915_vma_make_unshrinkable(vma);

err:
        i915_gem_object_put(obj);
        return vma;
}

/**
 * intel_guc_allocate_and_map_vma() - Allocate and map VMA for GuC usage
 * @guc: the guc
 * @size: size of area to allocate (both virtual space and memory)
 * @out_vma: return variable for the allocated vma pointer
 * @out_vaddr: return variable for the obj mapping
 *
 * This wrapper calls intel_guc_allocate_vma() and then maps the allocated
 * object with I915_MAP_WB.
 *
 * Return: 0 if successful, a negative errno code otherwise.
 */
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
                                   struct i915_vma **out_vma, void **out_vaddr)
{
        struct i915_vma *vma;
        void *vaddr;

        vma = intel_guc_allocate_vma(guc, size);
        if (IS_ERR(vma))
                return PTR_ERR(vma);

        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                i915_vma_unpin_and_release(&vma, 0);
                return PTR_ERR(vaddr);
        }

        *out_vma = vma;
        *out_vaddr = vaddr;

        return 0;
}