/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "gt/intel_gt.h"
#include "intel_guc.h"
#include "intel_guc_ads.h"
#include "intel_guc_submission.h"
#include "i915_drv.h"
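
/*
 * guc->notify() implementations, selected in intel_guc_init_early(): they
 * raise the host-to-GuC interrupt after a request has been written out (see
 * intel_guc_send_mmio()). Gen11+ uses GEN11_GUC_HOST_INTERRUPT, older
 * platforms use the GUC_SEND_INTERRUPT trigger register.
 */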
static void gen8_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GUC_SEND_INTERRUPT, GUC_SEND_TRIGGER);
}

static void gen11_guc_raise_irq(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	intel_uncore_write(gt->uncore, GEN11_GUC_HOST_INTERRUPT, 0);
}
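
/*
 * MMIO send channel helper: guc_send_reg() returns the i-th register of the
 * scratch bank set up by intel_guc_init_send_regs(), i.e. the register that
 * holds dword i of a host-to-GuC MMIO message.
 */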
static inline i915_reg_t guc_send_reg(struct intel_guc *guc, u32 i)
{
	GEM_BUG_ON(!guc->send_regs.base);
	GEM_BUG_ON(!guc->send_regs.count);
	GEM_BUG_ON(i >= guc->send_regs.count);

	return _MMIO(guc->send_regs.base + 4 * i);
}

void intel_guc_init_send_regs(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	enum forcewake_domains fw_domains = 0;
	unsigned int i;

	if (INTEL_GEN(gt->i915) >= 11) {
		guc->send_regs.base =
			i915_mmio_reg_offset(GEN11_SOFT_SCRATCH(0));
		guc->send_regs.count = GEN11_SOFT_SCRATCH_COUNT;
	} else {
		guc->send_regs.base = i915_mmio_reg_offset(SOFT_SCRATCH(0));
		guc->send_regs.count = GUC_MAX_MMIO_MSG_LEN;
		BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
	}

	for (i = 0; i < guc->send_regs.count; i++) {
		fw_domains |= intel_uncore_forcewake_for_reg(gt->uncore,
					guc_send_reg(guc, i),
					FW_REG_READ | FW_REG_WRITE);
	}
	guc->send_regs.fw_domains = fw_domains;
}

void intel_guc_init_early(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;

	intel_guc_fw_init_early(guc);
	intel_guc_ct_init_early(&guc->ct);
	intel_guc_log_init_early(&guc->log);
	intel_guc_submission_init_early(guc);

	mutex_init(&guc->send_mutex);
	spin_lock_init(&guc->irq_lock);
	guc->send = intel_guc_send_nop;
	guc->handler = intel_guc_to_host_event_handler_nop;

	if (INTEL_GEN(i915) >= 11) {
		guc->notify = gen11_guc_raise_irq;
		guc->interrupts.reset = gen11_reset_guc_interrupts;
		guc->interrupts.enable = gen11_enable_guc_interrupts;
		guc->interrupts.disable = gen11_disable_guc_interrupts;
	} else {
		guc->notify = gen8_guc_raise_irq;
		guc->interrupts.reset = gen9_reset_guc_interrupts;
		guc->interrupts.enable = gen9_enable_guc_interrupts;
		guc->interrupts.disable = gen9_disable_guc_interrupts;
	}
}
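
/*
 * guc->shared_data is a single page of GuC-addressable memory, kept pinned in
 * the GGTT and CPU-mapped for the lifetime of the GuC; its GGTT offset is
 * passed to the firmware by actions such as intel_guc_reset_engine() below.
 */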
static int guc_shared_data_create(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	guc->shared_data = vma;
	guc->shared_data_vaddr = vaddr;

	return 0;
}

static void guc_shared_data_destroy(struct intel_guc *guc)
{
	i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP);
}
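
/*
 * The guc_ctl_*_flags() helpers below each compute one dword of the GuC boot
 * parameter block, which is assembled in guc_init_params() and handed to the
 * firmware through the SOFT_SCRATCH registers by intel_guc_write_params().
 */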
static u32 guc_ctl_debug_flags(struct intel_guc *guc)
{
	u32 level = intel_guc_log_get_level(&guc->log);
	u32 flags = 0;

	if (!GUC_LOG_LEVEL_IS_VERBOSE(level))
		flags |= GUC_LOG_DISABLED;
	else
		flags |= GUC_LOG_LEVEL_TO_VERBOSITY(level) <<
			 GUC_LOG_VERBOSITY_SHIFT;

	return flags;
}

static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (!intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc))
		flags |= GUC_CTL_DISABLE_SCHEDULER;

	return flags;
}

static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
	u32 flags = 0;

	if (intel_uc_supports_guc_submission(&guc_to_gt(guc)->uc)) {
		u32 ctxnum, base;

		base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
		ctxnum = GUC_MAX_STAGE_DESCRIPTORS / 16;

		base >>= PAGE_SHIFT;
		flags |= (base << GUC_CTL_BASE_ADDR_SHIFT) |
			 (ctxnum << GUC_CTL_CTXNUM_IN16_SHIFT);
	}

	return flags;
}

static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
{
	u32 offset = intel_guc_ggtt_offset(guc, guc->log.vma) >> PAGE_SHIFT;
	u32 flags;

	#if (((CRASH_BUFFER_SIZE) % SZ_1M) == 0)
	#define UNIT SZ_1M
	#define FLAG GUC_LOG_ALLOC_IN_MEGABYTE
	#else
	#define UNIT SZ_4K
	#define FLAG 0
	#endif

	BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!DPC_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
	BUILD_BUG_ON(!ISR_BUFFER_SIZE);
	BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));

	BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
	BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
	BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
			(GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));

	flags = GUC_LOG_VALID |
		GUC_LOG_NOTIFY_ON_HALF_FULL |
		FLAG |
		((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
		((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
		((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
		(offset << GUC_LOG_BUF_ADDR_SHIFT);

	#undef UNIT
	#undef FLAG

	return flags;
}

static u32 guc_ctl_ads_flags(struct intel_guc *guc)
{
	u32 ads = intel_guc_ggtt_offset(guc, guc->ads_vma) >> PAGE_SHIFT;
	u32 flags = ads << GUC_ADS_ADDR_SHIFT;

	return flags;
}

/*
 * Initialise the GuC parameter block before starting the firmware
 * transfer. These parameters are read by the firmware on startup
 * and cannot be changed thereafter.
 */
static void guc_init_params(struct intel_guc *guc)
{
	u32 *params = guc->params;
	int i;

	BUILD_BUG_ON(sizeof(guc->params) != GUC_CTL_MAX_DWORDS * sizeof(u32));

	params[GUC_CTL_CTXINFO] = guc_ctl_ctxinfo_flags(guc);
	params[GUC_CTL_LOG_PARAMS] = guc_ctl_log_params_flags(guc);
	params[GUC_CTL_FEATURE] = guc_ctl_feature_flags(guc);
	params[GUC_CTL_DEBUG] = guc_ctl_debug_flags(guc);
	params[GUC_CTL_ADS] = guc_ctl_ads_flags(guc);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		DRM_DEBUG_DRIVER("param[%2d] = %#x\n", i, params[i]);
}

/*
 * Write the GuC parameter block out to the SOFT_SCRATCH registers before
 * starting the firmware transfer. The firmware reads these parameters on
 * startup and they cannot be changed thereafter.
 */
void intel_guc_write_params(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int i;

	/*
	 * All SOFT_SCRATCH registers are in FORCEWAKE_BLITTER domain and
	 * they are power context saved so it's ok to release forcewake
	 * when we are done here and take it again at xfer time.
	 */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_BLITTER);

	intel_uncore_write(uncore, SOFT_SCRATCH(0), 0);

	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
		intel_uncore_write(uncore, SOFT_SCRATCH(1 + i), guc->params[i]);

	intel_uncore_forcewake_put(uncore, FORCEWAKE_BLITTER);
}
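
/*
 * intel_guc_init() allocates and pins the objects the firmware needs before
 * it is loaded: the shared data page, the log buffer, the ADS, the CT buffers
 * and, when submission is supported, the submission state. Once everything is
 * pinned, the boot parameters are computed and GGTT updates are set up to be
 * propagated to the GuC.
 */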
int intel_guc_init(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);
	int ret;

	ret = intel_uc_fw_init(&guc->fw);
	if (ret)
		goto err_fetch;

	ret = guc_shared_data_create(guc);
	if (ret)
		goto err_fw;
	GEM_BUG_ON(!guc->shared_data);

	ret = intel_guc_log_create(&guc->log);
	if (ret)
		goto err_shared;

	ret = intel_guc_ads_create(guc);
	if (ret)
		goto err_log;
	GEM_BUG_ON(!guc->ads_vma);

	ret = intel_guc_ct_init(&guc->ct);
	if (ret)
		goto err_ads;

	if (intel_uc_supports_guc_submission(&gt->uc)) {
		/*
		 * This is stuff we need to have available at fw load time
		 * if we are planning to enable submission later
		 */
		ret = intel_guc_submission_init(guc);
		if (ret)
			goto err_ct;
	}

	/* now that everything is perma-pinned, initialize the parameters */
	guc_init_params(guc);

	/* We need to notify the guc whenever we change the GGTT */
	i915_ggtt_enable_guc(gt->ggtt);

	return 0;

err_ct:
	intel_guc_ct_fini(&guc->ct);
err_ads:
	intel_guc_ads_destroy(guc);
err_log:
	intel_guc_log_destroy(&guc->log);
err_shared:
	guc_shared_data_destroy(guc);
err_fw:
	intel_uc_fw_fini(&guc->fw);
err_fetch:
	intel_uc_fw_cleanup_fetch(&guc->fw);
	return ret;
}

void intel_guc_fini(struct intel_guc *guc)
{
	struct intel_gt *gt = guc_to_gt(guc);

	i915_ggtt_disable_guc(gt->ggtt);

	if (intel_uc_supports_guc_submission(&gt->uc))
		intel_guc_submission_fini(guc);

	intel_guc_ct_fini(&guc->ct);

	intel_guc_ads_destroy(guc);
	intel_guc_log_destroy(&guc->log);
	guc_shared_data_destroy(guc);
	intel_uc_fw_fini(&guc->fw);
	intel_uc_fw_cleanup_fetch(&guc->fw);
}
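
/*
 * Default guc->send() and guc->handler() callbacks installed by
 * intel_guc_init_early(): they only warn if used before a real send/receive
 * path has been set up.
 */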
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
		       u32 *response_buf, u32 response_buf_size)
{
	WARN(1, "Unexpected send: action=%#x\n", *action);
	return -ENODEV;
}

void intel_guc_to_host_event_handler_nop(struct intel_guc *guc)
{
	WARN(1, "Unexpected event: no suitable handler\n");
}

/*
 * This function implements the MMIO based host to GuC interface.
 */
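/*
 * In outline: the action dwords are written into the send (SOFT_SCRATCH)
 * registers, guc->notify() raises the host-to-GuC interrupt, and send
 * register 0 is then polled until the firmware writes back a RESPONSE-type
 * message; on success, any extra reply dwords are copied into response_buf.
 */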
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	u32 status;
	int i;
	int ret;

	GEM_BUG_ON(!len);
	GEM_BUG_ON(len > guc->send_regs.count);

	/* We expect only action code */
	GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);

	/* If CT is available, we expect to use MMIO only during init/fini */
	GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
		   *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);

	mutex_lock(&guc->send_mutex);
	intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);

	for (i = 0; i < len; i++)
		intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);

	intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));

	intel_guc_notify(guc);

	/*
	 * No GuC command should ever take longer than 10ms.
	 * Fast commands should still complete in 10us.
	 */
	ret = __intel_wait_for_register_fw(uncore,
					   guc_send_reg(guc, 0),
					   INTEL_GUC_MSG_TYPE_MASK,
					   INTEL_GUC_MSG_TYPE_RESPONSE <<
					   INTEL_GUC_MSG_TYPE_SHIFT,
					   10, 10, &status);
	/* If GuC explicitly returned an error, convert it to -EIO */
	if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
		ret = -EIO;

	if (ret) {
		DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
			  action[0], ret, status);
		goto out;
	}

	if (response_buf) {
		int count = min(response_buf_size, guc->send_regs.count - 1);

		for (i = 0; i < count; i++)
			response_buf[i] = intel_uncore_read(uncore,
						guc_send_reg(guc, i + 1));
	}

	/* Use data from the GuC response as our return value */
	ret = INTEL_GUC_MSG_TO_DATA(status);

out:
	intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
	mutex_unlock(&guc->send_mutex);

	return ret;
}
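
/*
 * Process a GuC-to-host notification payload. Only message bits enabled in
 * guc->msg_enabled_mask are handled; log-buffer-flush and crash-dump-posted
 * notifications are forwarded to intel_guc_log_handle_flush_event().
 */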
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len)
{
	u32 msg;

	if (unlikely(!len))
		return -EPROTO;

	/* Make sure to handle only enabled messages */
	msg = payload[0] & guc->msg_enabled_mask;

	if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
		   INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
		intel_guc_log_handle_flush_event(&guc->log);

	return 0;
}
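
/*
 * Report to the GuC which forcewake domains (render and media) it should
 * consider: none when RC6 is unavailable or WaRsDisableCoarsePowerGating
 * applies, otherwise both.
 */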
int intel_guc_sample_forcewake(struct intel_guc *guc)
{
	struct drm_i915_private *dev_priv = guc_to_gt(guc)->i915;
	u32 action[2];

	action[0] = INTEL_GUC_ACTION_SAMPLE_FORCEWAKE;
	/* WaRsDisableCoarsePowerGating:skl,cnl */
	if (!HAS_RC6(dev_priv) || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
		action[1] = 0;
	else
		/* bit 0 and 1 are for Render and Media domain separately */
		action[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA;

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_auth_huc() - Send action to GuC to authenticate HuC ucode
 * @guc: intel_guc structure
 * @rsa_offset: rsa offset w.r.t ggtt base of huc vma
 *
 * Triggers a HuC firmware authentication request to the GuC via intel_guc_send
 * INTEL_GUC_ACTION_AUTHENTICATE_HUC interface. This function is invoked by
 * intel_huc_auth().
 *
 * Return:	non-zero code on error
 */
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
{
	u32 action[] = {
		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
		rsa_offset
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * intel_guc_suspend() - notify GuC entering suspend state
 * @guc:	the guc
 */
int intel_guc_suspend(struct intel_guc *guc)
{
	struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
	int ret;
	u32 status;
	u32 action[] = {
		INTEL_GUC_ACTION_ENTER_S_STATE,
		GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
	};

	/*
	 * The ENTER_S_STATE action queues the save/restore operation in GuC FW
	 * and then returns, so waiting on the H2G is not enough to guarantee
	 * GuC is done. When all the processing is done, GuC writes
	 * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
	 * on that. Note that GuC does not ensure that the value in the register
	 * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
	 * in progress so we need to take care of that ourselves as well.
	 */
	intel_uncore_write(uncore, SOFT_SCRATCH(14),
			   INTEL_GUC_SLEEP_STATE_INVALID_MASK);

	ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
	if (ret)
		return ret;

	ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
					INTEL_GUC_SLEEP_STATE_INVALID_MASK,
					0, 0, 10, &status);
	if (ret)
		return ret;

	if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
		DRM_ERROR("GuC failed to change sleep state. "
			  "action=0x%x, err=%u\n",
			  action[0], status);
		return -EIO;
	}

	return 0;
}

/**
 * intel_guc_reset_engine() - ask GuC to reset an engine
 * @guc:	intel_guc structure
 * @engine:	engine to be reset
 */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine)
{
	u32 data[7];

	GEM_BUG_ON(!guc->execbuf_client);

	data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET;
	data[1] = engine->guc_id;
	data[2] = 0;
	data[3] = 0;
	data[4] = 0;
	data[5] = guc->execbuf_client->stage_id;
	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);

	return intel_guc_send(guc, data, ARRAY_SIZE(data));
}

/**
 * intel_guc_resume() - notify GuC resuming from suspend state
 * @guc:	the guc
 */
int intel_guc_resume(struct intel_guc *guc)
{
	u32 action[] = {
		INTEL_GUC_ACTION_EXIT_S_STATE,
		GUC_POWER_D0,
	};

	return intel_guc_send(guc, action, ARRAY_SIZE(action));
}

/**
 * DOC: GuC Address Space
 *
 * The layout of GuC address space is shown below:
 *
 * ::
 *
 *     +===========> +====================+ <== FFFF_FFFF
 *     ^             |      Reserved      |
 *     |             +====================+ <== GUC_GGTT_TOP
 *     |             |                    |
 *     |             |        DRAM        |
 *    GuC            |                    |
 *  Address   +===> +====================+ <== GuC ggtt_pin_bias
 *   Space     ^    |                    |
 *     |       |    |                    |
 *     |      GuC   |        GuC         |
 *     |     WOPCM  |       WOPCM        |
 *     |      Size  |                    |
 *     |       |    |                    |
 *     v       v    |                    |
 *     +=======+==> +====================+ <== 0000_0000
 *
 * The lower part of GuC Address Space [0, ggtt_pin_bias) is mapped to GuC WOPCM
 * while upper part of GuC Address Space [ggtt_pin_bias, GUC_GGTT_TOP) is mapped
 * to DRAM. The value of the GuC ggtt_pin_bias is the GuC WOPCM size.
 */

/**
 * intel_guc_allocate_vma() - Allocate a GGTT VMA for GuC usage
 * @guc:	the guc
 * @size:	size of area to allocate (both virtual space and memory)
 *
 * This is a wrapper to create an object for use with the GuC. In order to
 * use it inside the GuC, an object needs to be pinned for its lifetime, so we
 * allocate both some backing storage and a range inside the Global GTT. We
 * must pin it in the GGTT somewhere other than [0, GUC ggtt_pin_bias) because
 * that range is reserved inside GuC.
 *
 * Return:	A i915_vma if successful, otherwise an ERR_PTR.
 */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
{
	struct intel_gt *gt = guc_to_gt(guc);
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	u64 flags;
	int ret;

	obj = i915_gem_object_create_shmem(gt->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err;

	flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
	ret = i915_vma_pin(vma, 0, 0, flags);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err;
	}

	return vma;

err:
	i915_gem_object_put(obj);
	return vma;
}