/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

# ifndef _INTEL_GUC_FWIF_H
# define _INTEL_GUC_FWIF_H
2019-06-26 17:40:16 +03:00
# include <linux/bits.h>
# include <linux/compiler.h>
# include <linux/types.h>
2021-06-02 22:16:30 -07:00
# include "gt/intel_engine_types.h"
2019-06-26 17:40:16 +03:00
2021-06-02 22:16:15 -07:00
# include "abi/guc_actions_abi.h"
2021-07-30 13:21:08 -07:00
# include "abi/guc_actions_slpc_abi.h"
2021-06-02 22:16:15 -07:00
# include "abi/guc_errors_abi.h"
# include "abi/guc_communication_mmio_abi.h"
# include "abi/guc_communication_ctb_abi.h"
2022-01-06 16:06:21 -08:00
# include "abi/guc_klvs_abi.h"
2021-06-02 22:16:15 -07:00
# include "abi/guc_messages_abi.h"

/* Payload length only i.e. don't include G2H header length */
#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET	2
#define G2H_LEN_DW_DEREGISTER_CONTEXT		1

/* Argument values for the SCHED_CONTEXT_MODE_SET action */
#define GUC_CONTEXT_DISABLE		0
#define GUC_CONTEXT_ENABLE		1

/* GuC client submission priority levels (lower value = higher priority) */
#define GUC_CLIENT_PRIORITY_KMD_HIGH	0
#define GUC_CLIENT_PRIORITY_HIGH	1
#define GUC_CLIENT_PRIORITY_KMD_NORMAL	2
#define GUC_CLIENT_PRIORITY_NORMAL	3
#define GUC_CLIENT_PRIORITY_NUM		4

/* Valid LRC descriptor ids are [0..GUC_MAX_LRC_DESCRIPTORS) */
#define GUC_MAX_LRC_DESCRIPTORS	65535
#define GUC_INVALID_LRC_ID		GUC_MAX_LRC_DESCRIPTORS

/* Legacy per-engine GuC identifiers */
#define GUC_RENDER_ENGINE		0
#define GUC_VIDEO_ENGINE		1
#define GUC_BLITTER_ENGINE		2
#define GUC_VIDEOENHANCE_ENGINE		3
#define GUC_VIDEO_ENGINE2		4
#define GUC_MAX_ENGINES_NUM		(GUC_VIDEO_ENGINE2 + 1)

/* GuC ABI engine classes (see engine_class_to_guc_class()) */
#define GUC_RENDER_CLASS		0
#define GUC_VIDEO_CLASS			1
#define GUC_VIDEOENHANCE_CLASS		2
#define GUC_BLITTER_CLASS		3
#define GUC_RESERVED_CLASS		4
#define GUC_LAST_ENGINE_CLASS		GUC_RESERVED_CLASS
#define GUC_MAX_ENGINE_CLASSES		16
#define GUC_MAX_INSTANCES_PER_CLASS	32

#define GUC_DOORBELL_INVALID		256

/*
 * Work queue item header definitions
 *
 * Work queue is circular buffer used to submit complex (multi-lrc) submissions
 * to the GuC. A work queue item is an entry in the circular buffer.
 */
#define WQ_STATUS_ACTIVE		1
#define WQ_STATUS_SUSPENDED		2
#define WQ_STATUS_CMD_ERROR		3
#define WQ_STATUS_ENGINE_ID_NOT_USED	4
#define WQ_STATUS_SUSPENDED_FROM_RESET	5
#define WQ_TYPE_BATCH_BUF		0x1
#define WQ_TYPE_PSEUDO			0x2
#define WQ_TYPE_INORDER			0x3
#define WQ_TYPE_NOOP			0x4
#define WQ_TYPE_MULTI_LRC		0x5
/* Fields of the work queue item header dword */
#define WQ_TYPE_MASK			GENMASK(7, 0)
#define WQ_LEN_MASK			GENMASK(26, 16)
/* Fields of the work queue item body dwords */
#define WQ_GUC_ID_MASK			GENMASK(15, 0)
#define WQ_RING_TAIL_MASK		GENMASK(28, 18)

/* Stage descriptor attribute flags */
#define GUC_STAGE_DESC_ATTR_ACTIVE	BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB	BIT(1)
#define GUC_STAGE_DESC_ATTR_KERNEL	BIT(2)
#define GUC_STAGE_DESC_ATTR_PREEMPT	BIT(3)
#define GUC_STAGE_DESC_ATTR_RESET	BIT(4)
#define GUC_STAGE_DESC_ATTR_WQLOCKED	BIT(5)
#define GUC_STAGE_DESC_ATTR_PCH		BIT(6)
#define GUC_STAGE_DESC_ATTR_TERMINATED	BIT(7)
/*
 * GUC_CTL_* are dword indices into the boot-parameter block handed to the
 * GuC at firmware load time; nested defines are the fields of each dword.
 */
#define GUC_CTL_LOG_PARAMS		0
#define   GUC_LOG_VALID			BIT(0)
#define   GUC_LOG_NOTIFY_ON_HALF_FULL	BIT(1)
#define   GUC_LOG_CAPTURE_ALLOC_UNITS	BIT(2)
#define   GUC_LOG_LOG_ALLOC_UNITS	BIT(3)
#define   GUC_LOG_CRASH_SHIFT		4
#define   GUC_LOG_CRASH_MASK		(0x3 << GUC_LOG_CRASH_SHIFT)
#define   GUC_LOG_DEBUG_SHIFT		6
#define   GUC_LOG_DEBUG_MASK		(0xF << GUC_LOG_DEBUG_SHIFT)
#define   GUC_LOG_CAPTURE_SHIFT		10
#define   GUC_LOG_CAPTURE_MASK		(0x3 << GUC_LOG_CAPTURE_SHIFT)
#define   GUC_LOG_BUF_ADDR_SHIFT	12

#define GUC_CTL_WA			1
#define   GUC_WA_POLLCS			BIT(18)

#define GUC_CTL_FEATURE			2
#define   GUC_CTL_ENABLE_SLPC		BIT(2)
#define   GUC_CTL_DISABLE_SCHEDULER	BIT(14)

#define GUC_CTL_DEBUG			3
#define   GUC_LOG_VERBOSITY_SHIFT	0
#define   GUC_LOG_VERBOSITY_LOW		(0 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_MED		(1 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_HIGH	(2 << GUC_LOG_VERBOSITY_SHIFT)
#define   GUC_LOG_VERBOSITY_ULTRA	(3 << GUC_LOG_VERBOSITY_SHIFT)
/* Verbosity range-check limits, without the shift */
#define	  GUC_LOG_VERBOSITY_MIN		0
#define	  GUC_LOG_VERBOSITY_MAX		3
#define	  GUC_LOG_VERBOSITY_MASK	0x0000000f
#define	  GUC_LOG_DESTINATION_MASK	(3 << 4)
#define   GUC_LOG_DISABLED		(1 << 6)
#define   GUC_PROFILE_ENABLED		(1 << 7)

#define GUC_CTL_ADS			4
#define   GUC_ADS_ADDR_SHIFT		1
#define   GUC_ADS_ADDR_MASK		(0xFFFFF << GUC_ADS_ADDR_SHIFT)

#define GUC_CTL_DEVID			5

#define GUC_CTL_MAX_DWORDS		(SOFT_SCRATCH_COUNT - 2) /* [1..14] */
/* Generic GT SysInfo data types */
#define GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED		0
#define GUC_GENERIC_GT_SYSINFO_VDBOX_SFC_SUPPORT_MASK	1
#define GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI	2
#define GUC_GENERIC_GT_SYSINFO_MAX			16

/*
 * The class goes in bits [0..2] of the GuC ID, the instance in bits [3..6].
 * Bit 7 can be used for operations that apply to all engine classes &
 * instances.
 */
#define GUC_ENGINE_CLASS_SHIFT		0
#define GUC_ENGINE_CLASS_MASK		(0x7 << GUC_ENGINE_CLASS_SHIFT)
#define GUC_ENGINE_INSTANCE_SHIFT	3
#define GUC_ENGINE_INSTANCE_MASK	(0xf << GUC_ENGINE_INSTANCE_SHIFT)
#define GUC_ENGINE_ALL_INSTANCES	BIT(7)

/* Pack an engine class + instance into a GuC engine ID */
#define MAKE_GUC_ID(class, instance) \
	(((class) << GUC_ENGINE_CLASS_SHIFT) | \
	 ((instance) << GUC_ENGINE_INSTANCE_SHIFT))

/* Unpack the class / instance fields of a GuC engine ID */
#define GUC_ID_TO_ENGINE_CLASS(guc_id) \
	(((guc_id) & GUC_ENGINE_CLASS_MASK) >> GUC_ENGINE_CLASS_SHIFT)
#define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
	(((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)

/* Build the payload dword for a Host-to-GuC SLPC request */
#define SLPC_EVENT(id, c) (\
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, c) \
)
/*
 * engine_class_to_guc_class - map an i915 engine class to the GuC ABI class.
 *
 * The BUILD_BUG_ONs prove at compile time that the two numbering schemes are
 * identical for every valid class, so the conversion is a checked
 * pass-through.  OTHER_CLASS and anything above MAX_ENGINE_CLASS have no GuC
 * equivalent and are rejected at runtime.
 */
static inline u8 engine_class_to_guc_class(u8 class)
{
	BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS);
	BUILD_BUG_ON(GUC_BLITTER_CLASS != COPY_ENGINE_CLASS);
	BUILD_BUG_ON(GUC_VIDEO_CLASS != VIDEO_DECODE_CLASS);
	BUILD_BUG_ON(GUC_VIDEOENHANCE_CLASS != VIDEO_ENHANCEMENT_CLASS);
	GEM_BUG_ON(class > MAX_ENGINE_CLASS || class == OTHER_CLASS);

	return class;
}
/*
 * guc_class_to_engine_class - map a GuC ABI class back to the i915 class.
 *
 * Pass-through by construction (the numbering is asserted identical in
 * engine_class_to_guc_class()).  The reserved GuC class has no i915
 * equivalent and is rejected.
 */
static inline u8 guc_class_to_engine_class(u8 guc_class)
{
	GEM_BUG_ON(guc_class > GUC_LAST_ENGINE_CLASS);
	GEM_BUG_ON(guc_class == GUC_RESERVED_CLASS);

	return guc_class;
}
/* Work item for submitting workloads into work queue of GuC. */
struct guc_wq_item {
	u32 header;		/* WQ_TYPE_* / WQ_LEN_* fields */
	u32 context_desc;
	u32 submit_element_info;
	u32 fence_id;
} __packed;
struct guc_process_desc {
2017-03-22 10:39:53 -07:00
u32 stage_id ;
2015-07-09 19:29:04 +01:00
u64 db_base_addr ;
u32 head ;
u32 tail ;
u32 error_offset ;
u64 wq_base_addr ;
u32 wq_size_bytes ;
u32 wq_status ;
u32 engine_presence ;
u32 priority ;
2021-10-14 10:19:59 -07:00
u32 reserved [ 36 ] ;
2015-07-09 19:29:04 +01:00
} __packed ;
2021-07-21 14:50:44 -07:00
#define CONTEXT_REGISTRATION_FLAG_KMD	BIT(0)

/* Default scheduling policy values, in microseconds */
#define CONTEXT_POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
#define CONTEXT_POLICY_DEFAULT_PREEMPTION_TIME_US 500000

/* Preempt to idle on quantum expiry */
#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE	BIT(0)
/*
 * GuC Context registration descriptor.
 * FIXME: This is only required to exist during context registration.
 * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
 * is not required.
 */
struct guc_lrc_desc {
	u32 hw_context_desc;
	u32 slpm_perf_mode_hint;	/* SLPC v1 only */
	u32 slpm_freq_hint;
	u32 engine_submit_mask;		/* In logical space */
	u8 engine_class;
	u8 reserved0[3];
	u32 priority;
	u32 process_desc;
	u32 wq_addr;
	u32 wq_size;
	u32 context_flags;		/* CONTEXT_REGISTRATION_* */
	/* Time for one workload to execute. (in micro seconds) */
	u32 execution_quantum;
	/*
	 * Time to wait for a preemption request to complete before issuing a
	 * reset. (in micro seconds).
	 */
	u32 preemption_timeout;
	u32 policy_flags;		/* CONTEXT_POLICY_* */
	u32 reserved1[19];
} __packed;
/* GuC power states */
#define GUC_POWER_UNSPECIFIED	0
#define GUC_POWER_D0		1
#define GUC_POWER_D1		2
#define GUC_POWER_D2		3
#define GUC_POWER_D3		4

/* Scheduling policy settings */

#define GLOBAL_POLICY_MAX_NUM_WI 15

/* Don't reset an engine upon preemption failure */
#define GLOBAL_POLICY_DISABLE_ENGINE_RESET	BIT(0)

#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
struct guc_policies {
2019-05-27 18:35:59 +00:00
u32 submission_queue_depth [ GUC_MAX_ENGINE_CLASSES ] ;
2015-12-18 12:00:10 -08:00
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt ( to prevent DPC queue drain starving ) .
* Typically 1000 s of micro seconds ( example only , not granularity ) . */
u32 dpc_promote_time ;
/* Must be set to take these new values. */
u32 is_valid ;
/* Max number of WIs to process per call. A large value may keep CS
* idle . */
u32 max_num_work_items ;
2021-06-15 17:13:01 -07:00
u32 global_flags ;
2019-05-27 18:35:59 +00:00
u32 reserved [ 4 ] ;
2015-12-18 12:00:10 -08:00
} __packed ;
2015-12-18 12:00:11 -08:00
/* GuC MMIO reg state struct */
2019-05-27 18:35:59 +00:00
struct guc_mmio_reg {
u32 offset ;
u32 value ;
u32 flags ;
2022-01-06 16:06:21 -08:00
u32 mask ;
# define GUC_REGSET_MASKED BIT(0)
# define GUC_REGSET_MASKED_WITH_VALUE BIT(2)
# define GUC_REGSET_RESTORE_ONLY BIT(3)
2019-05-27 18:35:59 +00:00
} __packed ;
2015-12-18 12:00:11 -08:00
2019-05-27 18:35:59 +00:00
/* GuC register sets */
2020-10-28 07:58:24 -07:00
struct guc_mmio_reg_set {
u32 address ;
u16 count ;
u16 reserved ;
2017-03-22 10:39:54 -07:00
} __packed ;
2019-05-27 18:35:59 +00:00
/* HW info */
struct guc_gt_system_info {
2020-10-28 07:58:24 -07:00
u8 mapping_table [ GUC_MAX_ENGINE_CLASSES ] [ GUC_MAX_INSTANCES_PER_CLASS ] ;
u32 engine_enabled_masks [ GUC_MAX_ENGINE_CLASSES ] ;
u32 generic_gt_sysinfo [ GUC_GENERIC_GT_SYSINFO_MAX ] ;
2015-12-18 12:00:11 -08:00
} __packed ;
2022-01-06 16:06:21 -08:00
/* Indices into the register-capture list arrays of struct guc_ads */
enum {
	GUC_CAPTURE_LIST_INDEX_PF = 0,
	GUC_CAPTURE_LIST_INDEX_VF = 1,
	GUC_CAPTURE_LIST_INDEX_MAX = 2,
};
/* GuC Additional Data Struct */
2015-12-18 12:00:09 -08:00
struct guc_ads {
2020-10-28 07:58:24 -07:00
struct guc_mmio_reg_set reg_state_list [ GUC_MAX_ENGINE_CLASSES ] [ GUC_MAX_INSTANCES_PER_CLASS ] ;
u32 reserved0 ;
2015-12-18 12:00:09 -08:00
u32 scheduler_policies ;
2019-05-27 18:35:59 +00:00
u32 gt_system_info ;
2021-06-15 17:13:01 -07:00
u32 reserved1 ;
2019-05-27 18:35:59 +00:00
u32 control_data ;
u32 golden_context_lrca [ GUC_MAX_ENGINE_CLASSES ] ;
u32 eng_state_size [ GUC_MAX_ENGINE_CLASSES ] ;
2020-10-28 07:58:24 -07:00
u32 private_data ;
2022-01-06 16:06:21 -08:00
u32 reserved2 ;
u32 capture_instance [ GUC_CAPTURE_LIST_INDEX_MAX ] [ GUC_MAX_ENGINE_CLASSES ] ;
u32 capture_class [ GUC_CAPTURE_LIST_INDEX_MAX ] [ GUC_MAX_ENGINE_CLASSES ] ;
u32 capture_global [ GUC_CAPTURE_LIST_INDEX_MAX ] ;
u32 reserved [ 14 ] ;
2015-12-18 12:00:09 -08:00
} __packed ;
/*
 * Historical note (commit message for the change that introduced the engine
 * usage structures below):
 *
 * drm/i915/pmu: Connect engine busyness stats from GuC to pmu
 *
 * With GuC handling scheduling, i915 is not aware of the time that a
 * context is scheduled in and out of the engine. Since i915 pmu relies on
 * this info to provide engine busyness to the user, GuC shares this info
 * with i915 for all engines using shared memory. For each engine, this
 * info contains:
 * - total busyness: total time that the context was running (total)
 * - id: id of the running context (id)
 * - start timestamp: timestamp when the context started running (start)
 *
 * At the time (now) of sampling the engine busyness, if the id is valid
 * (!= ~0), and start is non-zero, then the context is considered to be
 * active and the engine busyness is calculated using the below equation
 *
 *	engine busyness = total + (now - start)
 *
 * All times are obtained from the gt clock base. For inactive contexts,
 * engine busyness is just equal to the total.
 *
 * The start and total values provided by GuC are 32 bits and wrap around
 * in a few minutes. Since perf pmu provides busyness as 64 bit
 * monotonically increasing values, there is a need for this implementation
 * to account for overflows and extend the time to 64 bits before returning
 * busyness to the user. In order to do that, a worker runs periodically at
 * frequency = 1/8th the time it takes for the timestamp to wrap. As an
 * example, that would be once in 27 seconds for a gt clock frequency of
 * 19.2 MHz.
 *
 * Note: there might be an over-accounting of busyness due to the fact that
 * GuC may be updating the total and start values while kmd is reading them.
 * (i.e kmd may read the updated total and the stale start). In such a
 * case, user may see higher busyness value followed by smaller ones which
 * would eventually catch up to the higher value.
 */
/* Engine usage stats */
struct guc_engine_usage_record {
	u32 current_context_index;	/* id of the running context */
	u32 last_switch_in_stamp;	/* timestamp the context started running */
	u32 reserved0;
	u32 total_runtime;		/* accumulated context runtime */
	u32 reserved1[4];
} __packed;
/* Per-engine usage records shared by the GuC, indexed by class/instance */
struct guc_engine_usage {
	struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
} __packed;
/* GuC logging structures */

enum guc_log_buffer_type {
	GUC_DEBUG_LOG_BUFFER,
	GUC_CRASH_DUMP_LOG_BUFFER,
	GUC_CAPTURE_LOG_BUFFER,
	GUC_MAX_LOG_BUFFER
};
/**
 * struct guc_log_buffer_state - GuC log buffer state
 *
 * Below state structure is used for coordination of retrieval of GuC firmware
 * logs. Separate state is maintained for each log buffer type.
 * read_ptr points to the location where i915 read last in log buffer and
 * is read only for GuC firmware. write_ptr is incremented by GuC with number
 * of bytes written for each log entry and is read only for i915.
 * When any type of log buffer becomes half full, GuC sends a flush interrupt.
 * GuC firmware expects that while it is writing to 2nd half of the buffer,
 * first half would get consumed by Host and then get a flush completed
 * acknowledgment from Host, so that it does not end up doing any overwrite
 * causing loss of logs. So when buffer gets half filled & i915 has requested
 * for interrupt, GuC will set flush_to_file field, set the sampled_write_ptr
 * to the value of write_ptr and raise the interrupt.
 * On receiving the interrupt i915 should read the buffer, clear flush_to_file
 * field and also update read_ptr with the value of sample_write_ptr, before
 * sending an acknowledgment to GuC. marker & version fields are for internal
 * usage of GuC and opaque to i915. buffer_full_cnt field is incremented every
 * time GuC detects the log buffer overflow.
 */
struct guc_log_buffer_state {
	u32 marker[2];
	u32 read_ptr;
	u32 write_ptr;
	u32 size;
	u32 sampled_write_ptr;
	u32 wrap_offset;
	union {
		struct {
			u32 flush_to_file:1;
			u32 buffer_full_cnt:4;
			u32 reserved:27;
		};
		u32 flags;
	};
	u32 version;
} __packed;
/* Per-engine preemption report, embedded in struct guc_shared_ctx_data */
struct guc_ctx_report {
	u32 report_return_status;
	u32 reserved1[64];
	u32 affected_count;
	u32 reserved2[2];
} __packed;
/* GuC Shared Context Data Struct */
struct guc_shared_ctx_data {
	u32 addr_of_last_preempted_data_low;
	u32 addr_of_last_preempted_data_high;
	u32 addr_of_last_preempted_data_high_tmp;
	u32 padding;
	u32 is_mapped_to_proxy;
	u32 proxy_ctx_id;
	u32 engine_reset_ctx_id;
	u32 media_reset_count;
	u32 reserved1[8];
	u32 uk_last_ctx_switch_reason;
	u32 was_reset;
	u32 lrca_gpu_addr;
	u64 execlist_ctx;
	u32 reserved2[66];
	struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
} __packed;
/* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
2016-11-25 18:59:34 +01:00
enum intel_guc_recv_message {
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT ( 1 ) ,
2022-01-06 16:06:21 -08:00
INTEL_GUC_RECV_MSG_EXCEPTION = BIT ( 30 ) ,
2016-10-12 21:54:28 +05:30
} ;
2015-07-09 19:29:04 +01:00
# endif