Delete GuC code unused in future patches that rewrite the GuC interface to work with the new firmware. Most of the code deleted relates to workqueues or execlist port. The code is safe to remove because we still don't allow GuC submission to be enabled, even when overriding the modparam, so it currently can't be reached. The defines + structs for the process descriptor and workqueue remain. Although the new GuC interface does not require either of these for the normal submission path, multi-lrc submission does. The usage of the process descriptor and workqueue for multi-lrc will be quite different from the code that is deleted in this patch. A future patch will implement multi-lrc submission. v2: add a note in the commit message about the code being safe to remove (Chris) Signed-off-by: Matthew Brost <matthew.brost@intel.com> Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com> Cc: John Harrison <john.c.harrison@intel.com> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Link: https://patchwork.freedesktop.org/patch/msgid/20210113021236.8164-2-daniele.ceraolospurio@intel.com
194 lines
5.2 KiB
C
/* SPDX-License-Identifier: MIT */
|
|
/*
|
|
* Copyright © 2014-2019 Intel Corporation
|
|
*/
|
|
|
|
#ifndef _INTEL_GUC_H_
|
|
#define _INTEL_GUC_H_
|
|
|
|
#include "intel_uncore.h"
|
|
#include "intel_guc_fw.h"
|
|
#include "intel_guc_fwif.h"
|
|
#include "intel_guc_ct.h"
|
|
#include "intel_guc_log.h"
|
|
#include "intel_guc_reg.h"
|
|
#include "intel_uc_fw.h"
|
|
#include "i915_utils.h"
|
|
#include "i915_vma.h"
|
|
|
|
struct __guc_ads_blob;
|
|
|
|
/*
 * Top level structure of GuC. It owns the GuC firmware blob and the channels
 * used to communicate with the GuC microcontroller (CT buffers and MMIO).
 *
 * NOTE(review): an older version of this comment referenced intel_guc_client;
 * that object was removed along with the legacy execlist-port submission code,
 * so it is no longer mentioned here.
 */
struct intel_guc {
	/* GuC firmware blob: fetch/load state and metadata */
	struct intel_uc_fw fw;
	/* GuC log buffer management */
	struct intel_guc_log log;
	/* Command Transport (CTB) channel to/from the GuC FW */
	struct intel_guc_ct ct;

	/* intel_guc_recv interrupt related state */
	spinlock_t irq_lock;
	/* bitmask of GuC-to-host messages currently being handled */
	unsigned int msg_enabled_mask;

	/* vfuncs and state for the GuC-to-host interrupt delivery */
	struct {
		bool enabled;
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/*
	 * Whether GuC submission was selected; presumably resolved from the
	 * enable_guc modparam / platform defaults — confirm against intel_uc
	 * init code.
	 */
	bool submission_selected;

	/* Additional Data Struct (ADS) buffer handed to the GuC FW */
	struct i915_vma *ads_vma;
	/* CPU mapping of the ADS contents */
	struct __guc_ads_blob *ads_blob;

	/* pool of stage descriptors shared with the GuC FW */
	struct i915_vma *stage_desc_pool;
	/* CPU mapping of the stage descriptor pool */
	void *stage_desc_pool_vaddr;

	/* Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/* GuC's FW specific registers used in MMIO send */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/* register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/* Store msg (e.g. log flush) that we see while CTBs are disabled */
	u32 mmio_msg;

	/* To serialize the intel_guc_send actions */
	struct mutex send_mutex;
};
|
|
|
|
/* Resolve the intel_guc that embeds the given log object. */
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}
|
|
|
|
/*
 * Send an H2G action over the CT buffer when no response payload is expected
 * (response_buf is NULL / size 0). Returns the result of intel_guc_ct_send().
 */
static
inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
}
|
|
|
|
/*
 * Send an H2G action over the CT buffer and copy the GuC's response payload
 * into @response_buf (up to @response_buf_size dwords).
 */
static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size);
}
|
|
|
|
/* Process pending GuC-to-host events by draining the CT channel. */
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}
|
|
|
|
/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	/* Both bounds are debug-build assertions: the vma must sit entirely
	 * inside [pin_bias, GUC_GGTT_TOP) to be addressable by the GuC FW.
	 */
	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}
|
|
|
|
/* Lifecycle: early setup, register discovery, init/fini (see intel_guc.c) */
void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);

/* Host <-> GuC messaging over MMIO (used before/without CTBs) */
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);

/* GGTT-safe buffer allocation helpers (pinned above ggtt.pin_bias) */
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);
|
|
|
|
/* True if this platform has GuC firmware support at all. */
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}
|
|
|
|
/* True if GuC usage is enabled (supported and requested). */
static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}
|
|
|
|
/*
 * True if the GuC firmware is available for use. Must not be called while
 * the firmware is still in the SELECTED state, i.e. before fetching has
 * resolved whether it is actually usable — hence the debug assertion.
 */
static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}
|
|
|
|
/* True if the GuC firmware has been loaded and is currently running. */
static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}
|
|
|
|
/* True if the GuC FW is running AND the CT channel is up, i.e. usable. */
static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}
|
|
|
|
/*
 * Reset GuC software state after the hardware has been reset / powered down:
 * sanitize the firmware tracking, tear down CT state and drop any stashed
 * MMIO message. Always returns 0 (kept int for caller convenience).
 */
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}
|
|
|
|
/* Enable handling of the GuC-to-host message types in @mask (under irq_lock). */
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}
|
|
|
|
/* Disable handling of the GuC-to-host message types in @mask (under irq_lock). */
static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}
|
|
|
|
/* Request a single-engine reset via the GuC (see intel_guc.c). */
int intel_guc_reset_engine(struct intel_guc *guc,
			   struct intel_engine_cs *engine);

/* Dump GuC firmware load status into @p for debugfs/error capture. */
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
|
|
|
|
#endif
|