diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index b3426f328d21..6ef80889fddb 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -71,6 +71,7 @@ xe-y += xe_bb.o \
 	xe_mmio.o \
 	xe_mocs.o \
 	xe_module.o \
+	xe_pat.o \
 	xe_pci.o \
 	xe_pcode.o \
 	xe_pm.o \
diff --git a/drivers/gpu/drm/xe/regs/xe_gt_regs.h b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
index 73b0c0bdde5d..e33885f429b5 100644
--- a/drivers/gpu/drm/xe/regs/xe_gt_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gt_regs.h
@@ -69,7 +69,6 @@
 #define GEN12_VE1_AUX_INV			_MMIO(0x42b8)
 #define   AUX_INV				REG_BIT(0)
 
-#define GEN12_PAT_INDEX(index)			_MMIO(0x4800 + (index) * 4)
 
 #define XEHP_TILE0_ADDR_RANGE			MCR_REG(0x4900)
 #define XEHP_FLAT_CCS_BASE_ADDR			MCR_REG(0x4910)
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 07464ba42746..245117e67e9b 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -29,6 +29,7 @@
 #include "xe_map.h"
 #include "xe_migrate.h"
 #include "xe_mmio.h"
+#include "xe_pat.h"
 #include "xe_mocs.h"
 #include "xe_reg_sr.h"
 #include "xe_ring_ops.h"
@@ -92,83 +93,6 @@ int xe_gt_alloc(struct xe_device *xe, struct xe_gt *gt)
 	return 0;
 }
 
-/* FIXME: These should be in a common file */
-#define CHV_PPAT_SNOOP			REG_BIT(6)
-#define GEN8_PPAT_AGE(x)		((x)<<4)
-#define GEN8_PPAT_LLCeLLC		(3<<2)
-#define GEN8_PPAT_LLCELLC		(2<<2)
-#define GEN8_PPAT_LLC			(1<<2)
-#define GEN8_PPAT_WB			(3<<0)
-#define GEN8_PPAT_WT			(2<<0)
-#define GEN8_PPAT_WC			(1<<0)
-#define GEN8_PPAT_UC			(0<<0)
-#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
-#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))
-#define GEN12_PPAT_CLOS(x)		((x)<<2)
-
-static void tgl_setup_private_ppat(struct xe_gt *gt)
-{
-	/* TGL doesn't support LLC or AGE settings */
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_UC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg, GEN8_PPAT_WB);
-}
-
-static void pvc_setup_private_ppat(struct xe_gt *gt)
-{
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_UC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
-			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg,
-			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg,
-			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WT);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg,
-			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WB);
-}
-
-#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
-#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
-#define MTL_PPAT_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
-#define MTL_PPAT_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
-#define MTL_PPAT_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
-#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
-#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
-#define MTL_0_COH_NON	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 0)
-
-static void mtl_setup_private_ppat(struct xe_gt *gt)
-{
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, MTL_PPAT_0_WB);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg,
-			MTL_PPAT_1_WT | MTL_2_COH_1W);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg,
-			MTL_PPAT_3_UC | MTL_2_COH_1W);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg,
-			MTL_PPAT_0_WB | MTL_2_COH_1W);
-	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
-			MTL_PPAT_0_WB | MTL_3_COH_2W);
-}
-
-static void setup_private_ppat(struct xe_gt *gt)
-{
-	struct xe_device *xe = gt_to_xe(gt);
-
-	if (xe->info.platform == XE_METEORLAKE)
-		mtl_setup_private_ppat(gt);
-	else if (xe->info.platform == XE_PVC)
-		pvc_setup_private_ppat(gt);
-	else
-		tgl_setup_private_ppat(gt);
-}
-
 static int gt_ttm_mgr_init(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);
@@ -447,7 +371,7 @@ static int gt_fw_domain_init(struct xe_gt *gt)
 	if (err)
 		goto err_hw_fence_irq;
 
-	setup_private_ppat(gt);
+	xe_pat_init(gt);
 
 	if (!xe_gt_is_media_type(gt)) {
 		err = xe_ggtt_init(gt, gt->mem.ggtt);
@@ -633,7 +557,7 @@ static int do_gt_restart(struct xe_gt *gt)
 	enum xe_hw_engine_id id;
 	int err;
 
-	setup_private_ppat(gt);
+	xe_pat_init(gt);
 
 	xe_gt_mcr_set_implicit_defaults(gt);
 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
new file mode 100644
index 000000000000..1cbb3d6ea949
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "xe_pat.h"
+
+#include "regs/xe_reg_defs.h"
+#include "xe_gt.h"
+#include "xe_mmio.h"
+
+#define GEN12_PAT_INDEX(index)		_MMIO(0x4800 + (index) * 4)
+
+#define GEN8_PPAT_WB			(3<<0)
+#define GEN8_PPAT_WT			(2<<0)
+#define GEN8_PPAT_WC			(1<<0)
+#define GEN8_PPAT_UC			(0<<0)
+#define GEN12_PPAT_CLOS(x)		((x)<<2)
+
+static void tgl_setup_private_ppat(struct xe_gt *gt)
+{
+	/* TGL doesn't support LLC or AGE settings */
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_UC);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg, GEN8_PPAT_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg, GEN8_PPAT_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg, GEN8_PPAT_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg, GEN8_PPAT_WB);
+}
+
+static void pvc_setup_private_ppat(struct xe_gt *gt)
+{
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, GEN8_PPAT_UC);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg, GEN8_PPAT_WC);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg, GEN8_PPAT_WT);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg, GEN8_PPAT_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
+			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WT);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(5).reg,
+			GEN12_PPAT_CLOS(1) | GEN8_PPAT_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(6).reg,
+			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WT);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(7).reg,
+			GEN12_PPAT_CLOS(2) | GEN8_PPAT_WB);
+}
+
+#define MTL_PPAT_L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)
+#define MTL_PAT_INDEX_COH_MODE_MASK	REG_GENMASK(1, 0)
+#define MTL_PPAT_3_UC	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 3)
+#define MTL_PPAT_1_WT	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 1)
+#define MTL_PPAT_0_WB	REG_FIELD_PREP(MTL_PPAT_L4_CACHE_POLICY_MASK, 0)
+#define MTL_3_COH_2W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 3)
+#define MTL_2_COH_1W	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 2)
+#define MTL_0_COH_NON	REG_FIELD_PREP(MTL_PAT_INDEX_COH_MODE_MASK, 0)
+
+static void mtl_setup_private_ppat(struct xe_gt *gt)
+{
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(0).reg, MTL_PPAT_0_WB);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(1).reg,
+			MTL_PPAT_1_WT | MTL_2_COH_1W);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(2).reg,
+			MTL_PPAT_3_UC | MTL_2_COH_1W);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(3).reg,
+			MTL_PPAT_0_WB | MTL_2_COH_1W);
+	xe_mmio_write32(gt, GEN12_PAT_INDEX(4).reg,
+			MTL_PPAT_0_WB | MTL_3_COH_2W);
+}
+
+void xe_pat_init(struct xe_gt *gt)
+{
+	struct xe_device *xe = gt_to_xe(gt);
+
+	if (xe->info.platform == XE_METEORLAKE)
+		mtl_setup_private_ppat(gt);
+	else if (xe->info.platform == XE_PVC)
+		pvc_setup_private_ppat(gt);
+	else
+		tgl_setup_private_ppat(gt);
+}
diff --git a/drivers/gpu/drm/xe/xe_pat.h b/drivers/gpu/drm/xe/xe_pat.h
new file mode 100644
index 000000000000..659de4008131
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_pat.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _XE_PAT_H_
+#define _XE_PAT_H_
+
+struct xe_gt;
+
+void xe_pat_init(struct xe_gt *gt);
+
+#endif
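
For reference, the MTL table in xe_pat.c resolves to small literal register values once REG_GENMASK()/REG_FIELD_PREP() are expanded: bits 3:2 carry the L4 cache policy and bits 1:0 the coherency mode. The stand-alone user-space sketch below only illustrates that expansion and is not part of the patch; field_prep() here is a plain shift used for illustration, not the kernel macro.

/*
 * Illustrative sketch: expand the MTL PAT entries programmed by
 * mtl_setup_private_ppat() into the literal 32-bit values, assuming the
 * usual REG_FIELD_PREP() behaviour of shifting a value into the low bit
 * of its mask.
 */
#include <stdio.h>
#include <stdint.h>

#define L4_POLICY_SHIFT	2	/* MTL_PPAT_L4_CACHE_POLICY_MASK = bits 3:2 */
#define COH_MODE_SHIFT	0	/* MTL_PAT_INDEX_COH_MODE_MASK = bits 1:0 */

/* Stand-in for REG_FIELD_PREP(): place a field value at its shift. */
static uint32_t field_prep(unsigned int shift, uint32_t val)
{
	return val << shift;
}

int main(void)
{
	/* PAT index -> { L4 policy, coherency mode }, mirroring the MTL table */
	static const struct { uint32_t l4, coh; } pat[] = {
		{ 0, 0 },	/* 0: WB, non-coherent */
		{ 1, 2 },	/* 1: WT, 1-way coherent */
		{ 3, 2 },	/* 2: UC, 1-way coherent */
		{ 0, 2 },	/* 3: WB, 1-way coherent */
		{ 0, 3 },	/* 4: WB, 2-way coherent */
	};
	unsigned int i;

	for (i = 0; i < sizeof(pat) / sizeof(pat[0]); i++)
		printf("PAT_INDEX(%u) = 0x%x\n", i,
		       (unsigned int)(field_prep(L4_POLICY_SHIFT, pat[i].l4) |
				      field_prep(COH_MODE_SHIFT, pat[i].coh)));

	return 0;
}

Running it prints 0x0, 0x6, 0xe, 0x2 and 0x3 for indices 0 through 4, which are the values the driver ends up writing into the per-index PAT registers on MTL.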