// SPDX-License-Identifier: MIT
/*
* Copyright © 2020 Intel Corporation
*/

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"

struct dram_dimm_info {
	u16 size;
	u8 width, ranks;
};

struct dram_channel_info {
	struct dram_dimm_info dimm_l, dimm_s;
	u8 ranks;
	bool is_16gb_dimm;
};

#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
		DRAM_TYPE_STR(DDR5),
		DRAM_TYPE_STR(LPDDR5),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR

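/*
 * A rank is 64 bits wide, so the DIMM carries ranks * 64 / device-width
 * DRAM devices.
 */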
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}

/* Returns total Gb for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return (val & SKL_DRAM_SIZE_MASK) * 8;
}

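/* Returns the DRAM device data width in bits, or 0 if the DIMM is not populated */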
static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

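/* Returns the number of ranks on the DIMM, or 0 if the DIMM is not populated */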
static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}

/* Returns total Gb for the whole DIMM */
static int icl_get_dimm_size(u16 val)
{
	return (val & ICL_DRAM_SIZE_MASK) * 8 / 2;
}

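/* Returns the DRAM device data width in bits, or 0 if the DIMM is not populated */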
static int icl_get_dimm_width(u16 val)
{
	if (icl_get_dimm_size(val) == 0)
		return 0;

	switch (val & ICL_DRAM_WIDTH_MASK) {
	case ICL_DRAM_WIDTH_X8:
	case ICL_DRAM_WIDTH_X16:
	case ICL_DRAM_WIDTH_X32:
		val = (val & ICL_DRAM_WIDTH_MASK) >> ICL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

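/* Returns the number of ranks on the DIMM, or 0 if the DIMM is not populated */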
static int icl_get_dimm_ranks(u16 val)
{
	if (icl_get_dimm_size(val) == 0)
		return 0;

	val = (val & ICL_DRAM_RANK_MASK) >> ICL_DRAM_RANK_SHIFT;

	return val + 1;
}

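/* Does the DIMM use 16Gb DRAM devices? */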
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total Gb to Gb per DRAM device */
	return dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}

static void
skl_dram_get_dimm_info(struct drm_i915_private *i915,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (GRAPHICS_VER(i915) >= 11) {
		dimm->size = icl_get_dimm_size(val);
		dimm->width = icl_get_dimm_width(val);
		dimm->ranks = icl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	drm_dbg_kms(&i915->drm,
		    "CH%u DIMM %c size: %u Gb, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		    channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		    yesno(skl_is_16gb_dimm(dimm)));
}

static int
skl_dram_get_channel_info(struct drm_i915_private *i915,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(i915, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(i915, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
		return -EINVAL;
	}

	/*
	 * The channel counts as dual rank if either DIMM is dual rank, or
	 * if the L and S DIMMs each contribute a single rank.
	 */
	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
		    channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}

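/*
 * The two channels are symmetric if they are identical and, within a
 * channel, the S DIMM is either absent or identical to the L DIMM.
 */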
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}

static int
skl_dram_get_channels_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		drm_info(&i915->drm, "Number of memory channels is zero\n");
		return -EINVAL;
	}

	if (ch0.ranks == 0 && ch1.ranks == 0) {
		drm_info(&i915->drm, "couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
		    yesno(dram_info->symmetric_memory));

	return 0;
}

static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *i915)
{
	u32 val;

	val = intel_uncore_read(&i915->uncore,
				SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static int
skl_get_dram_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	int ret;

	dram_info->type = skl_get_dram_type(i915);
	drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
		    intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(i915);
	if (ret)
		return ret;

	return 0;
}

/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

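/* Returns the DRAM device data width in bits, or 0 if the DIMM is not populated */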
static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}

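/* Returns the number of ranks on the DIMM, or 0 if the DIMM is not populated */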
static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}

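/* Returns the DRAM type, or INTEL_DRAM_UNKNOWN if the DIMM is not populated */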
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}

static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * Gb to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm);
}

static int bxt_get_dram_info(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	u32 val;
	u8 valid_ranks = 0;
	int i;

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != INTEL_DRAM_UNKNOWN &&
			    dram_info->type != type);

		drm_dbg_kms(&i915->drm,
			    "CH%u DIMM size: %u Gb, width: X%u, ranks: %u, type: %s\n",
			    i - BXT_D_CR_DRP0_DUNIT_START,
			    dimm.size, dimm.width, dimm.ranks,
			    intel_dram_type_str(type));

		if (valid_ranks == 0)
			valid_ranks = dimm.ranks;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN || valid_ranks == 0) {
		drm_info(&i915->drm, "couldn't get memory information\n");
		return -EINVAL;
	}

	return 0;
}

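/*
 * Query pcode for the global memory subsystem info: DRAM type, number of
 * channels and the number of QGV and PSF GV points.
 */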
static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 val = 0;
	int ret;

	ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
	if (ret)
		return ret;

	if (GRAPHICS_VER(dev_priv) == 12) {
		switch (val & 0xf) {
		case 0:
			dram_info->type = INTEL_DRAM_DDR4;
			break;
		case 1:
			dram_info->type = INTEL_DRAM_DDR5;
			break;
		case 2:
			dram_info->type = INTEL_DRAM_LPDDR5;
			break;
		case 3:
			dram_info->type = INTEL_DRAM_LPDDR4;
			break;
		case 4:
			dram_info->type = INTEL_DRAM_DDR3;
			break;
		case 5:
			dram_info->type = INTEL_DRAM_LPDDR3;
			break;
		default:
			MISSING_CASE(val & 0xf);
			return -EINVAL;
		}
	} else {
		switch (val & 0xf) {
		case 0:
			dram_info->type = INTEL_DRAM_DDR4;
			break;
		case 1:
			dram_info->type = INTEL_DRAM_DDR3;
			break;
		case 2:
			dram_info->type = INTEL_DRAM_LPDDR3;
			break;
		case 3:
			dram_info->type = INTEL_DRAM_LPDDR4;
			break;
		default:
			MISSING_CASE(val & 0xf);
			return -EINVAL;
		}
	}

	dram_info->num_channels = (val & 0xf0) >> 4;
	dram_info->num_qgv_points = (val & 0xf00) >> 8;

	/*
	 * PSF GV points are an additional constraint on display bandwidth,
	 * separate from the traditional QGV points: QGV points represent
	 * possible memory clock frequencies, while PSF GV points reflect
	 * possible frequencies of the memory fabric. Switching between PSF
	 * GV points incurs almost no memory access block time, so they do
	 * not need to be accounted for in the watermark calculations.
	 * (Bspec: 64631, 53998)
	 */
	dram_info->num_psf_gv_points = (val & 0x3000) >> 12;

	return 0;
}

static int gen11_get_dram_info(struct drm_i915_private *i915)
{
	int ret = skl_get_dram_info(i915);

	if (ret)
		return ret;

	return icl_pcode_read_mem_global_info(i915);
}

static int gen12_get_dram_info(struct drm_i915_private *i915)
{
	i915->dram_info.wm_lv_0_adjust_needed = false;

	return icl_pcode_read_mem_global_info(i915);
}

void intel_dram_detect(struct drm_i915_private *i915)
{
	struct dram_info *dram_info = &i915->dram_info;
	int ret;

	if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
		return;

	/*
	 * Assume level 0 watermark latency adjustment is needed until proven
	 * otherwise, this w/a is not needed by bxt/glk.
	 */
	dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);

	if (GRAPHICS_VER(i915) >= 12)
		ret = gen12_get_dram_info(i915);
	else if (GRAPHICS_VER(i915) >= 11)
		ret = gen11_get_dram_info(i915);
	else if (IS_GEN9_LP(i915))
		ret = bxt_get_dram_info(i915);
	else
		ret = skl_get_dram_info(i915);
	if (ret)
		return;

	drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels);

	drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n",
		    yesno(dram_info->wm_lv_0_adjust_needed));
}

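/* eDRAM size in MB = banks * ways * sets, all decoded from the EDRAM capability register */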
static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
{
	static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const u8 sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}

void intel_dram_edram_detect(struct drm_i915_private *i915)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || GRAPHICS_VER(i915) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (GRAPHICS_VER(i915) < 9)
		i915->edram_size_mb = 128;
	else
		i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);

	drm_info(&i915->drm, "Found %uMB of eDRAM\n", i915->edram_size_mb);
}