// SPDX-License-Identifier: GPL-2.0+
/*
 * Tegra30 External Memory Controller driver
 *
 * Based on downstream driver from NVIDIA and tegra124-emc.c
 * Copyright (C) 2011-2014 NVIDIA Corporation
 *
 * Author: Dmitry Osipenko <digetx@gmail.com>
 * Copyright (C) 2019 GRATE-DRIVER project
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>

#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>

#include "../jedec_ddr.h"
#include "../of_memory.h"

#include "mc.h"

#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
#define EMC_ADR_CFG 0x010
#define EMC_CFG 0x00c
#define EMC_REFCTRL 0x020
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
#define EMC_RAS 0x034
#define EMC_RP 0x038
#define EMC_R2W 0x03c
#define EMC_W2R 0x040
#define EMC_R2P 0x044
#define EMC_W2P 0x048
#define EMC_RD_RCD 0x04c
#define EMC_WR_RCD 0x050
#define EMC_RRD 0x054
#define EMC_REXT 0x058
#define EMC_WDV 0x05c
#define EMC_QUSE 0x060
#define EMC_QRST 0x064
#define EMC_QSAFE 0x068
#define EMC_RDV 0x06c
#define EMC_REFRESH 0x070
#define EMC_BURST_REFRESH_NUM 0x074
#define EMC_PDEX2WR 0x078
#define EMC_PDEX2RD 0x07c
#define EMC_PCHG2PDEN 0x080
#define EMC_ACT2PDEN 0x084
#define EMC_AR2PDEN 0x088
#define EMC_RW2PDEN 0x08c
#define EMC_TXSR 0x090
#define EMC_TCKE 0x094
#define EMC_TFAW 0x098
#define EMC_TRPAB 0x09c
#define EMC_TCLKSTABLE 0x0a0
#define EMC_TCLKSTOP 0x0a4
#define EMC_TREFBW 0x0a8
#define EMC_QUSE_EXTRA 0x0ac
#define EMC_ODT_WRITE 0x0b0
#define EMC_ODT_READ 0x0b4
#define EMC_WEXT 0x0b8
#define EMC_CTT 0x0bc
#define EMC_MRS_WAIT_CNT 0x0c8
#define EMC_MRS 0x0cc
#define EMC_EMRS 0x0d0
#define EMC_SELF_REF 0x0e0
#define EMC_MRW 0x0e8
#define EMC_MRR 0x0ec
#define EMC_XM2DQSPADCTRL3 0x0f8
#define EMC_FBIO_SPARE 0x100
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
#define EMC_CFG_RSV 0x120
#define EMC_AUTO_CAL_CONFIG 0x2a4
#define EMC_AUTO_CAL_INTERVAL 0x2a8
#define EMC_AUTO_CAL_STATUS 0x2ac
#define EMC_STATUS 0x2b4
#define EMC_CFG_2 0x2b8
#define EMC_CFG_DIG_DLL 0x2bc
#define EMC_CFG_DIG_DLL_PERIOD 0x2c0
#define EMC_CTT_DURATION 0x2d8
#define EMC_CTT_TERM_CTRL 0x2dc
#define EMC_ZCAL_INTERVAL 0x2e0
#define EMC_ZCAL_WAIT_CNT 0x2e4
#define EMC_ZQ_CAL 0x2ec
#define EMC_XM2CMDPADCTRL 0x2f0
#define EMC_XM2DQSPADCTRL2 0x2fc
#define EMC_XM2DQPADCTRL2 0x304
#define EMC_XM2CLKPADCTRL 0x308
#define EMC_XM2COMPPADCTRL 0x30c
#define EMC_XM2VTTGENPADCTRL 0x310
#define EMC_XM2VTTGENPADCTRL2 0x314
#define EMC_XM2QUSEPADCTRL 0x318
#define EMC_DLL_XFORM_DQS0 0x328
#define EMC_DLL_XFORM_DQS1 0x32c
#define EMC_DLL_XFORM_DQS2 0x330
#define EMC_DLL_XFORM_DQS3 0x334
#define EMC_DLL_XFORM_DQS4 0x338
#define EMC_DLL_XFORM_DQS5 0x33c
#define EMC_DLL_XFORM_DQS6 0x340
#define EMC_DLL_XFORM_DQS7 0x344
#define EMC_DLL_XFORM_QUSE0 0x348
#define EMC_DLL_XFORM_QUSE1 0x34c
#define EMC_DLL_XFORM_QUSE2 0x350
#define EMC_DLL_XFORM_QUSE3 0x354
#define EMC_DLL_XFORM_QUSE4 0x358
#define EMC_DLL_XFORM_QUSE5 0x35c
#define EMC_DLL_XFORM_QUSE6 0x360
#define EMC_DLL_XFORM_QUSE7 0x364
#define EMC_DLL_XFORM_DQ0 0x368
#define EMC_DLL_XFORM_DQ1 0x36c
#define EMC_DLL_XFORM_DQ2 0x370
#define EMC_DLL_XFORM_DQ3 0x374
#define EMC_DLI_TRIM_TXDQS0 0x3a8
#define EMC_DLI_TRIM_TXDQS1 0x3ac
#define EMC_DLI_TRIM_TXDQS2 0x3b0
#define EMC_DLI_TRIM_TXDQS3 0x3b4
#define EMC_DLI_TRIM_TXDQS4 0x3b8
#define EMC_DLI_TRIM_TXDQS5 0x3bc
#define EMC_DLI_TRIM_TXDQS6 0x3c0
#define EMC_DLI_TRIM_TXDQS7 0x3c4
#define EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE 0x3c8
#define EMC_STALL_THEN_EXE_AFTER_CLKCHANGE 0x3cc
#define EMC_UNSTALL_RW_AFTER_CLKCHANGE 0x3d0
#define EMC_SEL_DPD_CTRL 0x3d8
#define EMC_PRE_REFRESH_REQ_CNT 0x3dc
#define EMC_DYN_SELF_REF_CONTROL 0x3e0
#define EMC_TXSRDLL 0x3e4

#define EMC_STATUS_TIMING_UPDATE_STALLED BIT(23)

#define EMC_MODE_SET_DLL_RESET BIT(8)
#define EMC_MODE_SET_LONG_CNT BIT(26)

#define EMC_SELF_REF_CMD_ENABLED BIT(0)

#define DRAM_DEV_SEL_ALL (0 << 30)
#define DRAM_DEV_SEL_0 BIT(31)
#define DRAM_DEV_SEL_1 BIT(30)
#define DRAM_BROADCAST(num) \
	((num) > 1 ? DRAM_DEV_SEL_ALL : DRAM_DEV_SEL_0)

#define EMC_ZQ_CAL_CMD BIT(0)
#define EMC_ZQ_CAL_LONG BIT(4)
#define EMC_ZQ_CAL_LONG_CMD_DEV0 \
	(DRAM_DEV_SEL_0 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)
#define EMC_ZQ_CAL_LONG_CMD_DEV1 \
	(DRAM_DEV_SEL_1 | EMC_ZQ_CAL_LONG | EMC_ZQ_CAL_CMD)

#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
#define EMC_DBG_FORCE_UPDATE BIT(2)
#define EMC_DBG_CFG_PRIORITY BIT(24)

#define EMC_CFG5_QUSE_MODE_SHIFT 13
#define EMC_CFG5_QUSE_MODE_MASK (7 << EMC_CFG5_QUSE_MODE_SHIFT)

#define EMC_CFG5_QUSE_MODE_INTERNAL_LPBK 2
#define EMC_CFG5_QUSE_MODE_PULSE_INTERN 3

#define EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE BIT(9)

#define EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE BIT(10)

#define EMC_XM2QUSEPADCTRL_IVREF_ENABLE BIT(4)

#define EMC_XM2DQSPADCTRL2_VREF_ENABLE BIT(5)
#define EMC_XM2DQSPADCTRL3_VREF_ENABLE BIT(5)

#define EMC_AUTO_CAL_STATUS_ACTIVE BIT(31)

#define EMC_FBIO_CFG5_DRAM_TYPE_MASK 0x3

#define EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK 0x3ff
#define EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT 16
#define EMC_MRS_WAIT_CNT_LONG_WAIT_MASK \
	(0x3ff << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT)

#define EMC_REFCTRL_DEV_SEL_MASK 0x3
#define EMC_REFCTRL_ENABLE BIT(31)
#define EMC_REFCTRL_ENABLE_ALL(num) \
	(((num) > 1 ? 0 : 2) | EMC_REFCTRL_ENABLE)
#define EMC_REFCTRL_DISABLE_ALL(num) ((num) > 1 ? 0 : 2)

#define EMC_CFG_PERIODIC_QRST BIT(21)
#define EMC_CFG_DYN_SREF_ENABLE BIT(28)

#define EMC_CLKCHANGE_REQ_ENABLE BIT(0)
#define EMC_CLKCHANGE_PD_ENABLE BIT(1)
#define EMC_CLKCHANGE_SR_ENABLE BIT(2)

#define EMC_TIMING_UPDATE BIT(0)

#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
#define EMC_MRR_DIVLD_INT BIT(5)

#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
#define EMC_MRR_MRR_MA GENMASK(23, 16)
#define EMC_MRR_MRR_DATA GENMASK(15, 0)

#define EMC_ADR_CFG_EMEM_NUMDEV BIT(0)

enum emc_dram_type {
	DRAM_TYPE_DDR3,
	DRAM_TYPE_DDR1,
	DRAM_TYPE_LPDDR2,
	DRAM_TYPE_DDR2,
};

enum emc_dll_change {
	DLL_CHANGE_NONE,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF
};

static const u16 emc_timing_registers[] = {
	[0] = EMC_RC,
	[1] = EMC_RFC,
	[2] = EMC_RAS,
	[3] = EMC_RP,
	[4] = EMC_R2W,
	[5] = EMC_W2R,
	[6] = EMC_R2P,
	[7] = EMC_W2P,
	[8] = EMC_RD_RCD,
	[9] = EMC_WR_RCD,
	[10] = EMC_RRD,
	[11] = EMC_REXT,
	[12] = EMC_WEXT,
	[13] = EMC_WDV,
	[14] = EMC_QUSE,
	[15] = EMC_QRST,
	[16] = EMC_QSAFE,
	[17] = EMC_RDV,
	[18] = EMC_REFRESH,
	[19] = EMC_BURST_REFRESH_NUM,
	[20] = EMC_PRE_REFRESH_REQ_CNT,
	[21] = EMC_PDEX2WR,
	[22] = EMC_PDEX2RD,
	[23] = EMC_PCHG2PDEN,
	[24] = EMC_ACT2PDEN,
	[25] = EMC_AR2PDEN,
	[26] = EMC_RW2PDEN,
	[27] = EMC_TXSR,
	[28] = EMC_TXSRDLL,
	[29] = EMC_TCKE,
	[30] = EMC_TFAW,
	[31] = EMC_TRPAB,
	[32] = EMC_TCLKSTABLE,
	[33] = EMC_TCLKSTOP,
	[34] = EMC_TREFBW,
	[35] = EMC_QUSE_EXTRA,
	[36] = EMC_FBIO_CFG6,
	[37] = EMC_ODT_WRITE,
	[38] = EMC_ODT_READ,
	[39] = EMC_FBIO_CFG5,
	[40] = EMC_CFG_DIG_DLL,
	[41] = EMC_CFG_DIG_DLL_PERIOD,
	[42] = EMC_DLL_XFORM_DQS0,
	[43] = EMC_DLL_XFORM_DQS1,
	[44] = EMC_DLL_XFORM_DQS2,
	[45] = EMC_DLL_XFORM_DQS3,
	[46] = EMC_DLL_XFORM_DQS4,
	[47] = EMC_DLL_XFORM_DQS5,
	[48] = EMC_DLL_XFORM_DQS6,
	[49] = EMC_DLL_XFORM_DQS7,
	[50] = EMC_DLL_XFORM_QUSE0,
	[51] = EMC_DLL_XFORM_QUSE1,
	[52] = EMC_DLL_XFORM_QUSE2,
	[53] = EMC_DLL_XFORM_QUSE3,
	[54] = EMC_DLL_XFORM_QUSE4,
	[55] = EMC_DLL_XFORM_QUSE5,
	[56] = EMC_DLL_XFORM_QUSE6,
	[57] = EMC_DLL_XFORM_QUSE7,
	[58] = EMC_DLI_TRIM_TXDQS0,
	[59] = EMC_DLI_TRIM_TXDQS1,
	[60] = EMC_DLI_TRIM_TXDQS2,
	[61] = EMC_DLI_TRIM_TXDQS3,
	[62] = EMC_DLI_TRIM_TXDQS4,
	[63] = EMC_DLI_TRIM_TXDQS5,
	[64] = EMC_DLI_TRIM_TXDQS6,
	[65] = EMC_DLI_TRIM_TXDQS7,
	[66] = EMC_DLL_XFORM_DQ0,
	[67] = EMC_DLL_XFORM_DQ1,
	[68] = EMC_DLL_XFORM_DQ2,
	[69] = EMC_DLL_XFORM_DQ3,
	[70] = EMC_XM2CMDPADCTRL,
	[71] = EMC_XM2DQSPADCTRL2,
	[72] = EMC_XM2DQPADCTRL2,
	[73] = EMC_XM2CLKPADCTRL,
	[74] = EMC_XM2COMPPADCTRL,
	[75] = EMC_XM2VTTGENPADCTRL,
	[76] = EMC_XM2VTTGENPADCTRL2,
	[77] = EMC_XM2QUSEPADCTRL,
	[78] = EMC_XM2DQSPADCTRL3,
	[79] = EMC_CTT_TERM_CTRL,
	[80] = EMC_ZCAL_INTERVAL,
	[81] = EMC_ZCAL_WAIT_CNT,
	[82] = EMC_MRS_WAIT_CNT,
	[83] = EMC_AUTO_CAL_CONFIG,
	[84] = EMC_CTT,
	[85] = EMC_CTT_DURATION,
	[86] = EMC_DYN_SELF_REF_CONTROL,
	[87] = EMC_FBIO_SPARE,
	[88] = EMC_CFG_RSV,
};

struct emc_timing {
	unsigned long rate;

	u32 data[ARRAY_SIZE(emc_timing_registers)];

	u32 emc_auto_cal_interval;
	u32 emc_mode_1;
	u32 emc_mode_2;
	u32 emc_mode_reset;
	u32 emc_zcal_cnt_long;
	bool emc_cfg_periodic_qrst;
	bool emc_cfg_dyn_self_ref;
};

enum emc_rate_request_type {
	EMC_RATE_DEBUG,
	EMC_RATE_ICC,
	EMC_RATE_TYPE_MAX,
};

struct emc_rate_request {
	unsigned long min_rate;
	unsigned long max_rate;
};

struct tegra_emc {
	struct device *dev;
	struct tegra_mc *mc;
	struct icc_provider provider;
	struct notifier_block clk_nb;
	struct clk *clk;
	void __iomem *regs;
	unsigned int irq;
	bool bad_state;

	struct emc_timing *new_timing;
	struct emc_timing *timings;
	unsigned int num_timings;

	u32 mc_override;
	u32 emc_cfg;

	u32 emc_mode_1;
	u32 emc_mode_2;
	u32 emc_mode_reset;

	bool vref_cal_toggle : 1;
	bool zcal_long : 1;
	bool dll_on : 1;

	struct {
		struct dentry *root;
		unsigned long min_rate;
		unsigned long max_rate;
	} debugfs;

	/*
	 * There are multiple sources in the EMC driver which could request
	 * a min/max clock rate, these rates are contained in this array.
	 */
	struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];

	/* protect shared rate-change code path */
	struct mutex rate_lock;

	bool mrr_error;
};

static int emc_seq_update_timing(struct tegra_emc *emc)
{
	u32 val;
	int err;

	writel_relaxed(EMC_TIMING_UPDATE, emc->regs + EMC_TIMING_CONTROL);

	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_STATUS, val,
				!(val & EMC_STATUS_TIMING_UPDATE_STALLED),
				1, 200);
	if (err) {
		dev_err(emc->dev, "failed to update timing: %d\n", err);
		return err;
	}

	return 0;
}

static irqreturn_t tegra_emc_isr(int irq, void *data)
{
	struct tegra_emc *emc = data;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	u32 status;

	status = readl_relaxed(emc->regs + EMC_INTSTATUS) & intmask;
	if (!status)
		return IRQ_NONE;

	/* notify about HW problem */
	if (status & EMC_REFRESH_OVERFLOW_INT)
		dev_err_ratelimited(emc->dev,
				    "refresh request overflow timeout\n");

	/* clear interrupts */
	writel_relaxed(status, emc->regs + EMC_INTSTATUS);

	return IRQ_HANDLED;
}

static struct emc_timing *emc_find_timing(struct tegra_emc *emc,
					  unsigned long rate)
{
	struct emc_timing *timing = NULL;
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate >= rate) {
			timing = &emc->timings[i];
			break;
		}
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu\n", rate);
		return NULL;
	}

	return timing;
}

static bool emc_dqs_preset(struct tegra_emc *emc, struct emc_timing *timing,
			   bool *schmitt_to_vref)
{
	bool preset = false;
	u32 val;

	if (timing->data[71] & EMC_XM2DQSPADCTRL2_VREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL2);

		if (!(val & EMC_XM2DQSPADCTRL2_VREF_ENABLE)) {
			val |= EMC_XM2DQSPADCTRL2_VREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL2);

			preset = true;
		}
	}

	if (timing->data[78] & EMC_XM2DQSPADCTRL3_VREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2DQSPADCTRL3);

		if (!(val & EMC_XM2DQSPADCTRL3_VREF_ENABLE)) {
			val |= EMC_XM2DQSPADCTRL3_VREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2DQSPADCTRL3);

			preset = true;
		}
	}

	if (timing->data[77] & EMC_XM2QUSEPADCTRL_IVREF_ENABLE) {
		val = readl_relaxed(emc->regs + EMC_XM2QUSEPADCTRL);

		if (!(val & EMC_XM2QUSEPADCTRL_IVREF_ENABLE)) {
			val |= EMC_XM2QUSEPADCTRL_IVREF_ENABLE;
			writel_relaxed(val, emc->regs + EMC_XM2QUSEPADCTRL);

			*schmitt_to_vref = true;
			preset = true;
		}
	}

	return preset;
}

static int emc_prepare_mc_clk_cfg(struct tegra_emc *emc, unsigned long rate)
{
	struct tegra_mc *mc = emc->mc;
	unsigned int misc0_index = 16;
	unsigned int i;
	bool same;

	for (i = 0; i < mc->num_timings; i++) {
		if (mc->timings[i].rate != rate)
			continue;

		if (mc->timings[i].emem_data[misc0_index] & BIT(27))
			same = true;
		else
			same = false;

		return tegra20_clk_prepare_emc_mc_same_freq(emc->clk, same);
	}

	return -EINVAL;
}

static int emc_prepare_timing_change(struct tegra_emc *emc, unsigned long rate)
{
	struct emc_timing *timing = emc_find_timing(emc, rate);
	enum emc_dll_change dll_change;
	enum emc_dram_type dram_type;
	bool schmitt_to_vref = false;
	unsigned int pre_wait = 0;
	bool qrst_used = false;
	unsigned int dram_num;
	unsigned int i;
	u32 fbio_cfg5;
	u32 emc_dbg;
	u32 val;
	int err;

	if (!timing || emc->bad_state)
		return -EINVAL;

	dev_dbg(emc->dev, "%s: using timing rate %lu for requested rate %lu\n",
		__func__, timing->rate, rate);

	emc->bad_state = true;

	err = emc_prepare_mc_clk_cfg(emc, rate);
	if (err) {
		dev_err(emc->dev, "mc clock preparation failed: %d\n", err);
		return err;
	}

	emc->vref_cal_toggle = false;
	emc->mc_override = mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);
	emc->emc_cfg = readl_relaxed(emc->regs + EMC_CFG);
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);

	if (emc->dll_on == !!(timing->emc_mode_1 & 0x1))
		dll_change = DLL_CHANGE_NONE;
	else if (timing->emc_mode_1 & 0x1)
		dll_change = DLL_CHANGE_ON;
	else
		dll_change = DLL_CHANGE_OFF;

	emc->dll_on = !!(timing->emc_mode_1 & 0x1);

	if (timing->data[80] && !readl_relaxed(emc->regs + EMC_ZCAL_INTERVAL))
		emc->zcal_long = true;
	else
		emc->zcal_long = false;

	fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;

	dram_num = tegra_mc_get_emem_device_count(emc->mc);

	/* disable dynamic self-refresh */
	if (emc->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
		emc->emc_cfg &= ~EMC_CFG_DYN_SREF_ENABLE;
		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);

		pre_wait = 5;
	}

	/* update MC arbiter settings */
	val = mc_readl(emc->mc, MC_EMEM_ARB_OUTSTANDING_REQ);
	if (!(val & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
	    ((val & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > 0x50)) {
		val = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
		      MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | 0x50;
		mc_writel(emc->mc, val, MC_EMEM_ARB_OUTSTANDING_REQ);
		mc_writel(emc->mc, MC_TIMING_UPDATE, MC_TIMING_CONTROL);
	}

	if (emc->mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
		mc_writel(emc->mc,
			  emc->mc_override & ~MC_EMEM_ARB_OVERRIDE_EACK_MASK,
			  MC_EMEM_ARB_OVERRIDE);

	/* check DQ/DQS VREF delay */
	if (emc_dqs_preset(emc, timing, &schmitt_to_vref)) {
		if (pre_wait < 3)
			pre_wait = 3;
	}

	if (pre_wait) {
		err = emc_seq_update_timing(emc);
		if (err)
			return err;

		udelay(pre_wait);
	}

	/* disable auto-calibration if VREF mode is switching */
	if (timing->emc_auto_cal_interval) {
		val = readl_relaxed(emc->regs + EMC_XM2COMPPADCTRL);
		val ^= timing->data[74];

		if (val & EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE) {
			writel_relaxed(0, emc->regs + EMC_AUTO_CAL_INTERVAL);

			err = readl_relaxed_poll_timeout_atomic(
				emc->regs + EMC_AUTO_CAL_STATUS, val,
				!(val & EMC_AUTO_CAL_STATUS_ACTIVE), 1, 300);
			if (err) {
				dev_err(emc->dev,
					"auto-cal finish timeout: %d\n", err);
				return err;
			}

			emc->vref_cal_toggle = true;
		}
	}

	/* program shadow registers */
	for (i = 0; i < ARRAY_SIZE(timing->data); i++) {
		/* EMC_XM2CLKPADCTRL should be programmed separately */
		if (i != 73)
			writel_relaxed(timing->data[i],
				       emc->regs + emc_timing_registers[i]);
	}

	err = tegra_mc_write_emem_configuration(emc->mc, timing->rate);
	if (err)
		return err;

	/* DDR3: predict MRS long wait count */
	if (dram_type == DRAM_TYPE_DDR3 && dll_change == DLL_CHANGE_ON) {
		u32 cnt = 512;

		if (emc->zcal_long)
			cnt -= dram_num * 256;

		val = timing->data[82] & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK;
		if (cnt < val)
			cnt = val;

		val = timing->data[82] & ~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
		val |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
		       EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

		writel_relaxed(val, emc->regs + EMC_MRS_WAIT_CNT);
	}

	/* this read also completes the writes */
	val = readl_relaxed(emc->regs + EMC_SEL_DPD_CTRL);

	if (!(val & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) && schmitt_to_vref) {
		u32 cur_mode, new_mode;

		cur_mode = fbio_cfg5 & EMC_CFG5_QUSE_MODE_MASK;
		cur_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;

		new_mode = timing->data[39] & EMC_CFG5_QUSE_MODE_MASK;
		new_mode >>= EMC_CFG5_QUSE_MODE_SHIFT;

		if ((cur_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
		     cur_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK) ||
		    (new_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN &&
		     new_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))
			qrst_used = true;
	}

	/* flow control marker 1 */
	writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_BEFORE_CLKCHANGE);

	/* enable periodic reset */
	if (qrst_used) {
		writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE,
			       emc->regs + EMC_DBG);
		writel_relaxed(emc->emc_cfg | EMC_CFG_PERIODIC_QRST,
			       emc->regs + EMC_CFG);
		writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
	}

	/* disable auto-refresh to save time after clock change */
	writel_relaxed(EMC_REFCTRL_DISABLE_ALL(dram_num),
		       emc->regs + EMC_REFCTRL);

	/* turn off DLL and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_EMRS);

		writel_relaxed(DRAM_BROADCAST(dram_num) |
			       EMC_SELF_REF_CMD_ENABLED,
			       emc->regs + EMC_SELF_REF);
	}

	/* flow control marker 2 */
	writel_relaxed(0x1, emc->regs + EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* enable write-active MUX, update unshadowed pad control */
	writel_relaxed(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, emc->regs + EMC_DBG);
	writel_relaxed(timing->data[73], emc->regs + EMC_XM2CLKPADCTRL);

	/* restore periodic QRST and disable write-active MUX */
	val = !!(emc->emc_cfg & EMC_CFG_PERIODIC_QRST);
	if (qrst_used || timing->emc_cfg_periodic_qrst != val) {
		if (timing->emc_cfg_periodic_qrst)
			emc->emc_cfg |= EMC_CFG_PERIODIC_QRST;
		else
			emc->emc_cfg &= ~EMC_CFG_PERIODIC_QRST;

		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
	}
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	/* exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		writel_relaxed(DRAM_BROADCAST(dram_num),
			       emc->regs + EMC_SELF_REF);

	/* set DRAM-mode registers */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (timing->emc_mode_1 != emc->emc_mode_1)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_EMRS);

		if (timing->emc_mode_2 != emc->emc_mode_2)
			writel_relaxed(timing->emc_mode_2,
				       emc->regs + EMC_EMRS);

		if (timing->emc_mode_reset != emc->emc_mode_reset ||
		    dll_change == DLL_CHANGE_ON) {
			val = timing->emc_mode_reset;
			if (dll_change == DLL_CHANGE_ON) {
				val |= EMC_MODE_SET_DLL_RESET;
				val |= EMC_MODE_SET_LONG_CNT;
			} else {
				val &= ~EMC_MODE_SET_DLL_RESET;
			}
			writel_relaxed(val, emc->regs + EMC_MRS);
		}
	} else {
		if (timing->emc_mode_2 != emc->emc_mode_2)
			writel_relaxed(timing->emc_mode_2,
				       emc->regs + EMC_MRW);

		if (timing->emc_mode_1 != emc->emc_mode_1)
			writel_relaxed(timing->emc_mode_1,
				       emc->regs + EMC_MRW);
	}

	emc->emc_mode_1 = timing->emc_mode_1;
	emc->emc_mode_2 = timing->emc_mode_2;
	emc->emc_mode_reset = timing->emc_mode_reset;

	/* issue ZCAL command if turning ZCAL on */
	if (emc->zcal_long) {
		writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV0,
			       emc->regs + EMC_ZQ_CAL);

		if (dram_num > 1)
			writel_relaxed(EMC_ZQ_CAL_LONG_CMD_DEV1,
				       emc->regs + EMC_ZQ_CAL);
	}

	/* flow control marker 3 */
	writel_relaxed(0x1, emc->regs + EMC_UNSTALL_RW_AFTER_CLKCHANGE);

	/*
	 * Read and discard an arbitrary MC register (Note: EMC registers
	 * can't be used) to ensure the register writes are completed.
	 */
	mc_readl(emc->mc, MC_EMEM_ARB_OVERRIDE);

	return 0;
}

static int emc_complete_timing_change(struct tegra_emc *emc,
				      unsigned long rate)
{
	struct emc_timing *timing = emc_find_timing(emc, rate);
	unsigned int dram_num;
	int err;
	u32 v;

	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, v,
						v & EMC_CLKCHANGE_COMPLETE_INT,
						1, 100);
	if (err) {
		dev_err(emc->dev, "emc-car handshake timeout: %d\n", err);
		return err;
	}

	/* re-enable auto-refresh */
	dram_num = tegra_mc_get_emem_device_count(emc->mc);
	writel_relaxed(EMC_REFCTRL_ENABLE_ALL(dram_num),
		       emc->regs + EMC_REFCTRL);

	/* restore auto-calibration */
	if (emc->vref_cal_toggle)
		writel_relaxed(timing->emc_auto_cal_interval,
			       emc->regs + EMC_AUTO_CAL_INTERVAL);

	/* restore dynamic self-refresh */
	if (timing->emc_cfg_dyn_self_ref) {
		emc->emc_cfg |= EMC_CFG_DYN_SREF_ENABLE;
		writel_relaxed(emc->emc_cfg, emc->regs + EMC_CFG);
	}

	/* set number of clocks to wait after each ZQ command */
	if (emc->zcal_long)
		writel_relaxed(timing->emc_zcal_cnt_long,
			       emc->regs + EMC_ZCAL_WAIT_CNT);

	/* wait for writes to settle */
	udelay(2);

	/* update restored timing */
	err = emc_seq_update_timing(emc);
	if (!err)
		emc->bad_state = false;

	/* restore early ACK */
	mc_writel(emc->mc, emc->mc_override, MC_EMEM_ARB_OVERRIDE);

	return err;
}

static int emc_unprepare_timing_change(struct tegra_emc *emc,
				       unsigned long rate)
{
	if (!emc->bad_state) {
		/* shouldn't ever happen in practice */
		dev_err(emc->dev, "timing configuration can't be reverted\n");
		emc->bad_state = true;
	}

	return 0;
}

static int emc_clk_change_notify(struct notifier_block *nb,
				 unsigned long msg, void *data)
{
	struct tegra_emc *emc = container_of(nb, struct tegra_emc, clk_nb);
	struct clk_notifier_data *cnd = data;
	int err;

	switch (msg) {
	case PRE_RATE_CHANGE:
		/*
		 * Disable interrupt since read accesses are prohibited after
		 * stalling.
		 */
		disable_irq(emc->irq);
		err = emc_prepare_timing_change(emc, cnd->new_rate);
		enable_irq(emc->irq);
		break;

	case ABORT_RATE_CHANGE:
		err = emc_unprepare_timing_change(emc, cnd->old_rate);
		break;

	case POST_RATE_CHANGE:
		err = emc_complete_timing_change(emc, cnd->new_rate);
		break;

	default:
		return NOTIFY_DONE;
	}

	return notifier_from_errno(err);
}

static int load_one_timing_from_dt(struct tegra_emc *emc,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	u32 value;
	int err;

	err = of_property_read_u32(node, "clock-frequency", &value);
	if (err) {
		dev_err(emc->dev, "timing %pOF: failed to read rate: %d\n",
			node, err);
		return err;
	}

	timing->rate = value;

	err = of_property_read_u32_array(node, "nvidia,emc-configuration",
					 timing->data,
					 ARRAY_SIZE(emc_timing_registers));
	if (err) {
		dev_err(emc->dev,
			"timing %pOF: failed to read emc timing data: %d\n",
			node, err);
		return err;
	}

#define EMC_READ_BOOL(prop, dtprop) \
	timing->prop = of_property_read_bool(node, dtprop);

#define EMC_READ_U32(prop, dtprop) \
	err = of_property_read_u32(node, dtprop, &timing->prop); \
	if (err) { \
		dev_err(emc->dev, \
			"timing %pOFn: failed to read " #prop ": %d\n", \
			node, err); \
		return err; \
	}

	EMC_READ_U32(emc_auto_cal_interval, "nvidia,emc-auto-cal-interval")
	EMC_READ_U32(emc_mode_1, "nvidia,emc-mode-1")
	EMC_READ_U32(emc_mode_2, "nvidia,emc-mode-2")
	EMC_READ_U32(emc_mode_reset, "nvidia,emc-mode-reset")
	EMC_READ_U32(emc_zcal_cnt_long, "nvidia,emc-zcal-cnt-long")
	EMC_READ_BOOL(emc_cfg_dyn_self_ref, "nvidia,emc-cfg-dyn-self-ref")
	EMC_READ_BOOL(emc_cfg_periodic_qrst, "nvidia,emc-cfg-periodic-qrst")

#undef EMC_READ_U32
#undef EMC_READ_BOOL

	dev_dbg(emc->dev, "%s: %pOF: rate %lu\n", __func__, node, timing->rate);

	return 0;
}
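
/*
 * For reference, a timing node parsed by the function above has roughly
 * the following shape. All values here are placeholders, not real board
 * data; nvidia,emc-configuration must carry one cell per entry of
 * emc_timing_registers[]:
 *
 *	emc-timings-0 {
 *		nvidia,ram-code = <0>;
 *
 *		timing-400000000 {
 *			clock-frequency = <400000000>;
 *			nvidia,emc-auto-cal-interval = <0x001fffff>;
 *			nvidia,emc-mode-1 = <0x80100002>;
 *			nvidia,emc-mode-2 = <0x80200000>;
 *			nvidia,emc-mode-reset = <0x80000521>;
 *			nvidia,emc-zcal-cnt-long = <0x00000040>;
 *			nvidia,emc-cfg-dyn-self-ref;
 *			nvidia,emc-configuration = < ... >;
 *		};
 *	};
 */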

static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;

	if (a->rate > b->rate)
		return 1;

	return 0;
}

static int emc_check_mc_timings(struct tegra_emc *emc)
{
	struct tegra_mc *mc = emc->mc;
	unsigned int i;

	if (emc->num_timings != mc->num_timings) {
		dev_err(emc->dev, "emc/mc timings number mismatch: %u %u\n",
			emc->num_timings, mc->num_timings);
		return -EINVAL;
	}

	for (i = 0; i < mc->num_timings; i++) {
		if (emc->timings[i].rate != mc->timings[i].rate) {
			dev_err(emc->dev,
				"emc/mc timing rate mismatch: %lu %lu\n",
				emc->timings[i].rate, mc->timings[i].rate);
			return -EINVAL;
		}
	}

	return 0;
}

static int emc_load_timings_from_dt(struct tegra_emc *emc,
				    struct device_node *node)
{
	struct device_node *child;
	struct emc_timing *timing;
	int child_count;
	int err;

	child_count = of_get_child_count(node);
	if (!child_count) {
		dev_err(emc->dev, "no memory timings in: %pOF\n", node);
		return -EINVAL;
	}

	emc->timings = devm_kcalloc(emc->dev, child_count, sizeof(*timing),
				    GFP_KERNEL);
	if (!emc->timings)
		return -ENOMEM;

	emc->num_timings = child_count;
	timing = emc->timings;

	for_each_child_of_node(node, child) {
		err = load_one_timing_from_dt(emc, timing++, child);
		if (err) {
			of_node_put(child);
			return err;
		}
	}

	sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
	     NULL);

	err = emc_check_mc_timings(emc);
	if (err)
		return err;

	dev_info_once(emc->dev,
		      "got %u timings for RAM code %u (min %luMHz max %luMHz)\n",
		      emc->num_timings,
		      tegra_read_ram_code(),
		      emc->timings[0].rate / 1000000,
		      emc->timings[emc->num_timings - 1].rate / 1000000);

	return 0;
}

static struct device_node *emc_find_node_by_ram_code(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	struct device_node *np;
	u32 value, ram_code;
	int err;

	if (emc->mrr_error) {
		dev_warn(dev, "memory timings skipped due to MRR error\n");
		return NULL;
	}

	if (of_get_child_count(dev->of_node) == 0) {
		dev_info_once(dev, "device-tree doesn't have memory timings\n");
		return NULL;
	}

	ram_code = tegra_read_ram_code();

	for_each_child_of_node(dev->of_node, np) {
		err = of_property_read_u32(np, "nvidia,ram-code", &value);
		if (err || value != ram_code)
			continue;

		return np;
	}

	dev_err(dev, "no memory timings for RAM code %u found in device-tree\n",
		ram_code);

	return NULL;
}

static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
					unsigned int emem_dev,
					unsigned int register_addr,
					unsigned int *register_data)
{
	u32 memory_dev = emem_dev ? 1 : 2;
	u32 val, mr_mask = 0xff;
	int err;

	/* clear data-valid interrupt status */
	writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);

	/* issue mode register read request */
	val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
	val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);

	writel_relaxed(val, emc->regs + EMC_MRR);

	/* wait for the LPDDR2 data-valid interrupt */
	err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
						val & EMC_MRR_DIVLD_INT,
						1, 100);
	if (err) {
		dev_err(emc->dev, "mode register %u read failed: %d\n",
			register_addr, err);
		emc->mrr_error = true;
		return err;
	}

	/* read out mode register data */
	val = readl_relaxed(emc->regs + EMC_MRR);
	*register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;

	return 0;
}

static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
				      unsigned int emem_dev)
{
	union lpddr2_basic_config4 basic_conf4;
	unsigned int manufacturer_id;
	unsigned int revision_id1;
	unsigned int revision_id2;

	/* these registers are standard for all LPDDR JEDEC memory chips */
	emc_read_lpddr_mode_register(emc, emem_dev, 5, &manufacturer_id);
	emc_read_lpddr_mode_register(emc, emem_dev, 6, &revision_id1);
	emc_read_lpddr_mode_register(emc, emem_dev, 7, &revision_id2);
	emc_read_lpddr_mode_register(emc, emem_dev, 8, &basic_conf4.value);

	dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
		 emem_dev, manufacturer_id,
		 lpddr2_jedec_manufacturer(manufacturer_id),
		 revision_id1, revision_id2,
		 4 >> basic_conf4.arch_type,
		 64 << basic_conf4.density,
		 32 >> basic_conf4.io_width);
}

static int emc_setup_hw(struct tegra_emc *emc)
{
	u32 fbio_cfg5, emc_cfg, emc_dbg, emc_adr_cfg;
	u32 intmask = EMC_REFRESH_OVERFLOW_INT;
	static bool print_sdram_info_once;
	enum emc_dram_type dram_type;
	const char *dram_type_str;
	unsigned int emem_numdev;

	fbio_cfg5 = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
	dram_type = fbio_cfg5 & EMC_FBIO_CFG5_DRAM_TYPE_MASK;

	emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);

	/* enable EMC and CAR to handshake on PLL divider/source changes */
	emc_cfg |= EMC_CLKCHANGE_REQ_ENABLE;

	/* configure clock change mode accordingly to DRAM type */
	switch (dram_type) {
	case DRAM_TYPE_LPDDR2:
		emc_cfg |= EMC_CLKCHANGE_PD_ENABLE;
		emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
		break;

	default:
		emc_cfg &= ~EMC_CLKCHANGE_SR_ENABLE;
		emc_cfg &= ~EMC_CLKCHANGE_PD_ENABLE;
		break;
	}

	writel_relaxed(emc_cfg, emc->regs + EMC_CFG_2);

	/* initialize interrupt */
	writel_relaxed(intmask, emc->regs + EMC_INTMASK);
	writel_relaxed(0xffffffff, emc->regs + EMC_INTSTATUS);

	/* ensure that unwanted debug features are disabled */
	emc_dbg = readl_relaxed(emc->regs + EMC_DBG);
	emc_dbg |= EMC_DBG_CFG_PRIORITY;
	emc_dbg &= ~EMC_DBG_READ_MUX_ASSEMBLY;
	emc_dbg &= ~EMC_DBG_WRITE_MUX_ACTIVE;
	emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
	writel_relaxed(emc_dbg, emc->regs + EMC_DBG);

	switch (dram_type) {
	case DRAM_TYPE_DDR1:
		dram_type_str = "DDR1";
		break;
	case DRAM_TYPE_LPDDR2:
		dram_type_str = "LPDDR2";
		break;
	case DRAM_TYPE_DDR2:
		dram_type_str = "DDR2";
		break;
	case DRAM_TYPE_DDR3:
		dram_type_str = "DDR3";
		break;
	}

	emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG);
	emem_numdev = FIELD_GET(EMC_ADR_CFG_EMEM_NUMDEV, emc_adr_cfg) + 1;

	dev_info_once(emc->dev, "%u %s %s attached\n", emem_numdev,
		      dram_type_str, emem_numdev == 2 ? "devices" : "device");

	if (dram_type == DRAM_TYPE_LPDDR2 && !print_sdram_info_once) {
		while (emem_numdev--)
			emc_read_lpddr_sdram_info(emc, emem_numdev);

		print_sdram_info_once = true;
	}

	return 0;
}

static long emc_round_rate(unsigned long rate,
			   unsigned long min_rate,
			   unsigned long max_rate,
			   void *arg)
{
	struct emc_timing *timing = NULL;
	struct tegra_emc *emc = arg;
	unsigned int i;

	if (!emc->num_timings)
		return clk_get_rate(emc->clk);

	min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < rate && i != emc->num_timings - 1)
			continue;

		if (emc->timings[i].rate > max_rate) {
			i = max(i, 1u) - 1;

			if (emc->timings[i].rate < min_rate)
				break;
		}

		if (emc->timings[i].rate < min_rate)
			continue;

		timing = &emc->timings[i];
		break;
	}

	if (!timing) {
		dev_err(emc->dev, "no timing for rate %lu min %lu max %lu\n",
			rate, min_rate, max_rate);
		return -EINVAL;
	}

	return timing->rate;
}

static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
{
	unsigned int i;

	for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
		emc->requested_rate[i].min_rate = 0;
		emc->requested_rate[i].max_rate = ULONG_MAX;
	}
}

static int emc_request_rate(struct tegra_emc *emc,
			    unsigned long new_min_rate,
			    unsigned long new_max_rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = emc->requested_rate;
	unsigned long min_rate = 0, max_rate = ULONG_MAX;
	unsigned int i;
	int err;

	/* select minimum and maximum rates among the requested rates */
	for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
		if (i == type) {
			min_rate = max(new_min_rate, min_rate);
			max_rate = min(new_max_rate, max_rate);
		} else {
			min_rate = max(req->min_rate, min_rate);
			max_rate = min(req->max_rate, max_rate);
		}
	}

	if (min_rate > max_rate) {
		dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
				    __func__, type, min_rate, max_rate);
		return -ERANGE;
	}

	/*
	 * EMC rate-changes should go via OPP API because it manages voltage
	 * changes.
	 */
	err = dev_pm_opp_set_rate(emc->dev, min_rate);
	if (err)
		return err;

	emc->requested_rate[type].min_rate = new_min_rate;
	emc->requested_rate[type].max_rate = new_max_rate;

	return 0;
}

static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = &emc->requested_rate[type];
	int ret;

	mutex_lock(&emc->rate_lock);
	ret = emc_request_rate(emc, rate, req->max_rate, type);
	mutex_unlock(&emc->rate_lock);

	return ret;
}

static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
			    enum emc_rate_request_type type)
{
	struct emc_rate_request *req = &emc->requested_rate[type];
	int ret;

	mutex_lock(&emc->rate_lock);
	ret = emc_request_rate(emc, req->min_rate, rate, type);
	mutex_unlock(&emc->rate_lock);

	return ret;
}

/*
 * debugfs interface
 *
 * The memory controller driver exposes some files in debugfs that can be used
 * to control the EMC frequency. The top-level directory can be found here:
 *
 *   /sys/kernel/debug/emc
 *
 * It contains the following files:
 *
 * - available_rates: This file contains a list of valid, space-separated
 *   EMC frequencies.
 *
 * - min_rate: Writing a value to this file sets the given frequency as the
 *   floor of the permitted range. If this is higher than the currently
 *   configured EMC frequency, this will cause the frequency to be
 *   increased so that it stays within the valid range.
 *
 * - max_rate: Similarly to the min_rate file, writing a value to this file
 *   sets the given frequency as the ceiling of the permitted range. If
 *   the value is lower than the currently configured EMC frequency, this
 *   will cause the frequency to be decreased so that it stays within the
 *   valid range.
 */
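
/*
 * Example session (the rates shown are illustrative; use the values
 * reported by available_rates on the actual board):
 *
 *	# cat /sys/kernel/debug/emc/available_rates
 *	25500000 102000000 204000000 400000000 800000000
 *	# echo 204000000 > /sys/kernel/debug/emc/min_rate
 *	# echo 400000000 > /sys/kernel/debug/emc/max_rate
 */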

static bool tegra_emc_validate_rate(struct tegra_emc *emc, unsigned long rate)
{
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++)
		if (rate == emc->timings[i].rate)
			return true;

	return false;
}

static int tegra_emc_debug_available_rates_show(struct seq_file *s, void *data)
{
	struct tegra_emc *emc = s->private;
	const char *prefix = "";
	unsigned int i;

	for (i = 0; i < emc->num_timings; i++) {
		seq_printf(s, "%s%lu", prefix, emc->timings[i].rate);
		prefix = " ";
	}

	seq_puts(s, "\n");

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(tegra_emc_debug_available_rates);

static int tegra_emc_debug_min_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.min_rate;

	return 0;
}

static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.min_rate = rate;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
			 tegra_emc_debug_min_rate_get,
			 tegra_emc_debug_min_rate_set, "%llu\n");

static int tegra_emc_debug_max_rate_get(void *data, u64 *rate)
{
	struct tegra_emc *emc = data;

	*rate = emc->debugfs.max_rate;

	return 0;
}

static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
{
	struct tegra_emc *emc = data;
	int err;

	if (!tegra_emc_validate_rate(emc, rate))
		return -EINVAL;

	err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
	if (err < 0)
		return err;

	emc->debugfs.max_rate = rate;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
			 tegra_emc_debug_max_rate_get,
			 tegra_emc_debug_max_rate_set, "%llu\n");

static void tegra_emc_debugfs_init(struct tegra_emc *emc)
{
	struct device *dev = emc->dev;
	unsigned int i;
	int err;

	emc->debugfs.min_rate = ULONG_MAX;
	emc->debugfs.max_rate = 0;

	for (i = 0; i < emc->num_timings; i++) {
		if (emc->timings[i].rate < emc->debugfs.min_rate)
			emc->debugfs.min_rate = emc->timings[i].rate;

		if (emc->timings[i].rate > emc->debugfs.max_rate)
			emc->debugfs.max_rate = emc->timings[i].rate;
	}

	if (!emc->num_timings) {
		emc->debugfs.min_rate = clk_get_rate(emc->clk);
		emc->debugfs.max_rate = emc->debugfs.min_rate;
	}

	err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
				 emc->debugfs.max_rate);
	if (err < 0) {
		dev_err(dev, "failed to set rate range [%lu-%lu] for %pC\n",
			emc->debugfs.min_rate, emc->debugfs.max_rate,
			emc->clk);
	}

	emc->debugfs.root = debugfs_create_dir("emc", NULL);

	debugfs_create_file("available_rates", 0444, emc->debugfs.root,
			    emc, &tegra_emc_debug_available_rates_fops);
	debugfs_create_file("min_rate", 0644, emc->debugfs.root,
			    emc, &tegra_emc_debug_min_rate_fops);
	debugfs_create_file("max_rate", 0644, emc->debugfs.root,
			    emc, &tegra_emc_debug_max_rate_fops);
}

static inline struct tegra_emc *
to_tegra_emc_provider(struct icc_provider *provider)
{
	return container_of(provider, struct tegra_emc, provider);
}

static struct icc_node_data *
emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
	struct icc_provider *provider = data;
	struct icc_node_data *ndata;
	struct icc_node *node;

	/* External Memory is the only possible ICC route */
	list_for_each_entry(node, &provider->nodes, node_list) {
		if (node->id != TEGRA_ICC_EMEM)
			continue;

		ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
		if (!ndata)
			return ERR_PTR(-ENOMEM);

		/*
		 * SRC and DST nodes should have matching TAG in order to have
		 * it set by default for a requested path.
		 */
		ndata->tag = TEGRA_MC_ICC_TAG_ISO;
		ndata->node = node;

		return ndata;
	}

	return ERR_PTR(-EPROBE_DEFER);
}

static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
{
	struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
	unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
	unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
	unsigned long long rate = max(avg_bw, peak_bw);
	const unsigned int dram_data_bus_width_bytes = 4;
	const unsigned int ddr = 2;
	int err;

	/*
	 * Tegra30 EMC runs at the SDRAM bus clock rate. This means that
	 * the EMC clock rate is half the peak data rate because data is
	 * sampled on both EMC clock edges.
	 */
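	/*
	 * Example: a 3.2 GB/s peak-bandwidth request translates to
	 * 3200000000 / (2 * 4) = 400 MHz EMC clock rate.
	 */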
	do_div(rate, ddr * dram_data_bus_width_bytes);
	rate = min_t(u64, rate, U32_MAX);

	err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
	if (err)
		return err;

	return 0;
}

static int tegra_emc_interconnect_init(struct tegra_emc *emc)
{
	const struct tegra_mc_soc *soc = emc->mc->soc;
	struct icc_node *node;
	int err;

	emc->provider.dev = emc->dev;
	emc->provider.set = emc_icc_set;
	emc->provider.data = &emc->provider;
	emc->provider.aggregate = soc->icc_ops->aggregate;
	emc->provider.xlate_extended = emc_of_icc_xlate_extended;

	icc_provider_init(&emc->provider);

	/* create External Memory Controller node */
	node = icc_node_create(TEGRA_ICC_EMC);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto err_msg;
	}

	node->name = "External Memory Controller";
	icc_node_add(node, &emc->provider);

	/* link External Memory Controller to External Memory (DRAM) */
	err = icc_link_create(node, TEGRA_ICC_EMEM);
	if (err)
		goto remove_nodes;

	/* create External Memory node */
	node = icc_node_create(TEGRA_ICC_EMEM);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);
		goto remove_nodes;
	}

	node->name = "External Memory (DRAM)";
	icc_node_add(node, &emc->provider);

	err = icc_provider_register(&emc->provider);
	if (err)
		goto remove_nodes;

	return 0;

remove_nodes:
	icc_nodes_remove(&emc->provider);
err_msg:
	dev_err(emc->dev, "failed to initialize ICC: %d\n", err);

	return err;
}

static void devm_tegra_emc_unset_callback(void *data)
{
	tegra20_clk_set_emc_round_callback(NULL, NULL);
}

static void devm_tegra_emc_unreg_clk_notifier(void *data)
{
	struct tegra_emc *emc = data;

	clk_notifier_unregister(emc->clk, &emc->clk_nb);
}

static int tegra_emc_init_clk(struct tegra_emc *emc)
{
	int err;

	tegra20_clk_set_emc_round_callback(emc_round_rate, emc);

	err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
				       NULL);
	if (err)
		return err;

	emc->clk = devm_clk_get(emc->dev, NULL);
	if (IS_ERR(emc->clk)) {
		dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
		return PTR_ERR(emc->clk);
	}

	err = clk_notifier_register(emc->clk, &emc->clk_nb);
	if (err) {
		dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
		return err;
	}

	err = devm_add_action_or_reset(emc->dev,
				       devm_tegra_emc_unreg_clk_notifier, emc);
	if (err)
		return err;

	return 0;
}

static int tegra_emc_probe(struct platform_device *pdev)
{
	struct tegra_core_opp_params opp_params = {};
	struct device_node *np;
	struct tegra_emc *emc;
	int err;

	emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
	if (!emc)
		return -ENOMEM;

	emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
	if (IS_ERR(emc->mc))
		return PTR_ERR(emc->mc);

	mutex_init(&emc->rate_lock);
	emc->clk_nb.notifier_call = emc_clk_change_notify;
	emc->dev = &pdev->dev;

	emc->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(emc->regs))
		return PTR_ERR(emc->regs);

	err = emc_setup_hw(emc);
	if (err)
		return err;

	np = emc_find_node_by_ram_code(emc);
	if (np) {
		err = emc_load_timings_from_dt(emc, np);
		of_node_put(np);
		if (err)
			return err;
	}

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;

	emc->irq = err;

	err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
			       dev_name(&pdev->dev), emc);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq: %d\n", err);
		return err;
	}

	err = tegra_emc_init_clk(emc);
	if (err)
		return err;

	opp_params.init_state = true;

	err = devm_tegra_core_dev_init_opp_table(&pdev->dev, &opp_params);
	if (err)
		return err;

	platform_set_drvdata(pdev, emc);
	tegra_emc_rate_requests_init(emc);
	tegra_emc_debugfs_init(emc);
	tegra_emc_interconnect_init(emc);

	/*
	 * Don't allow the kernel module to be unloaded. Unloading adds some
	 * extra complexity which isn't really worth the effort in the case
	 * of this driver.
	 */
	try_module_get(THIS_MODULE);

	return 0;
}

static int tegra_emc_suspend(struct device *dev)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);
	int err;

	/* take exclusive control over the clock's rate */
	err = clk_rate_exclusive_get(emc->clk);
	if (err) {
		dev_err(emc->dev, "failed to acquire clk: %d\n", err);
		return err;
	}

	/* suspending in a bad state will hang machine */
	if (WARN(emc->bad_state, "hardware in a bad state\n"))
		return -EINVAL;

	emc->bad_state = true;

	return 0;
}

static int tegra_emc_resume(struct device *dev)
{
	struct tegra_emc *emc = dev_get_drvdata(dev);

	emc_setup_hw(emc);
	emc->bad_state = false;

	clk_rate_exclusive_put(emc->clk);

	return 0;
}

static const struct dev_pm_ops tegra_emc_pm_ops = {
	.suspend = tegra_emc_suspend,
	.resume = tegra_emc_resume,
};

static const struct of_device_id tegra_emc_of_match[] = {
	{ .compatible = "nvidia,tegra30-emc", },
	{},
};
MODULE_DEVICE_TABLE(of, tegra_emc_of_match);

static struct platform_driver tegra_emc_driver = {
	.probe = tegra_emc_probe,
	.driver = {
		.name = "tegra30-emc",
		.of_match_table = tegra_emc_of_match,
		.pm = &tegra_emc_pm_ops,
		.suppress_bind_attrs = true,
		.sync_state = icc_sync_state,
	},
};
module_platform_driver(tegra_emc_driver);

MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
MODULE_LICENSE("GPL v2");