// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
 *
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/mmc/mmc.h>
#include <linux/pm_runtime.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/interconnect.h>
#include <linux/pinctrl/consumer.h>

#include "sdhci-pltfm.h"
#include "cqhci.h"

#define CORE_MCI_VERSION		0x50
#define CORE_VERSION_MAJOR_SHIFT	28
#define CORE_VERSION_MAJOR_MASK		(0xf << CORE_VERSION_MAJOR_SHIFT)
#define CORE_VERSION_MINOR_MASK		0xff

#define CORE_MCI_GENERICS		0x70
#define SWITCHABLE_SIGNALING_VOLTAGE	BIT(29)

#define HC_MODE_EN		0x1
#define CORE_POWER		0x0
#define CORE_SW_RST		BIT(7)
#define FF_CLK_SW_RST_DIS	BIT(13)

#define CORE_PWRCTL_BUS_OFF	BIT(0)
#define CORE_PWRCTL_BUS_ON	BIT(1)
#define CORE_PWRCTL_IO_LOW	BIT(2)
#define CORE_PWRCTL_IO_HIGH	BIT(3)
#define CORE_PWRCTL_BUS_SUCCESS	BIT(0)
#define CORE_PWRCTL_BUS_FAIL	BIT(1)
#define CORE_PWRCTL_IO_SUCCESS	BIT(2)
#define CORE_PWRCTL_IO_FAIL	BIT(3)
#define REQ_BUS_OFF		BIT(0)
#define REQ_BUS_ON		BIT(1)
#define REQ_IO_LOW		BIT(2)
#define REQ_IO_HIGH		BIT(3)
#define INT_MASK		0xf

#define MAX_PHASES		16
#define CORE_DLL_LOCK		BIT(7)
#define CORE_DDR_DLL_LOCK	BIT(11)
#define CORE_DLL_EN		BIT(16)
#define CORE_CDR_EN		BIT(17)
#define CORE_CK_OUT_EN		BIT(18)
#define CORE_CDR_EXT_EN		BIT(19)
#define CORE_DLL_PDN		BIT(29)
#define CORE_DLL_RST		BIT(30)
#define CORE_CMD_DAT_TRACK_SEL	BIT(0)

#define CORE_DDR_CAL_EN		BIT(0)
#define CORE_FLL_CYCLE_CNT	BIT(18)
#define CORE_DLL_CLOCK_DISABLE	BIT(21)

#define DLL_USR_CTL_POR_VAL	0x10800
#define ENABLE_DLL_LOCK_STATUS	BIT(26)
#define FINE_TUNE_MODE_EN	BIT(27)
#define BIAS_OK_SIGNAL		BIT(29)

#define DLL_CONFIG_3_LOW_FREQ_VAL	0x08
#define DLL_CONFIG_3_HIGH_FREQ_VAL	0x10

#define CORE_VENDOR_SPEC_POR_VAL	0xa9c
#define CORE_CLK_PWRSAVE	BIT(1)
#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
#define CORE_IO_PAD_PWR_SWITCH_EN	BIT(15)
#define CORE_IO_PAD_PWR_SWITCH	BIT(16)
#define CORE_HC_SELECT_IN_EN	BIT(18)
#define CORE_HC_SELECT_IN_HS400	(6 << 19)
#define CORE_HC_SELECT_IN_MASK	(7 << 19)

#define CORE_3_0V_SUPPORT	BIT(25)
#define CORE_1_8V_SUPPORT	BIT(26)
#define CORE_VOLT_SUPPORT	(CORE_3_0V_SUPPORT | CORE_1_8V_SUPPORT)

#define CORE_CSR_CDC_CTLR_CFG0		0x130
#define CORE_SW_TRIG_FULL_CALIB		BIT(16)
#define CORE_HW_AUTOCAL_ENA		BIT(17)

#define CORE_CSR_CDC_CTLR_CFG1		0x134
#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
#define CORE_TIMER_ENA			BIT(16)

#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
#define CORE_CDC_OFFSET_CFG		0x14C
#define CORE_CSR_CDC_DELAY_CFG		0x150
#define CORE_CDC_SLAVE_DDA_CFG		0x160
#define CORE_CSR_CDC_STATUS0		0x164
#define CORE_CALIBRATION_DONE		BIT(0)

#define CORE_CDC_ERROR_CODE_MASK	0x7000000

#define CORE_CSR_CDC_GEN_CFG		0x178
#define CORE_CDC_SWITCH_BYPASS_OFF	BIT(0)
#define CORE_CDC_SWITCH_RC_EN		BIT(1)

#define CORE_CDC_T4_DLY_SEL		BIT(0)
#define CORE_CMDIN_RCLK_EN		BIT(1)
#define CORE_START_CDC_TRAFFIC		BIT(6)

#define CORE_PWRSAVE_DLL	BIT(3)

#define DDR_CONFIG_POR_VAL	0x80040873

#define INVALID_TUNING_PHASE	-1
#define SDHCI_MSM_MIN_CLOCK	400000
#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)

#define CDR_SELEXT_SHIFT	20
#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT)
#define CMUX_SHIFT_PHASE_SHIFT	24
#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT)

#define MSM_MMC_AUTOSUSPEND_DELAY_MS	50

/* Timeout value to avoid infinite waiting for pwr_irq */
#define MSM_PWR_IRQ_TIMEOUT_MS	5000

/* Max load for eMMC Vdd-io supply */
#define MMC_VQMMC_MAX_LOAD_UA	325000

#define msm_host_readl(msm_host, host, offset) \
	msm_host->var_ops->msm_readl_relaxed(host, offset)

#define msm_host_writel(msm_host, val, host, offset) \
	msm_host->var_ops->msm_writel_relaxed(val, host, offset)

/* CQHCI vendor specific registers */
#define CQHCI_VENDOR_CFG1	0xA00
#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN	(0x3 << 13)

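/*
 * Two register layouts exist: pre-V5 hosts expose these vendor registers
 * in a separate MCI region (core_mem), while V5+ hosts fold them into the
 * main SDHCI region. The offset tables below describe each layout, and the
 * msm_host_readl()/msm_host_writel() macros above dispatch through
 * per-variant accessors selected from the version info in the DT.
 */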
struct sdhci_msm_offset {
	u32 core_hc_mode;
	u32 core_mci_data_cnt;
	u32 core_mci_status;
	u32 core_mci_fifo_cnt;
	u32 core_mci_version;
	u32 core_generics;
	u32 core_testbus_config;
	u32 core_testbus_sel2_bit;
	u32 core_testbus_ena;
	u32 core_testbus_sel2;
	u32 core_pwrctl_status;
	u32 core_pwrctl_mask;
	u32 core_pwrctl_clear;
	u32 core_pwrctl_ctl;
	u32 core_sdcc_debug_reg;
	u32 core_dll_config;
	u32 core_dll_status;
	u32 core_vendor_spec;
	u32 core_vendor_spec_adma_err_addr0;
	u32 core_vendor_spec_adma_err_addr1;
	u32 core_vendor_spec_func2;
	u32 core_vendor_spec_capabilities0;
	u32 core_ddr_200_cfg;
	u32 core_vendor_spec3;
	u32 core_dll_config_2;
	u32 core_dll_config_3;
	u32 core_ddr_config_old; /* Applicable to sdcc minor ver < 0x49 */
	u32 core_ddr_config;
	u32 core_dll_usr_ctl; /* Present on SDCC5.1 onwards */
};

static const struct sdhci_msm_offset sdhci_msm_v5_offset = {
	.core_mci_data_cnt = 0x35c,
	.core_mci_status = 0x324,
	.core_mci_fifo_cnt = 0x308,
	.core_mci_version = 0x318,
	.core_generics = 0x320,
	.core_testbus_config = 0x32c,
	.core_testbus_sel2_bit = 3,
	.core_testbus_ena = (1 << 31),
	.core_testbus_sel2 = (1 << 3),
	.core_pwrctl_status = 0x240,
	.core_pwrctl_mask = 0x244,
	.core_pwrctl_clear = 0x248,
	.core_pwrctl_ctl = 0x24c,
	.core_sdcc_debug_reg = 0x358,
	.core_dll_config = 0x200,
	.core_dll_status = 0x208,
	.core_vendor_spec = 0x20c,
	.core_vendor_spec_adma_err_addr0 = 0x214,
	.core_vendor_spec_adma_err_addr1 = 0x218,
	.core_vendor_spec_func2 = 0x210,
	.core_vendor_spec_capabilities0 = 0x21c,
	.core_ddr_200_cfg = 0x224,
	.core_vendor_spec3 = 0x250,
	.core_dll_config_2 = 0x254,
	.core_dll_config_3 = 0x258,
	.core_ddr_config = 0x25c,
	.core_dll_usr_ctl = 0x388,
};

static const struct sdhci_msm_offset sdhci_msm_mci_offset = {
	.core_hc_mode = 0x78,
	.core_mci_data_cnt = 0x30,
	.core_mci_status = 0x34,
	.core_mci_fifo_cnt = 0x44,
	.core_mci_version = 0x050,
	.core_generics = 0x70,
	.core_testbus_config = 0x0cc,
	.core_testbus_sel2_bit = 4,
	.core_testbus_ena = (1 << 3),
	.core_testbus_sel2 = (1 << 4),
	.core_pwrctl_status = 0xdc,
	.core_pwrctl_mask = 0xe0,
	.core_pwrctl_clear = 0xe4,
	.core_pwrctl_ctl = 0xe8,
	.core_sdcc_debug_reg = 0x124,
	.core_dll_config = 0x100,
	.core_dll_status = 0x108,
	.core_vendor_spec = 0x10c,
	.core_vendor_spec_adma_err_addr0 = 0x114,
	.core_vendor_spec_adma_err_addr1 = 0x118,
	.core_vendor_spec_func2 = 0x110,
	.core_vendor_spec_capabilities0 = 0x11c,
	.core_ddr_200_cfg = 0x184,
	.core_vendor_spec3 = 0x1b0,
	.core_dll_config_2 = 0x1b4,
	.core_ddr_config_old = 0x1b8,
	.core_ddr_config = 0x1bc,
};

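/*
 * Illustrative example: a power-control status read such as
 *
 *	msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status);
 *
 * lands on the register at offset 0x240 inside the SDHCI region on V5
 * hosts, but at offset 0xdc inside the legacy MCI region on older
 * controllers.
 */
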
struct sdhci_msm_variant_ops {
	u32 (*msm_readl_relaxed)(struct sdhci_host *host, u32 offset);
	void (*msm_writel_relaxed)(u32 val, struct sdhci_host *host,
			u32 offset);
};

/*
 * From V5, register spaces have changed. Wrap this info in a structure
 * and choose the data structure based on version info mentioned in DT.
 */
struct sdhci_msm_variant_info {
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
};

struct sdhci_msm_host {
	struct platform_device *pdev;
	void __iomem *core_mem;	/* MSM SDCC mapped address */
	void __iomem *ice_mem;	/* MSM ICE mapped address (if available) */
	int pwr_irq;		/* power irq */
	struct clk *bus_clk;	/* SDHC bus voter clock */
	struct clk *xo_clk;	/* TCXO clk needed for FLL feature of cm_dll */
	/* core, iface, cal, sleep, and ice clocks */
	struct clk_bulk_data bulk_clks[5];
	unsigned long clk_rate;
	struct mmc_host *mmc;
	bool use_14lpp_dll_reset;
	bool tuning_done;
	bool calibration_done;
	u8 saved_tuning_phase;
	bool use_cdclp533;
	u32 curr_pwr_state;
	u32 curr_io_level;
	wait_queue_head_t pwr_irq_wait;
	bool pwr_irq_flag;
	u32 caps_0;
	bool mci_removed;
	bool restore_dll_config;
	const struct sdhci_msm_variant_ops *var_ops;
	const struct sdhci_msm_offset *offset;
	bool use_cdr;
	u32 transfer_mode;
	bool updated_ddr_cfg;
	bool uses_tassadar_dll;
	u32 dll_config;
	u32 ddr_config;
	bool vqmmc_enabled;
};

static const struct sdhci_msm_offset *sdhci_priv_msm_offset(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return msm_host->offset;
}

/*
 * APIs to read/write to vendor specific registers which were there in the
 * core_mem region before MCI was removed.
 */
static u32 sdhci_msm_mci_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	return readl_relaxed(msm_host->core_mem + offset);
}

static u32 sdhci_msm_v5_variant_readl_relaxed(struct sdhci_host *host,
		u32 offset)
{
	return readl_relaxed(host->ioaddr + offset);
}

static void sdhci_msm_mci_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	writel_relaxed(val, msm_host->core_mem + offset);
}

static void sdhci_msm_v5_variant_writel_relaxed(u32 val,
		struct sdhci_host *host, u32 offset)
{
	writel_relaxed(val, host->ioaddr + offset);
}

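/*
 * For illustration only: the driver's variant tables (defined further down
 * in the driver) are expected to wire these accessors up roughly as
 *
 *	static const struct sdhci_msm_variant_ops v5_var_ops = {
 *		.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
 *		.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
 *	};
 */
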
static unsigned int msm_get_clock_mult_for_bus_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	/*
	 * The SDHC requires internal clock frequency to be double the
	 * actual clock that will be set for DDR mode. The controller
	 * uses the faster clock (100/400MHz) for some of its parts and
	 * sends the actual required clock (50/200MHz) to the card.
	 */
	if (ios.timing == MMC_TIMING_UHS_DDR50 ||
	    ios.timing == MMC_TIMING_MMC_DDR52 ||
	    ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		return 2;
	return 1;
}

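/*
 * Illustrative example: for MMC_TIMING_MMC_DDR52 at a 52 MHz bus clock the
 * multiplier above is 2, so msm_set_clock_rate_for_bus_mode() below requests
 * 104 MHz from GCC while the card itself still sees 52 MHz.
 */
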
static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host,
					    unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios curr_ios = host->mmc->ios;
	struct clk *core_clk = msm_host->bulk_clks[0].clk;
	unsigned long achieved_rate;
	unsigned int desired_rate;
	unsigned int mult;
	int rc;

	mult = msm_get_clock_mult_for_bus_mode(host);
	desired_rate = clock * mult;
	rc = dev_pm_opp_set_rate(mmc_dev(host->mmc), desired_rate);
	if (rc) {
		pr_err("%s: Failed to set clock at rate %u at timing %d\n",
		       mmc_hostname(host->mmc), desired_rate, curr_ios.timing);
		return;
	}

	/*
	 * Qualcomm clock drivers by default round clock _up_ if they can't
	 * make the requested rate. This is not good for SD. Yell if we
	 * encounter it.
	 */
	achieved_rate = clk_get_rate(core_clk);
	if (achieved_rate > desired_rate)
		pr_warn("%s: Card appears overclocked; req %u Hz, actual %lu Hz\n",
			mmc_hostname(host->mmc), desired_rate, achieved_rate);
	host->mmc->actual_clock = achieved_rate / mult;

	/* Stash the rate we requested to use in sdhci_msm_runtime_resume() */
	msm_host->clk_rate = desired_rate;

	pr_debug("%s: Setting clock at rate %lu at timing %d\n",
		 mmc_hostname(host->mmc), achieved_rate, curr_ios.timing);
}

/* Platform specific tuning */
static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
{
	u32 wait_cnt = 50;
	u8 ck_out_en;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Poll for CK_OUT_EN bit.  max. poll time = 50us */
	ck_out_en = !!(readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config) & CORE_CK_OUT_EN);

	while (ck_out_en != poll) {
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
				mmc_hostname(mmc), poll);
			return -ETIMEDOUT;
		}
		udelay(1);

		ck_out_en = !!(readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config) & CORE_CK_OUT_EN);
	}

	return 0;
}

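/*
 * The CDR_SELEXT field takes Gray-coded phase values: consecutive entries
 * of the table below differ in exactly one bit, the usual motivation being
 * to avoid transient intermediate codes when stepping between adjacent
 * phases.
 */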
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
	int rc;
	static const u8 grey_coded_phase_table[] = {
		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
	};
	unsigned long flags;
	u32 config;
	struct mmc_host *mmc = host->mmc;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	if (phase > 0xf)
		return -EINVAL;

	spin_lock_irqsave(&host->lock, flags);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
	rc = msm_dll_poll_ck_out_en(host, 0);
	if (rc)
		goto err_out;

	/*
	 * Write the selected DLL clock output phase (0 ... 15)
	 * to CDR_SELEXT bit field of DLL_CONFIG register.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CDR_SELEXT_MASK;
	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
	rc = msm_dll_poll_ck_out_en(host, 1);
	if (rc)
		goto err_out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CDR_EN;
	config &= ~CORE_CDR_EXT_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
	goto out;

err_out:
	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
		mmc_hostname(mmc), phase);
out:
	spin_unlock_irqrestore(&host->lock, flags);
	return rc;
}

/*
 * Find out the greatest range of consecutive selected
 * DLL clock output phases that can be used as sampling
 * setting for SD3.0 UHS-I card read operation (in SDR104
 * timing mode) or for eMMC4.5 card read operation (in
 * HS400/HS200 timing mode).
 * Select the 3/4 of the range and configure the DLL with the
 * selected DLL clock output phase.
 */
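/*
 * Worked example (illustrative): if tuning passed for the eight consecutive
 * phases 4..11, the window length is 8, the 3/4 index is
 * (8 * 3) / 4 - 1 = 5, and phase 9 is programmed. A window wrapping from
 * phase 15 back to phase 0 is first merged into a single contiguous window
 * before this selection is made.
 */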
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
					   u8 *phase_table, u8 total_phases)
{
	int ret;
	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
	u8 phases_per_row[MAX_PHASES] = { 0 };
	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
	bool phase_0_found = false, phase_15_found = false;
	struct mmc_host *mmc = host->mmc;

	if (!total_phases || (total_phases > MAX_PHASES)) {
		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
			mmc_hostname(mmc), total_phases);
		return -EINVAL;
	}

	for (cnt = 0; cnt < total_phases; cnt++) {
		ranges[row_index][col_index] = phase_table[cnt];
		phases_per_row[row_index] += 1;
		col_index++;

		if ((cnt + 1) == total_phases) {
			continue;
		/* check if next phase in phase_table is consecutive or not */
		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) {
			row_index++;
			col_index = 0;
		}
	}

	if (row_index >= MAX_PHASES)
		return -EINVAL;

	/* Check if phase-0 is present in first valid window */
	if (!ranges[0][0]) {
		phase_0_found = true;
		phase_0_raw_index = 0;
		/* Check if cycle exists between 2 valid windows */
		for (cnt = 1; cnt <= row_index; cnt++) {
			if (phases_per_row[cnt]) {
				for (i = 0; i < phases_per_row[cnt]; i++) {
					if (ranges[cnt][i] == 15) {
						phase_15_found = true;
						phase_15_raw_index = cnt;
						break;
					}
				}
			}
		}
	}

	/* If 2 valid windows form cycle then merge them as single window */
	if (phase_0_found && phase_15_found) {
		/* number of phases in row where phase 0 is present */
		u8 phases_0 = phases_per_row[phase_0_raw_index];
		/* number of phases in row where phase 15 is present */
		u8 phases_15 = phases_per_row[phase_15_raw_index];

		if (phases_0 + phases_15 >= MAX_PHASES)
			/*
			 * If there are more than 1 phase windows then total
			 * number of phases in both the windows should not be
			 * more than or equal to MAX_PHASES.
			 */
			return -EINVAL;

		/* Merge 2 cyclic windows */
		i = phases_15;
		for (cnt = 0; cnt < phases_0; cnt++) {
			ranges[phase_15_raw_index][i] =
				ranges[phase_0_raw_index][cnt];
			if (++i >= MAX_PHASES)
				break;
		}

		phases_per_row[phase_0_raw_index] = 0;
		phases_per_row[phase_15_raw_index] = phases_15 + phases_0;
	}

	for (cnt = 0; cnt <= row_index; cnt++) {
		if (phases_per_row[cnt] > curr_max) {
			curr_max = phases_per_row[cnt];
			selected_row_index = cnt;
		}
	}

	i = (curr_max * 3) / 4;
	if (i)
		i--;

	ret = ranges[selected_row_index][i];

	if (ret >= MAX_PHASES) {
		ret = -EINVAL;
		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
			mmc_hostname(mmc), ret);
	}

	return ret;
}

static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
	u32 mclk_freq = 0, config;
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	/* Program the MCLK value to MCLK_FREQ bit field */
	if (host->clock <= 112000000)
		mclk_freq = 0;
	else if (host->clock <= 125000000)
		mclk_freq = 1;
	else if (host->clock <= 137000000)
		mclk_freq = 2;
	else if (host->clock <= 150000000)
		mclk_freq = 3;
	else if (host->clock <= 162000000)
		mclk_freq = 4;
	else if (host->clock <= 175000000)
		mclk_freq = 5;
	else if (host->clock <= 187000000)
		mclk_freq = 6;
	else if (host->clock <= 200000000)
		mclk_freq = 7;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config &= ~CMUX_SHIFT_PHASE_MASK;
	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
}

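/*
 * Summary of the sequence below: force the bus clock on (clear
 * CORE_CLK_PWRSAVE), put the DLL into reset and power-down, program the
 * output frequency, optionally program the FLL cycle count from the TCXO
 * rate on 14lpp hosts, release reset/power-down, then enable the DLL and
 * poll DLL_STATUS for the lock bit.
 */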
/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int wait_cnt = 50;
	unsigned long flags, xo_clk = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (msm_host->use_14lpp_dll_reset && !IS_ERR_OR_NULL(msm_host->xo_clk))
		xo_clk = clk_get_rate(msm_host->xo_clk);

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * Make sure that clock is always enabled when DLL
	 * tuning is in progress. Keeping PWRSAVE ON may
	 * turn off the clock.
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_CLK_PWRSAVE;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	if (msm_host->dll_config)
		writel_relaxed(msm_host->dll_config,
				host->ioaddr + msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config &= ~CORE_CK_OUT_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config |= CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (!msm_host->dll_config)
		msm_cm_dll_set_freq(host);

	if (msm_host->use_14lpp_dll_reset &&
	    !IS_ERR_OR_NULL(msm_host->xo_clk)) {
		u32 mclk_freq = 0;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= CORE_FLL_CYCLE_CNT;
		if (config)
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 8),
							  xo_clk);
		else
			mclk_freq = DIV_ROUND_CLOSEST_ULL((host->clock * 4),
							  xo_clk);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~(0xFF << 10);
		config |= mclk_freq << 10;

		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
		/* wait for 5us before enabling DLL clock */
		udelay(5);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_RST;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config &= ~CORE_DLL_PDN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	if (msm_host->use_14lpp_dll_reset) {
		if (!msm_host->dll_config)
			msm_cm_dll_set_freq(host);
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_2);
		config &= ~CORE_DLL_CLOCK_DISABLE;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_2);
	}

	/*
	 * Configure DLL user control register to enable DLL status.
	 * This setting is applicable to SDCC v5.1 onwards only.
	 */
	if (msm_host->uses_tassadar_dll) {
		config = DLL_USR_CTL_POR_VAL | FINE_TUNE_MODE_EN |
			ENABLE_DLL_LOCK_STATUS | BIAS_OK_SIGNAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_usr_ctl);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config_3);
		config &= ~0xFF;
		if (msm_host->clk_rate < 150000000)
			config |= DLL_CONFIG_3_LOW_FREQ_VAL;
		else
			config |= DLL_CONFIG_3_HIGH_FREQ_VAL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config_3);
	}

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_DLL_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr +
			msm_offset->core_dll_config);
	config |= CORE_CK_OUT_EN;
	writel_relaxed(config, host->ioaddr +
			msm_offset->core_dll_config);

	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
	while (!(readl_relaxed(host->ioaddr + msm_offset->core_dll_status) &
		 CORE_DLL_LOCK)) {
		/* max. wait for 50us for LOCK bit to be set */
		if (--wait_cnt == 0) {
			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
				mmc_hostname(mmc));
			spin_unlock_irqrestore(&host->lock, flags);
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return 0;
}

static void msm_hc_select_default(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	if (!msm_host->use_cdclp533) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config &= ~CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_DFLT;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Disable HC_SELECT_IN to be able to use the UHS mode select
	 * configuration from Host Control2 register for all other
	 * modes.
	 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
	 * in VENDOR_SPEC_FUNC
	 */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_SELECT_IN_EN;
	config &= ~CORE_HC_SELECT_IN_MASK;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

static void msm_hc_select_hs400(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_ios ios = host->mmc->ios;
	u32 config, dll_lock;
	int rc;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	/* Select the divided clock (free running MCLK/2) */
	config = readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec);
	config &= ~CORE_HC_MCLK_SEL_MASK;
	config |= CORE_HC_MCLK_SEL_HS400;
	writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);

	/*
	 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
	 * register
	 */
	if ((msm_host->tuning_done || ios.enhanced_strobe) &&
	    !msm_host->calibration_done) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_HC_SELECT_IN_HS400;
		config |= CORE_HC_SELECT_IN_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec);
	}
	if (!msm_host->clk_rate && !msm_host->use_cdclp533) {
		/*
		 * Poll on DLL_LOCK or DDR_DLL_LOCK bits in
		 * core_dll_status to be set. This should get set
		 * within 15 us at 200 MHz.
		 */
		rc = readl_relaxed_poll_timeout(host->ioaddr +
						msm_offset->core_dll_status,
						dll_lock,
						(dll_lock &
						(CORE_DLL_LOCK |
						CORE_DDR_DLL_LOCK)), 10,
						1000);
		if (rc == -ETIMEDOUT)
			pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
			       mmc_hostname(host->mmc), dll_lock);
	}
	/*
	 * Make sure above writes impacting free running MCLK are completed
	 * before changing the clk_rate at GCC.
	 */
	wmb();
}

/*
 * sdhci_msm_hc_select_mode :- In general all timing modes are
 * controlled via UHS mode select in Host Control2 register.
 * eMMC specific HS200/HS400 don't have their respective modes
 * defined here, hence we use these values.
 *
 * HS200 - SDR104 (Since they both are equivalent in functionality)
 * HS400 - This involves multiple configurations
 *		Initially SDR104 - when tuning is required as HS200
 *		Then when switching to DDR @ 400MHz (HS400) we use
 *		the vendor specific HC_SELECT_IN to control the mode.
 *
 * In addition to controlling the modes we also need to select the
 * correct input clock for DLL depending on the mode.
 *
 * HS400 - divided clock (free running MCLK/2)
 * All other modes - default (free running MCLK)
 */
static void sdhci_msm_hc_select_mode(struct sdhci_host *host)
{
	struct mmc_ios ios = host->mmc->ios;

	if (ios.timing == MMC_TIMING_MMC_HS400 ||
	    host->flags & SDHCI_HS400_TUNING)
		msm_hc_select_hs400(host);
	else
		msm_hc_select_default(host);
}

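/*
 * The fixed CORE_CSR_CDC_* values written below initialize the CDC delay
 * circuit used for HS400 on use_cdclp533 hosts; a software-triggered full
 * calibration is then followed by hardware auto-calibration, polled via
 * CORE_CALIBRATION_DONE.
 */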
static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 config, calib_done;
	int ret;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	/* Set the selected phase in delay line hw block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
	if (ret)
		goto out;

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config);
	config |= CORE_CMD_DAT_TRACK_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_CDC_T4_DLY_SEL;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config &= ~CORE_CDC_SWITCH_BYPASS_OFF;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG);
	config |= CORE_CDC_SWITCH_RC_EN;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_GEN_CFG);

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config &= ~CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);

	/* Perform CDC Register Initialization Sequence */
	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
	writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);

	/* CDC HW Calibration */
	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config &= ~CORE_SW_TRIG_FULL_CALIB;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
	config |= CORE_HW_AUTOCAL_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);

	config = readl_relaxed(host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
	config |= CORE_TIMER_ENA;
	writel_relaxed(config, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);

	ret = readl_relaxed_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
					 calib_done,
					 (calib_done & CORE_CALIBRATION_DONE),
					 1, 50);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CDC calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	ret = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
			& CORE_CDC_ERROR_CODE_MASK;
	if (ret) {
		pr_err("%s: %s: CDC error code %d\n",
		       mmc_hostname(host->mmc), __func__, ret);
		ret = -EINVAL;
		goto out;
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_ddr_200_cfg);
	config |= CORE_START_CDC_TRAFFIC;
	writel_relaxed(config, host->ioaddr + msm_offset->core_ddr_200_cfg);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;
	u32 dll_status, config, ddr_cfg_offset;
	int ret;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					sdhci_priv_msm_offset(host);

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Currently the core_ddr_config register defaults to desired
	 * configuration on reset. Currently reprogramming the power on
	 * reset (POR) value in case it might have been modified by
	 * bootloaders. In the future, if this changes, then the desired
	 * values will need to be programmed appropriately.
	 */
	if (msm_host->updated_ddr_cfg)
		ddr_cfg_offset = msm_offset->core_ddr_config;
	else
		ddr_cfg_offset = msm_offset->core_ddr_config_old;
	writel_relaxed(msm_host->ddr_config, host->ioaddr + ddr_cfg_offset);

	if (mmc->ios.enhanced_strobe) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_ddr_200_cfg);
		config |= CORE_CMDIN_RCLK_EN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_ddr_200_cfg);
	}

	config = readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2);
	config |= CORE_DDR_CAL_EN;
	writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config_2);

	ret = readl_relaxed_poll_timeout(host->ioaddr +
					 msm_offset->core_dll_status,
					 dll_status,
					 (dll_status & CORE_DDR_DLL_LOCK),
					 10, 1000);

	if (ret == -ETIMEDOUT) {
		pr_err("%s: %s: CM_DLL_SDC4 calibration was not completed\n",
		       mmc_hostname(host->mmc), __func__);
		goto out;
	}

	/*
	 * Set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
	 * When MCLK is gated OFF, it is not gated for less than 0.5us
	 * and MCLK must be switched on for at-least 1us before DATA
	 * starts coming. Controllers with 14lpp and later tech DLL cannot
	 * guarantee above requirement. So PWRSAVE_DLL should not be
	 * turned on for host controllers using this DLL.
	 */
	if (!msm_host->use_14lpp_dll_reset) {
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec3);
		config |= CORE_PWRSAVE_DLL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec3);
	}

	/*
	 * Drain writebuffer to ensure above DLL calibration
	 * and PWRSAVE DLL is enabled.
	 */
	wmb();
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: Enter\n", mmc_hostname(host->mmc), __func__);

	/*
	 * Retuning in HS400 (DDR mode) will fail, just reset the
	 * tuning block and restore the saved tuning phase.
	 */
	ret = msm_init_cm_dll(host);
	if (ret)
		goto out;

	if (!mmc->ios.enhanced_strobe) {
		/* Set the selected phase in delay line hw block */
		ret = msm_config_cm_dll_phase(host,
					      msm_host->saved_tuning_phase);
		if (ret)
			goto out;
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_CMD_DAT_TRACK_SEL;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);
	}

	if (msm_host->use_cdclp533)
		ret = sdhci_msm_cdclp533_calibration(host);
	else
		ret = sdhci_msm_cm_dll_sdc4_calibration(host);
out:
	pr_debug("%s: %s: Exit, ret %d\n", mmc_hostname(host->mmc),
		 __func__, ret);
	return ret;
}

static bool sdhci_msm_is_tuning_needed(struct sdhci_host *host)
{
	struct mmc_ios *ios = &host->mmc->ios;

	/*
	 * Tuning is required for SDR104, HS200 and HS400 cards and
	 * if clock frequency is greater than 100MHz in these modes.
	 */
	if (host->clock <= CORE_FREQ_100MHZ ||
	    !(ios->timing == MMC_TIMING_MMC_HS400 ||
	    ios->timing == MMC_TIMING_MMC_HS200 ||
	    ios->timing == MMC_TIMING_UHS_SDR104) ||
	    ios->enhanced_strobe)
		return false;

	return true;
}

static int sdhci_msm_restore_sdr_dll_config(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	/*
	 * SDR DLL comes into picture only for timing modes which need
	 * tuning.
	 */
	if (!sdhci_msm_is_tuning_needed(host))
		return 0;

	/* Reset the tuning block */
	ret = msm_init_cm_dll(host);
	if (ret)
		return ret;

	/* Restore the tuning block */
	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);

	return ret;
}

static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
{
	const struct sdhci_msm_offset *msm_offset = sdhci_priv_msm_offset(host);
	u32 config, oldconfig = readl_relaxed(host->ioaddr +
					      msm_offset->core_dll_config);

	config = oldconfig;
	if (enable) {
		config |= CORE_CDR_EN;
		config &= ~CORE_CDR_EXT_EN;
	} else {
		config &= ~CORE_CDR_EN;
		config |= CORE_CDR_EXT_EN;
	}

	if (config != oldconfig) {
		writel_relaxed(config, host->ioaddr +
			       msm_offset->core_dll_config);
	}
}

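/*
 * Tuning below sweeps all 16 DLL output phases with the standard tuning
 * command, records the phases that returned valid data, picks one via
 * msm_find_most_appropriate_phase() and locks the delay line to it. A
 * sweep where every phase passes is treated as unreliable and retried,
 * up to tuning_seq_cnt attempts.
 */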
static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int tuning_seq_cnt = 10;
	u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
	int rc;
	struct mmc_ios ios = host->mmc->ios;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!sdhci_msm_is_tuning_needed(host)) {
		msm_host->use_cdr = false;
		sdhci_msm_set_cdr(host, false);
		return 0;
	}

	/* Clock-Data-Recovery used to dynamically adjust RX sampling point */
	msm_host->use_cdr = true;

	/*
	 * Clear tuning_done flag before tuning to ensure proper
	 * HS400 settings.
	 */
	msm_host->tuning_done = 0;

	/*
	 * HS400 tuning in HS200 timing requires:
	 * - select MCLK/2 in VENDOR_SPEC
	 * - program MCLK to 400MHz (or nearest supported) in GCC
	 */
	if (host->flags & SDHCI_HS400_TUNING) {
		sdhci_msm_hc_select_mode(host);
		msm_set_clock_rate_for_bus_mode(host, ios.clock);
		host->flags &= ~SDHCI_HS400_TUNING;
	}

retry:
	/* First of all reset the tuning block */
	rc = msm_init_cm_dll(host);
	if (rc)
		return rc;

	phase = 0;
	do {
		/* Set the phase in delay line hw block */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;

		rc = mmc_send_tuning(mmc, opcode, NULL);
		if (!rc) {
			/* Tuning is successful at this tuning point */
			tuned_phases[tuned_phase_cnt++] = phase;
			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
				mmc_hostname(mmc), phase);
		}
	} while (++phase < ARRAY_SIZE(tuned_phases));

	if (tuned_phase_cnt) {
		if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
			/*
			 * All phases valid is _almost_ as bad as no phases
			 * valid.  Probably all phases are not really reliable
			 * but we didn't detect where the unreliable place is.
			 * That means we'll essentially be guessing and hoping
			 * we get a good phase.  Better to try a few times.
			 */
			dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
				mmc_hostname(mmc));
			if (--tuning_seq_cnt) {
				tuned_phase_cnt = 0;
				goto retry;
			}
		}

		rc = msm_find_most_appropriate_phase(host, tuned_phases,
						     tuned_phase_cnt);
		if (rc < 0)
			return rc;
		else
			phase = rc;

		/*
		 * Finally set the selected phase in delay
		 * line hw block.
		 */
		rc = msm_config_cm_dll_phase(host, phase);
		if (rc)
			return rc;
		msm_host->saved_tuning_phase = phase;
		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
			mmc_hostname(mmc), phase);
	} else {
		if (--tuning_seq_cnt)
			goto retry;
		/* Tuning failed */
		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
			mmc_hostname(mmc));
		rc = -EIO;
	}

	if (!rc)
		msm_host->tuning_done = true;
	return rc;
}

/*
 * sdhci_msm_hs400 - Calibrate the DLL for HS400 bus speed mode operation.
 * This needs to be done for both tuning and enhanced_strobe mode.
 * DLL operation is only needed for clock > 100MHz. For clock <= 100MHz
 * fixed feedback clock is used.
 */
static void sdhci_msm_hs400(struct sdhci_host *host, struct mmc_ios *ios)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	if (host->clock > CORE_FREQ_100MHZ &&
	    (msm_host->tuning_done || ios->enhanced_strobe) &&
	    !msm_host->calibration_done) {
		ret = sdhci_msm_hs400_dll_calibration(host);
		if (!ret)
			msm_host->calibration_done = true;
		else
			pr_err("%s: Failed to calibrate DLL for hs400 mode (%d)\n",
			       mmc_hostname(host->mmc), ret);
	}
}

static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
					unsigned int uhs)
{
	struct mmc_host *mmc = host->mmc;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u16 ctrl_2;
	u32 config;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	/* Select Bus Speed Mode for host */
	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
	switch (uhs) {
	case MMC_TIMING_UHS_SDR12:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
		break;
	case MMC_TIMING_UHS_SDR25:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
		break;
	case MMC_TIMING_UHS_SDR50:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
		break;
	case MMC_TIMING_MMC_HS400:
	case MMC_TIMING_MMC_HS200:
	case MMC_TIMING_UHS_SDR104:
		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
		break;
	case MMC_TIMING_UHS_DDR50:
	case MMC_TIMING_MMC_DDR52:
		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
		break;
	}

	/*
	 * When clock frequency is less than 100MHz, the feedback clock must be
	 * provided and DLL must not be used so that tuning can be skipped. To
	 * provide feedback clock, the mode selection can be any value less
	 * than 3'b011 in bits [2:0] of HOST CONTROL2 register.
	 */
	if (host->clock <= CORE_FREQ_100MHZ) {
		if (uhs == MMC_TIMING_MMC_HS400 ||
		    uhs == MMC_TIMING_MMC_HS200 ||
		    uhs == MMC_TIMING_UHS_SDR104)
			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
		/*
		 * DLL is not required for clock <= 100MHz
		 * Thus, make sure the DLL is disabled when not required
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_RST;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_dll_config);
		config |= CORE_DLL_PDN;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_dll_config);

		/*
		 * The DLL needs to be restored and CDCLP533 recalibrated
		 * when the clock frequency is set back to 400MHz.
		 */
		msm_host->calibration_done = false;
	}

	dev_dbg(mmc_dev(mmc), "%s: clock=%u uhs=%u ctrl_2=0x%x\n",
		mmc_hostname(host->mmc), host->clock, uhs, ctrl_2);
	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);

	if (mmc->ios.timing == MMC_TIMING_MMC_HS400)
		sdhci_msm_hs400(host, &mmc->ios);
}

static int sdhci_msm_set_pincfg(struct sdhci_msm_host *msm_host, bool level)
{
	struct platform_device *pdev = msm_host->pdev;
	int ret;

	if (level)
		ret = pinctrl_pm_select_default_state(&pdev->dev);
	else
		ret = pinctrl_pm_select_sleep_state(&pdev->dev);

	return ret;
}
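/*
 * Illustrative note (a sketch, not driver code): pinctrl_pm_select_*_state()
 * simply moves the device between the "default" and "sleep" pin states named
 * in its device tree node, e.g. (node labels here are hypothetical):
 *
 *	pinctrl-names = "default", "sleep";
 *	pinctrl-0 = <&sdc2_on>;
 *	pinctrl-1 = <&sdc2_off>;
 *
 * so sdhci_msm_set_pincfg(msm_host, true) selects the "default" state and
 * sdhci_msm_set_pincfg(msm_host, false) the "sleep" state.
 */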
static int sdhci_msm_set_vmmc(struct mmc_host *mmc)
{
	if (IS_ERR(mmc->supply.vmmc))
		return 0;

	return mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, mmc->ios.vdd);
}
static int msm_toggle_vqmmc(struct sdhci_msm_host *msm_host,
			    struct mmc_host *mmc, bool level)
{
	int ret;
	struct mmc_ios ios;

	if (msm_host->vqmmc_enabled == level)
		return 0;

	if (level) {
		/* Set the IO voltage regulator to default voltage level */
		if (msm_host->caps_0 & CORE_3_0V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_330;
		else if (msm_host->caps_0 & CORE_1_8V_SUPPORT)
			ios.signal_voltage = MMC_SIGNAL_VOLTAGE_180;

		if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
			ret = mmc_regulator_set_vqmmc(mmc, &ios);
			if (ret < 0) {
				dev_err(mmc_dev(mmc), "%s: vqmmc set voltage failed: %d\n",
					mmc_hostname(mmc), ret);
				goto out;
			}
		}
		ret = regulator_enable(mmc->supply.vqmmc);
	} else {
		ret = regulator_disable(mmc->supply.vqmmc);
	}

	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc %sable failed: %d\n",
			mmc_hostname(mmc), level ? "en" : "dis", ret);
	else
		msm_host->vqmmc_enabled = level;
out:
	return ret;
}
static int msm_config_vqmmc_mode(struct sdhci_msm_host *msm_host,
				 struct mmc_host *mmc, bool hpm)
{
	int load, ret;

	load = hpm ? MMC_VQMMC_MAX_LOAD_UA : 0;
	ret = regulator_set_load(mmc->supply.vqmmc, load);
	if (ret)
		dev_err(mmc_dev(mmc), "%s: vqmmc set load failed: %d\n",
			mmc_hostname(mmc), ret);
	return ret;
}
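/*
 * Illustrative note (a sketch, not driver code): regulator_set_load() tells
 * the regulator framework the expected load current in microamps, which lets
 * the PMIC pick an operating mode:
 *
 *	regulator_set_load(mmc->supply.vqmmc, MMC_VQMMC_MAX_LOAD_UA);
 *		high-power mode (HPM) while the bus is active
 *	regulator_set_load(mmc->supply.vqmmc, 0);
 *		low-power mode (LPM) when idle
 *
 * The regulator stays enabled in both cases; only its efficiency/accuracy
 * trade-off changes.
 */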
static int sdhci_msm_set_vqmmc(struct sdhci_msm_host *msm_host,
			       struct mmc_host *mmc, bool level)
{
	int ret;
	bool always_on;

	if (IS_ERR(mmc->supply.vqmmc) ||
	    (mmc->ios.power_mode == MMC_POWER_UNDEFINED))
		return 0;
	/*
	 * For eMMC, don't turn off Vqmmc. Instead, just configure it in LPM
	 * and HPM modes by setting the corresponding load.
	 *
	 * Until eMMC is initialized (i.e. always_on == 0), just turn on/off
	 * Vqmmc. Vqmmc gets turned off only if init fails and mmc_power_off
	 * gets invoked. Once eMMC is initialized (i.e. always_on == 1),
	 * Vqmmc should remain ON, so just set the load instead of turning it
	 * off/on.
	 */
	always_on = !mmc_card_is_removable(mmc) &&
		    mmc->card && mmc_card_mmc(mmc->card);

	if (always_on)
		ret = msm_config_vqmmc_mode(msm_host, mmc, level);
	else
		ret = msm_toggle_vqmmc(msm_host, mmc, level);

	return ret;
}
static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host)
{
	init_waitqueue_head(&msm_host->pwr_irq_wait);
}

static inline void sdhci_msm_complete_pwr_irq_wait(
		struct sdhci_msm_host *msm_host)
{
	wake_up(&msm_host->pwr_irq_wait);
}
/*
 * sdhci_msm_check_power_status API should be called when register writes
 * that can toggle the sdhci IO bus ON/OFF or change the IO lines HIGH/LOW
 * happen. The state the register write will change the IO lines to should
 * be passed as the argument req_type. This API will check whether the IO
 * line's state is already the expected state and will wait for the power
 * irq only if a power irq is expected to be triggered, based on the current
 * and expected IO line states.
 */
static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	bool done = false;
	u32 val = SWITCHABLE_SIGNALING_VOLTAGE;
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
		 mmc_hostname(host->mmc), __func__, req_type,
		 msm_host->curr_pwr_state, msm_host->curr_io_level);

	/*
	 * The power interrupt will not be generated for signal voltage
	 * switches if SWITCHABLE_SIGNALING_VOLTAGE in MCI_GENERICS is not set.
	 * Since sdhci-msm-v5, this bit has been removed and SW must consider
	 * it as always set.
	 */
	if (!msm_host->mci_removed)
		val = msm_host_readl(msm_host, host,
				msm_offset->core_generics);
	if ((req_type & REQ_IO_HIGH || req_type & REQ_IO_LOW) &&
	    !(val & SWITCHABLE_SIGNALING_VOLTAGE)) {
		return;
	}

	/*
	 * The IRQ for request type IO High/LOW will be generated when -
	 * there is a state change in 1.8V enable bit (bit 3) of
	 * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0
	 * which indicates 3.3V IO voltage. So, when MMC core layer tries
	 * to set it to 3.3V before card detection happens, the
	 * IRQ doesn't get triggered as there is no state change in this bit.
	 * The driver already handles this case by changing the IO voltage
	 * level to high as part of controller power up sequence. Hence, check
	 * for host->pwr to handle a case where IO voltage high request is
	 * issued even before controller power up.
	 */
	if ((req_type & REQ_IO_HIGH) && !host->pwr) {
		pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n",
			 mmc_hostname(host->mmc), req_type);
		return;
	}

	if ((req_type & msm_host->curr_pwr_state) ||
	    (req_type & msm_host->curr_io_level))
		done = true;
	/*
	 * This is needed here to handle cases where register writes will
	 * not change the current bus state or io level of the controller.
	 * In this case, no power irq will be triggered and we should
	 * not wait.
	 */
	if (!done) {
		if (!wait_event_timeout(msm_host->pwr_irq_wait,
				msm_host->pwr_irq_flag,
				msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
			dev_warn(&msm_host->pdev->dev,
				 "%s: pwr_irq for req: (%d) timed out\n",
				 mmc_hostname(host->mmc), req_type);
	}
	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
		 __func__, req_type);
}
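/*
 * Illustrative sketch (not part of the driver): the functions above form a
 * classic flag-plus-waitqueue handshake between process and IRQ context:
 *
 *	writer (register write path)        reader (power IRQ)
 *	----------------------------        ------------------
 *	msm_host->pwr_irq_flag = 0;
 *	writew_relaxed(val, ...);           sdhci_msm_handle_pwr_irq(...);
 *	wait_event_timeout(                 msm_host->pwr_irq_flag = 1;
 *	    msm_host->pwr_irq_wait,         wake_up(&msm_host->pwr_irq_wait);
 *	    msm_host->pwr_irq_flag, ...);
 *
 * The flag is cleared in __sdhci_msm_check_write() before the write that may
 * trigger the IRQ, so a wakeup cannot be missed between write and wait.
 */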
static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset =
					msm_host->offset;

	pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n",
	       mmc_hostname(host->mmc),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_status),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_mask),
	       msm_host_readl(msm_host, host, msm_offset->core_pwrctl_ctl));
}
static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct mmc_host *mmc = host->mmc;
	u32 irq_status, irq_ack = 0;
	int retry = 10, ret;
	u32 pwr_state = 0, io_level = 0;
	u32 config;
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	irq_status = msm_host_readl(msm_host, host,
			msm_offset->core_pwrctl_status);
	irq_status &= INT_MASK;

	msm_host_writel(msm_host, irq_status, host,
			msm_offset->core_pwrctl_clear);

	/*
	 * There is a rare HW scenario where the first clear pulse could be
	 * lost when the actual reset and the clear/read of the status
	 * register happen at the same time. Hence, retry at least 10 times
	 * to make sure the status register is cleared. Otherwise, this will
	 * result in a spurious power IRQ, resulting in system instability.
	 */
	while (irq_status & msm_host_readl(msm_host, host,
				msm_offset->core_pwrctl_status)) {
		if (retry == 0) {
			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
			       mmc_hostname(host->mmc), irq_status);
			sdhci_msm_dump_pwr_ctrl_regs(host);
			WARN_ON(1);
			break;
		}
		msm_host_writel(msm_host, irq_status, host,
				msm_offset->core_pwrctl_clear);
		retry--;
		udelay(10);
	}

	/* Handle BUS ON/OFF */
	if (irq_status & CORE_PWRCTL_BUS_ON) {
		pwr_state = REQ_BUS_ON;
		io_level = REQ_IO_HIGH;
	}
	if (irq_status & CORE_PWRCTL_BUS_OFF) {
		pwr_state = REQ_BUS_OFF;
		io_level = REQ_IO_LOW;
	}

	if (pwr_state) {
		ret = sdhci_msm_set_vmmc(mmc);
		if (!ret)
			ret = sdhci_msm_set_vqmmc(msm_host, mmc,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			ret = sdhci_msm_set_pincfg(msm_host,
					pwr_state & REQ_BUS_ON);
		if (!ret)
			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
		else
			irq_ack |= CORE_PWRCTL_BUS_FAIL;
	}

	/* Handle IO LOW/HIGH */
	if (irq_status & CORE_PWRCTL_IO_LOW)
		io_level = REQ_IO_LOW;

	if (irq_status & CORE_PWRCTL_IO_HIGH)
		io_level = REQ_IO_HIGH;

	if (io_level)
		irq_ack |= CORE_PWRCTL_IO_SUCCESS;

	if (io_level && !IS_ERR(mmc->supply.vqmmc) && !pwr_state) {
		ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
		if (ret < 0) {
			dev_err(mmc_dev(mmc), "%s: IO_level setting failed(%d). signal_voltage: %d, vdd: %d irq_status: 0x%08x\n",
				mmc_hostname(mmc), ret,
				mmc->ios.signal_voltage, mmc->ios.vdd,
				irq_status);
			irq_ack |= CORE_PWRCTL_IO_FAIL;
		}
	}

	/*
	 * The driver has to acknowledge the interrupt, switch voltages and
	 * report back if it succeeded or not to this register. The voltage
	 * switches are handled by the sdhci core, so just report success.
	 */
	msm_host_writel(msm_host, irq_ack, host,
			msm_offset->core_pwrctl_ctl);

	/*
	 * If we don't have info regarding the voltage levels supported by
	 * regulators, don't change the IO PAD PWR SWITCH.
	 */
	if (msm_host->caps_0 & CORE_VOLT_SUPPORT) {
		u32 new_config;
		/*
		 * We should unset IO PAD PWR switch only if the register write
		 * can set IO lines high and the regulator also switches to 3V.
		 * Else, we should keep the IO PAD PWR switch set.
		 * This is applicable to certain targets where eMMC vccq supply
		 * is only 1.8V. In such targets, even during REQ_IO_HIGH, the
		 * IO PAD PWR switch must be kept set to reflect actual
		 * regulator voltage. This way, during initialization of
		 * controllers with only 1.8V, we will set the IO PAD bit
		 * without waiting for a REQ_IO_LOW.
		 */
		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		new_config = config;

		if ((io_level & REQ_IO_HIGH) &&
		    (msm_host->caps_0 & CORE_3_0V_SUPPORT))
			new_config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) ||
			 (msm_host->caps_0 & CORE_1_8V_SUPPORT))
			new_config |= CORE_IO_PAD_PWR_SWITCH;

		if (config ^ new_config)
			writel_relaxed(new_config, host->ioaddr +
					msm_offset->core_vendor_spec);
	}

	if (pwr_state)
		msm_host->curr_pwr_state = pwr_state;
	if (io_level)
		msm_host->curr_io_level = io_level;

	dev_dbg(mmc_dev(mmc), "%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n",
		mmc_hostname(msm_host->mmc), __func__, irq, irq_status,
		irq_ack);
}
static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
{
	struct sdhci_host *host = (struct sdhci_host *)data;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_msm_handle_pwr_irq(host, irq);
	msm_host->pwr_irq_flag = 1;
	sdhci_msm_complete_pwr_irq_wait(msm_host);

	return IRQ_HANDLED;
}
static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct clk *core_clk = msm_host->bulk_clks[0].clk;

	return clk_round_rate(core_clk, ULONG_MAX);
}

static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
{
	return SDHCI_MSM_MIN_CLOCK;
}
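/*
 * Illustrative note (a sketch, not driver code): clk_round_rate(clk, rate)
 * returns the rate the clock would actually run at if asked for 'rate',
 * without changing anything. Passing ULONG_MAX is the common idiom for
 * "what is the highest rate this clock supports?":
 *
 *	unsigned long max_rate = clk_round_rate(core_clk, ULONG_MAX);
 *
 * which is why it serves as get_max_clock() here instead of a value from
 * the capabilities register (see SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN in
 * sdhci_msm_pdata below).
 */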
/*
 * __sdhci_msm_set_clock - sdhci_msm clock control.
 *
 * Description:
 * The MSM controller does not use an internal divider and instead
 * controls the GCC clock directly, as per the HW recommendation.
 */
static void __sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	u16 clk;

	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);

	if (clock == 0)
		return;

	/*
	 * The MSM controller does not use a clock divider.
	 * Thus read SDHCI_CLOCK_CONTROL and only enable the
	 * clock with no divider value programmed.
	 */
	clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
	sdhci_enable_clk(host, clk);
}
/* sdhci_msm_set_clock - Called with (host->lock) spinlock held. */
static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (!clock) {
		host->mmc->actual_clock = msm_host->clk_rate = 0;
		goto out;
	}

	sdhci_msm_hc_select_mode(host);

	msm_set_clock_rate_for_bus_mode(host, clock);
out:
	__sdhci_msm_set_clock(host, clock);
}
/*****************************************************************************\
 *                                                                           *
 * Inline Crypto Engine (ICE) support                                        *
 *                                                                           *
\*****************************************************************************/

#ifdef CONFIG_MMC_CRYPTO

#define AES_256_XTS_KEY_SIZE			64

/* QCOM ICE registers */

#define QCOM_ICE_REG_VERSION			0x0008

#define QCOM_ICE_REG_FUSE_SETTING		0x0010
#define QCOM_ICE_FUSE_SETTING_MASK		0x1
#define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK	0x2
#define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK	0x4

#define QCOM_ICE_REG_BIST_STATUS		0x0070
#define QCOM_ICE_BIST_STATUS_MASK		0xF0000000

#define QCOM_ICE_REG_ADVANCED_CONTROL		0x1000

#define sdhci_msm_ice_writel(host, val, reg)	\
	writel((val), (host)->ice_mem + (reg))
#define sdhci_msm_ice_readl(host, reg)	\
	readl((host)->ice_mem + (reg))
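/*
 * Illustrative note (a sketch, not driver code): the two accessor macros
 * above just offset into the separately mapped "ice" MMIO region, e.g.:
 *
 *	u32 ver = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION);
 *	int major = ver >> 24;
 *
 * Non-relaxed readl()/writel() are used here, unlike the *_relaxed()
 * accessors elsewhere in this file, so each ICE access carries the full
 * memory-ordering guarantees.
 */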
static bool sdhci_msm_ice_supported(struct sdhci_msm_host *msm_host)
{
	struct device *dev = mmc_dev(msm_host->mmc);
	u32 regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_VERSION);
	int major = regval >> 24;
	int minor = (regval >> 16) & 0xFF;
	int step = regval & 0xFFFF;

	/* For now this driver only supports ICE version 3. */
	if (major != 3) {
		dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n",
			 major, minor, step);
		return false;
	}

	dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n",
		 major, minor, step);

	/* If fuses are blown, ICE might not work in the standard way. */
	regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_FUSE_SETTING);
	if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
		      QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
		      QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
		dev_warn(dev, "Fuses are blown; ICE is unusable!\n");
		return false;
	}

	return true;
}

static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
{
	return devm_clk_get(dev, "ice");
}
static int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
			      struct cqhci_host *cq_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct device *dev = mmc_dev(mmc);
	struct resource *res;

	if (!(cqhci_readl(cq_host, CQHCI_CAP) & CQHCI_CAP_CS))
		return 0;

	res = platform_get_resource_byname(msm_host->pdev, IORESOURCE_MEM,
					   "ice");
	if (!res) {
		dev_warn(dev, "ICE registers not found\n");
		goto disable;
	}

	if (!qcom_scm_ice_available()) {
		dev_warn(dev, "ICE SCM interface not found\n");
		goto disable;
	}

	msm_host->ice_mem = devm_ioremap_resource(dev, res);
	if (IS_ERR(msm_host->ice_mem))
		return PTR_ERR(msm_host->ice_mem);
	if (!sdhci_msm_ice_supported(msm_host))
		goto disable;

	mmc->caps2 |= MMC_CAP2_CRYPTO;
	return 0;

disable:
	dev_warn(dev, "Disabling inline encryption support\n");
	return 0;
}
static void sdhci_msm_ice_low_power_mode_enable(struct sdhci_msm_host *msm_host)
{
	u32 regval;

	regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
	/*
	 * Enable low power mode sequence
	 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
	 */
	regval |= 0x7000;
	sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
}

static void sdhci_msm_ice_optimization_enable(struct sdhci_msm_host *msm_host)
{
	u32 regval;

	/* ICE Optimizations Enable Sequence */
	regval = sdhci_msm_ice_readl(msm_host, QCOM_ICE_REG_ADVANCED_CONTROL);
	regval |= 0xD807100;
	/* ICE HPG requires delay before writing */
	udelay(5);
	sdhci_msm_ice_writel(msm_host, regval, QCOM_ICE_REG_ADVANCED_CONTROL);
	udelay(5);
}
/*
 * Wait until the ICE BIST (built-in self-test) has completed.
 *
 * This may be necessary before ICE can be used.
 *
 * Note that we don't really care whether the BIST passed or failed; we really
 * just want to make sure that it isn't still running. This is because (a) the
 * BIST is a FIPS compliance thing that never fails in practice, (b) ICE is
 * documented to reject crypto requests if the BIST fails, so we needn't do it
 * in software too, and (c) properly testing storage encryption requires
 * testing the full storage stack anyway, and not relying on hardware-level
 * self-tests.
 */
static int sdhci_msm_ice_wait_bist_status(struct sdhci_msm_host *msm_host)
{
	u32 regval;
	int err;

	err = readl_poll_timeout(msm_host->ice_mem + QCOM_ICE_REG_BIST_STATUS,
				 regval, !(regval & QCOM_ICE_BIST_STATUS_MASK),
				 50, 5000);
	if (err)
		dev_err(mmc_dev(msm_host->mmc),
			"Timed out waiting for ICE self-test to complete\n");
	return err;
}
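/*
 * Illustrative note (a sketch, not driver code): readl_poll_timeout() from
 * <linux/iopoll.h> repeatedly reads an MMIO address until a condition holds.
 * The call above expands roughly to:
 *
 *	poll QCOM_ICE_REG_BIST_STATUS every 50 us,
 *	until (regval & QCOM_ICE_BIST_STATUS_MASK) == 0,
 *	giving up with -ETIMEDOUT after 5000 us total.
 *
 * i.e. the BIST is considered finished once the status bits in the top
 * nibble (mask 0xF0000000) clear.
 */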
static void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
	if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
		return;
	sdhci_msm_ice_low_power_mode_enable(msm_host);
	sdhci_msm_ice_optimization_enable(msm_host);
	sdhci_msm_ice_wait_bist_status(msm_host);
}

static int __maybe_unused sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
	if (!(msm_host->mmc->caps2 & MMC_CAP2_CRYPTO))
		return 0;
	return sdhci_msm_ice_wait_bist_status(msm_host);
}
/*
 * Program a key into a QC ICE keyslot, or evict a keyslot. QC ICE requires
 * vendor-specific SCM calls for this; it doesn't support the standard way.
 */
static int sdhci_msm_program_key(struct cqhci_host *cq_host,
				 const union cqhci_crypto_cfg_entry *cfg,
				 int slot)
{
	struct device *dev = mmc_dev(cq_host->mmc);
	union cqhci_crypto_cap_entry cap;
	union {
		u8 bytes[AES_256_XTS_KEY_SIZE];
		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
	} key;
	int i;
	int err;

	if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
		return qcom_scm_ice_invalidate_key(slot);

	/* Only AES-256-XTS has been tested so far. */
	cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
	if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
	    cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256) {
		dev_err_ratelimited(dev,
				    "Unhandled crypto capability; algorithm_id=%d, key_size=%d\n",
				    cap.algorithm_id, cap.key_size);
		return -EINVAL;
	}

	memcpy(key.bytes, cfg->crypto_key, AES_256_XTS_KEY_SIZE);

	/*
	 * The SCM call byte-swaps the 32-bit words of the key. So we have to
	 * do the same, in order for the final key to be correct.
	 */
	for (i = 0; i < ARRAY_SIZE(key.words); i++)
		__cpu_to_be32s(&key.words[i]);

	err = qcom_scm_ice_set_key(slot, key.bytes, AES_256_XTS_KEY_SIZE,
				   QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				   cfg->data_unit_size);
	memzero_explicit(&key, sizeof(key));
	return err;
}
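/*
 * Illustrative sketch (not part of the driver): __cpu_to_be32s() swaps a
 * 32-bit word to big-endian in place. On a little-endian CPU, a key that
 * starts as bytes
 *
 *	00 11 22 33 | 44 55 66 77 | ...
 *
 * becomes
 *
 *	33 22 11 00 | 77 66 55 44 | ...
 *
 * after the loop above. The SCM call performs the same per-word swap on its
 * side, so the two swaps cancel out and the key the hardware finally sees
 * matches the original bytes. memzero_explicit() then wipes the stack copy
 * in a way the compiler is not allowed to optimize away.
 */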
#else /* CONFIG_MMC_CRYPTO */
static inline struct clk *sdhci_msm_ice_get_clk(struct device *dev)
{
	return NULL;
}

static inline int sdhci_msm_ice_init(struct sdhci_msm_host *msm_host,
				     struct cqhci_host *cq_host)
{
	return 0;
}

static inline void sdhci_msm_ice_enable(struct sdhci_msm_host *msm_host)
{
}

static inline int __maybe_unused
sdhci_msm_ice_resume(struct sdhci_msm_host *msm_host)
{
	return 0;
}
#endif /* !CONFIG_MMC_CRYPTO */
/*****************************************************************************\
 *                                                                           *
 * MSM Command Queue Engine (CQE)                                            *
 *                                                                           *
\*****************************************************************************/

static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);
	return 0;
}
static void sdhci_msm_cqe_enable(struct mmc_host *mmc)
{
	struct sdhci_host *host = mmc_priv(mmc);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	sdhci_cqe_enable(mmc);
	sdhci_msm_ice_enable(msm_host);
}
static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
{
	struct sdhci_host *host = mmc_priv(mmc);
	unsigned long flags;
	u32 ctrl;

	/*
	 * When CQE is halted, the legacy SDHCI path operates only
	 * on 16-byte descriptors in 64-bit mode.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;

	spin_lock_irqsave(&host->lock, flags);

	/*
	 * During CQE command transfers, the command complete bit gets
	 * latched. So s/w should clear the command complete interrupt status
	 * when CQE is either halted or disabled. Otherwise an unexpected
	 * SDHCI legacy interrupt gets triggered when CQE is halted/disabled.
	 */
	ctrl = sdhci_readl(host, SDHCI_INT_ENABLE);
	ctrl |= SDHCI_INT_RESPONSE;
	sdhci_writel(host, ctrl, SDHCI_INT_ENABLE);
	sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);

	spin_unlock_irqrestore(&host->lock, flags);

	sdhci_cqe_disable(mmc, recovery);
}
static const struct cqhci_host_ops sdhci_msm_cqhci_ops = {
	.enable = sdhci_msm_cqe_enable,
	.disable = sdhci_msm_cqe_disable,
#ifdef CONFIG_MMC_CRYPTO
	.program_key = sdhci_msm_program_key,
#endif
};
static int sdhci_msm_cqe_add_host(struct sdhci_host *host,
				  struct platform_device *pdev)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	struct cqhci_host *cq_host;
	bool dma64;
	u32 cqcfg;
	int ret;

	/*
	 * When CQE is halted, SDHC operates only on 16-byte ADMA descriptors.
	 * So ensure ADMA table is allocated for 16-byte descriptors.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	cq_host = cqhci_pltfm_init(pdev);
	if (IS_ERR(cq_host)) {
		ret = PTR_ERR(cq_host);
		dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret);
		goto cleanup;
	}

	msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
	cq_host->ops = &sdhci_msm_cqhci_ops;

	dma64 = host->flags & SDHCI_USE_64_BIT_DMA;
	ret = sdhci_msm_ice_init(msm_host, cq_host);
	if (ret)
		goto cleanup;

	ret = cqhci_init(cq_host, host->mmc, dma64);
	if (ret) {
		dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n",
			mmc_hostname(host->mmc), ret);
		goto cleanup;
	}

	/* Disable cqe reset due to cqe enable signal */
	cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1);
	cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN;
	cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1);

	/*
	 * SDHC expects 12-byte ADMA descriptors till CQE is enabled.
	 * So limit desc_sz to 12 so that the data commands that are sent
	 * during card initialization (before CQE gets enabled) would
	 * get executed without any issues.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	ret = __sdhci_add_host(host);
	if (ret)
		goto cleanup;

	dev_info(&pdev->dev, "%s: CQE init: success\n",
		 mmc_hostname(host->mmc));
	return ret;

cleanup:
	sdhci_cleanup_host(host);
	return ret;
}
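/*
 * Illustrative summary (derived from the comments above, not extra driver
 * logic): with 64-bit DMA in use, the ADMA descriptor size is juggled
 * between three values on this controller:
 *
 *	alloc_desc_sz = 16   table sized for the largest case, at probe
 *	desc_sz       = 12   legacy SDHCI transfers before CQE is enabled
 *	desc_sz       = 16   legacy SDHCI transfers while CQE is halted
 *
 * sdhci_msm_cqe_disable() is what switches desc_sz back to 16.
 */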
/*
 * Platform specific register write functions. This is so that, if any
 * register write needs to be followed up by platform specific actions,
 * they can be added here. These functions can go to sleep when writes
 * to certain registers are done.
 * These functions are relying on sdhci_set_ios not using spinlock.
 */
static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	u32 req_type = 0;

	switch (reg) {
	case SDHCI_HOST_CONTROL2:
		req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW :
			REQ_IO_HIGH;
		break;
	case SDHCI_SOFTWARE_RESET:
		if (host->pwr && (val & SDHCI_RESET_ALL))
			req_type = REQ_BUS_OFF;
		break;
	case SDHCI_POWER_CONTROL:
		req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON;
		break;
	case SDHCI_TRANSFER_MODE:
		msm_host->transfer_mode = val;
		break;
	case SDHCI_COMMAND:
		if (!msm_host->use_cdr)
			break;
		if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200 &&
		    SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK)
			sdhci_msm_set_cdr(host, true);
		else
			sdhci_msm_set_cdr(host, false);
		break;
	}

	if (req_type) {
		msm_host->pwr_irq_flag = 0;
		/*
		 * Since this register write may trigger a power irq, ensure
		 * all previous register writes are complete by this point.
		 */
		mb();
	}
	return req_type;
}

/* This function may sleep */
static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writew_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}

/* This function may sleep */
static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg)
{
	u32 req_type = 0;

	req_type = __sdhci_msm_check_write(host, val, reg);
	writeb_relaxed(val, host->ioaddr + reg);

	if (req_type)
		sdhci_msm_check_power_status(host, req_type);
}
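/*
 * Illustrative sketch (not part of the driver): every 8/16-bit register
 * write thus follows the same three-step pattern:
 *
 *	req_type = __sdhci_msm_check_write(host, val, reg);
 *		(clears pwr_irq_flag and issues mb() if an irq may follow)
 *	writew_relaxed(val, host->ioaddr + reg);
 *		(the write that may fire the power irq)
 *	sdhci_msm_check_power_status(host, req_type);
 *		(sleeps until the irq handler acknowledges the request)
 *
 * which is why these hooks must not be called under a spinlock, as the
 * comment above notes for sdhci_set_ios.
 */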
static void sdhci_msm_set_regulator_caps(struct sdhci_msm_host *msm_host)
{
	struct mmc_host *mmc = msm_host->mmc;
	struct regulator *supply = mmc->supply.vqmmc;
	u32 caps = 0, config;
	struct sdhci_host *host = mmc_priv(mmc);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	if (!IS_ERR(mmc->supply.vqmmc)) {
		if (regulator_is_supported_voltage(supply, 1700000, 1950000))
			caps |= CORE_1_8V_SUPPORT;
		if (regulator_is_supported_voltage(supply, 2700000, 3600000))
			caps |= CORE_3_0V_SUPPORT;

		if (!caps)
			pr_warn("%s: 1.8/3V not supported for vqmmc\n",
				mmc_hostname(mmc));
	}

	if (caps) {
		/*
		 * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH
		 * bit can be used as required later on.
		 */
		u32 io_level = msm_host->curr_io_level;

		config = readl_relaxed(host->ioaddr +
				msm_offset->core_vendor_spec);
		config |= CORE_IO_PAD_PWR_SWITCH_EN;

		if ((io_level & REQ_IO_HIGH) && (caps & CORE_3_0V_SUPPORT))
			config &= ~CORE_IO_PAD_PWR_SWITCH;
		else if ((io_level & REQ_IO_LOW) || (caps & CORE_1_8V_SUPPORT))
			config |= CORE_IO_PAD_PWR_SWITCH;

		writel_relaxed(config,
				host->ioaddr + msm_offset->core_vendor_spec);
	}
	msm_host->caps_0 |= caps;
	pr_debug("%s: supported caps: 0x%08x\n", mmc_hostname(mmc), caps);
}
static void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
{
	if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL))
		cqhci_deactivate(host->mmc);
	sdhci_reset(host, mask);
}
static int sdhci_msm_register_vreg(struct sdhci_msm_host *msm_host)
{
	int ret;

	ret = mmc_regulator_get_supply(msm_host->mmc);
	if (ret)
		return ret;

	sdhci_msm_set_regulator_caps(msm_host);

	return 0;
}

static int sdhci_msm_start_signal_voltage_switch(struct mmc_host *mmc,
						 struct mmc_ios *ios)
{
	struct sdhci_host *host = mmc_priv(mmc);
	u16 ctrl, status;

	/*
	 * Signal Voltage Switching is only applicable for Host Controllers
	 * v3.00 and above.
	 */
	if (host->version < SDHCI_SPEC_300)
		return 0;

	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);

	switch (ios->signal_voltage) {
	case MMC_SIGNAL_VOLTAGE_330:
		if (!(host->flags & SDHCI_SIGNALING_330))
			return -EINVAL;

		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
		ctrl &= ~SDHCI_CTRL_VDD_180;
		break;
	case MMC_SIGNAL_VOLTAGE_180:
		if (!(host->flags & SDHCI_SIGNALING_180))
			return -EINVAL;

		/* Enable 1.8V Signal Enable in the Host Control2 register */
		ctrl |= SDHCI_CTRL_VDD_180;
		break;

	default:
		return -EINVAL;
	}

	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);

	/* Wait for 5ms */
	usleep_range(5000, 5500);

	/* regulator output should be stable within 5 ms */
	status = ctrl & SDHCI_CTRL_VDD_180;
	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if ((ctrl & SDHCI_CTRL_VDD_180) == status)
		return 0;

	dev_warn(mmc_dev(mmc), "%s: Regulator output did not become stable\n",
		 mmc_hostname(mmc));

	return -EAGAIN;
}
#define DRIVER_NAME "sdhci_msm"
#define SDHCI_MSM_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

static void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_msm_offset *msm_offset = msm_host->offset;

	SDHCI_MSM_DUMP("----------- VENDOR REGISTER DUMP -----------\n");

	SDHCI_MSM_DUMP(
			"DLL sts: 0x%08x | DLL cfg: 0x%08x | DLL cfg2: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_status),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_2));
	SDHCI_MSM_DUMP(
			"DLL cfg3: 0x%08x | DLL usr ctl: 0x%08x | DDR cfg: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_dll_config_3),
		readl_relaxed(host->ioaddr + msm_offset->core_dll_usr_ctl),
		readl_relaxed(host->ioaddr + msm_offset->core_ddr_config));
	SDHCI_MSM_DUMP(
			"Vndr func: 0x%08x | Vndr func2: 0x%08x | Vndr func3: 0x%08x\n",
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec),
		readl_relaxed(host->ioaddr +
			msm_offset->core_vendor_spec_func2),
		readl_relaxed(host->ioaddr + msm_offset->core_vendor_spec3));
}
static const struct sdhci_msm_variant_ops mci_var_ops = {
	.msm_readl_relaxed = sdhci_msm_mci_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_mci_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_ops v5_var_ops = {
	.msm_readl_relaxed = sdhci_msm_v5_variant_readl_relaxed,
	.msm_writel_relaxed = sdhci_msm_v5_variant_writel_relaxed,
};

static const struct sdhci_msm_variant_info sdhci_msm_mci_var = {
	.var_ops = &mci_var_ops,
	.offset = &sdhci_msm_mci_offset,
};

static const struct sdhci_msm_variant_info sdhci_msm_v5_var = {
	.mci_removed = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};

static const struct sdhci_msm_variant_info sdm845_sdhci_var = {
	.mci_removed = true,
	.restore_dll_config = true,
	.var_ops = &v5_var_ops,
	.offset = &sdhci_msm_v5_offset,
};
static const struct of_device_id sdhci_msm_dt_match[] = {
	{.compatible = "qcom,sdhci-msm-v4", .data = &sdhci_msm_mci_var},
	{.compatible = "qcom,sdhci-msm-v5", .data = &sdhci_msm_v5_var},
	{.compatible = "qcom,sdm845-sdhci", .data = &sdm845_sdhci_var},
	{.compatible = "qcom,sc7180-sdhci", .data = &sdm845_sdhci_var},
	{},
};

MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
static const struct sdhci_ops sdhci_msm_ops = {
	.reset = sdhci_msm_reset,
	.set_clock = sdhci_msm_set_clock,
	.get_min_clock = sdhci_msm_get_min_clock,
	.get_max_clock = sdhci_msm_get_max_clock,
	.set_bus_width = sdhci_set_bus_width,
	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
	.write_w = sdhci_msm_writew,
	.write_b = sdhci_msm_writeb,
	.irq = sdhci_msm_cqe_irq,
	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
	.set_power = sdhci_set_power_noreg,
};

static const struct sdhci_pltfm_data sdhci_msm_pdata = {
	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
		  SDHCI_QUIRK_SINGLE_POWER_WRITE |
		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
		  SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,

	.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
	.ops = &sdhci_msm_ops,
};
static inline void sdhci_msm_get_of_property(struct platform_device *pdev,
		struct sdhci_host *host)
{
	struct device_node *node = pdev->dev.of_node;
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	if (of_property_read_u32(node, "qcom,ddr-config",
				&msm_host->ddr_config))
		msm_host->ddr_config = DDR_CONFIG_POR_VAL;

	of_property_read_u32(node, "qcom,dll-config", &msm_host->dll_config);
}
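/*
 * Illustrative note (a sketch, not driver code): of_property_read_u32()
 * returns 0 on success and a negative errno if the property is absent or
 * malformed, leaving the output untouched. That makes the two calls above
 * behave differently on a missing property:
 *
 *	qcom,ddr-config  falls back to DDR_CONFIG_POR_VAL
 *	qcom,dll-config  keeps whatever msm_host->dll_config already holds
 *			 (zero, since the private data is zero-initialized
 *			 by sdhci_pltfm_init)
 */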
static int sdhci_msm_probe(struct platform_device *pdev)
{
	struct sdhci_host *host;
	struct sdhci_pltfm_host *pltfm_host;
	struct sdhci_msm_host *msm_host;
	struct clk *clk;
	int ret;
	u16 host_version, core_minor;
	u32 core_version, config;
	u8 core_major;
	const struct sdhci_msm_offset *msm_offset;
	const struct sdhci_msm_variant_info *var_info;
	struct device_node *node = pdev->dev.of_node;

	host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host));
	if (IS_ERR(host))
		return PTR_ERR(host);

	host->sdma_boundary = 0;
	pltfm_host = sdhci_priv(host);
	msm_host = sdhci_pltfm_priv(pltfm_host);
	msm_host->mmc = host->mmc;
	msm_host->pdev = pdev;

	ret = mmc_of_parse(host->mmc);
	if (ret)
		goto pltfm_free;

	/*
	 * Based on the compatible string, load the required msm host info from
	 * the data associated with the version info.
	 */
	var_info = of_device_get_match_data(&pdev->dev);

	msm_host->mci_removed = var_info->mci_removed;
	msm_host->restore_dll_config = var_info->restore_dll_config;
	msm_host->var_ops = var_info->var_ops;
	msm_host->offset = var_info->offset;

	msm_offset = msm_host->offset;

	sdhci_get_of_property(pdev);
	sdhci_msm_get_of_property(pdev, host);

	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;

	/* Setup SDCC bus voter clock. */
	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(msm_host->bus_clk)) {
		/* Vote for max. clk rate for max. performance */
		ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
		if (ret)
			goto pltfm_free;
		ret = clk_prepare_enable(msm_host->bus_clk);
		if (ret)
			goto pltfm_free;
	}

	/* Setup main peripheral bus clock */
	clk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[1].clk = clk;

	/* Setup SDC MMC clock */
	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
		goto bus_clk_disable;
	}
	msm_host->bulk_clks[0].clk = clk;

	/* Check for optional interconnect paths */
	ret = dev_pm_opp_of_find_icc_paths(&pdev->dev, NULL);
	if (ret)
		goto bus_clk_disable;

	ret = devm_pm_opp_set_clkname(&pdev->dev, "core");
	if (ret)
		goto bus_clk_disable;

	/* OPP table is optional */
	ret = devm_pm_opp_of_add_table(&pdev->dev);
	if (ret && ret != -ENODEV) {
		dev_err(&pdev->dev, "Invalid OPP table in Device tree\n");
		goto bus_clk_disable;
	}

	/* Vote for maximum clock rate for maximum performance */
	ret = dev_pm_opp_set_rate(&pdev->dev, INT_MAX);
	if (ret)
		dev_warn(&pdev->dev, "core clock boost failed\n");

	clk = devm_clk_get(&pdev->dev, "cal");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[2].clk = clk;

	clk = devm_clk_get(&pdev->dev, "sleep");
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[3].clk = clk;
	clk = sdhci_msm_ice_get_clk(&pdev->dev);
	if (IS_ERR(clk))
		clk = NULL;
	msm_host->bulk_clks[4].clk = clk;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		goto bus_clk_disable;

	/*
	 * The xo clock is needed for the FLL feature of cm_dll.
	 * If the xo clock is not specified in DT, warn and proceed.
	 */
	msm_host->xo_clk = devm_clk_get(&pdev->dev, "xo");
	if (IS_ERR(msm_host->xo_clk)) {
		ret = PTR_ERR(msm_host->xo_clk);
		dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret);
	}

	if (!msm_host->mci_removed) {
		msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(msm_host->core_mem)) {
			ret = PTR_ERR(msm_host->core_mem);
			goto clk_disable;
		}
	}

	/* Reset the vendor spec register to power on reset state */
	writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
			host->ioaddr + msm_offset->core_vendor_spec);

	if (!msm_host->mci_removed) {
		/* Set HC_MODE_EN bit in HC_MODE register */
		msm_host_writel(msm_host, HC_MODE_EN, host,
				msm_offset->core_hc_mode);
		config = msm_host_readl(msm_host, host,
				msm_offset->core_hc_mode);
		config |= FF_CLK_SW_RST_DIS;
		msm_host_writel(msm_host, config, host,
				msm_offset->core_hc_mode);
	}

	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
			       SDHCI_VENDOR_VER_SHIFT));

	core_version = msm_host_readl(msm_host, host,
			msm_offset->core_mci_version);
	core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
		      CORE_VERSION_MAJOR_SHIFT;
	core_minor = core_version & CORE_VERSION_MINOR_MASK;
	dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
		core_version, core_major, core_minor);

	if (core_major == 1 && core_minor >= 0x42)
		msm_host->use_14lpp_dll_reset = true;

	/*
	 * SDCC 5 controller with major version 1, minor version 0x34 and later
	 * with HS 400 mode support will use CM DLL instead of CDC LP 533 DLL.
	 */
	if (core_major == 1 && core_minor < 0x34)
		msm_host->use_cdclp533 = true;

	/*
	 * Support for some capabilities is not advertised by newer
	 * controller versions and must be explicitly enabled.
	 */
	if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
		config = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
		config |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
		writel_relaxed(config, host->ioaddr +
				msm_offset->core_vendor_spec_capabilities0);
	}

	if (core_major == 1 && core_minor >= 0x49)
		msm_host->updated_ddr_cfg = true;

	if (core_major == 1 && core_minor >= 0x71)
		msm_host->uses_tassadar_dll = true;

	ret = sdhci_msm_register_vreg(msm_host);
	if (ret)
		goto clk_disable;

	/*
	 * Power on reset state may trigger power irq if previous status of
	 * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq
	 * interrupt in GIC, any pending power irq interrupt should be
	 * acknowledged. Otherwise power irq interrupt handler would be
	 * fired prematurely.
	 */
	sdhci_msm_handle_pwr_irq(host, 0);

	/*
	 * Ensure that the above writes are propagated before interrupt
	 * enablement in GIC.
	 */
	mb();
2016-06-24 18:07:14 +03:00
/* Setup IRQ for handling power/voltage tasks with PMIC */
msm_host - > pwr_irq = platform_get_irq_byname ( pdev , " pwr_irq " ) ;
if ( msm_host - > pwr_irq < 0 ) {
2016-10-26 18:04:41 +03:00
ret = msm_host - > pwr_irq ;
2016-06-24 18:07:14 +03:00
goto clk_disable ;
}
2017-09-27 08:34:43 +03:00
sdhci_msm_init_pwr_irq_wait ( msm_host ) ;
2017-09-27 08:34:40 +03:00
/* Enable pwr irq interrupts */
2018-06-19 08:39:21 +03:00
msm_host_writel ( msm_host , INT_MASK , host ,
msm_offset - > core_pwrctl_mask ) ;
2017-09-27 08:34:40 +03:00
2016-06-24 18:07:14 +03:00
ret = devm_request_threaded_irq ( & pdev - > dev , msm_host - > pwr_irq , NULL ,
sdhci_msm_pwr_irq , IRQF_ONESHOT ,
dev_name ( & pdev - > dev ) , host ) ;
if ( ret ) {
dev_err ( & pdev - > dev , " Request IRQ failed (%d) \n " , ret ) ;
goto clk_disable ;
}
2020-04-20 09:20:23 +03:00
	msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
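
	/*
	 * Enable runtime PM with autosuspend. The usage count taken here
	 * keeps the device active until probing has finished; it is dropped
	 * below once the host has been added.
	 */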
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
					 MSM_MMC_AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(&pdev->dev);
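
	/* Plug in the MSM-specific voltage switching and tuning routines */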
	host->mmc_host_ops.start_signal_voltage_switch =
		sdhci_msm_start_signal_voltage_switch;
	host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning;
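
	/* Register through CQHCI when the device tree advertises command queueing */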
	if (of_property_read_bool(node, "supports-cqe"))
		ret = sdhci_msm_cqe_add_host(host, pdev);
	else
		ret = sdhci_add_host(host);
	if (ret)
		goto pm_runtime_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

pm_runtime_disable:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
clk_disable:
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
bus_clk_disable:
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
	sdhci_pltfm_free(pdev);
	return ret;
}

static int sdhci_msm_remove(struct platform_device *pdev)
{
	struct sdhci_host *host = platform_get_drvdata(pdev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
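	/* A read of all ones indicates the controller is no longer accessible */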
	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
		    0xffffffff);

	sdhci_remove_host(host, dead);

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);
	if (!IS_ERR(msm_host->bus_clk))
		clk_disable_unprepare(msm_host->bus_clk);
	sdhci_pltfm_free(pdev);
	return 0;
}

static __maybe_unused int sdhci_msm_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);

	/* Drop the performance vote */
	dev_pm_opp_set_rate(dev, 0);
	clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks),
				   msm_host->bulk_clks);

	return 0;
}

static __maybe_unused int sdhci_msm_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
	int ret;

	ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks),
				      msm_host->bulk_clks);
	if (ret)
		return ret;

	/*
	 * Whenever the core clock is gated dynamically, the SDR DLL
	 * settings must be restored when the clock is ungated again.
	 */
	if (msm_host->restore_dll_config && msm_host->clk_rate) {
		ret = sdhci_msm_restore_sdr_dll_config(host);
		if (ret)
			return ret;
	}
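
	/* Restore the performance vote dropped in runtime suspend */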
	dev_pm_opp_set_rate(dev, msm_host->clk_rate);
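
	/* The inline crypto engine, if present, needs vendor-specific re-initialization */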
	return sdhci_msm_ice_resume(msm_host);
}
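
/*
 * System sleep reuses the runtime PM callbacks via
 * pm_runtime_force_suspend()/pm_runtime_force_resume().
 */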
static const struct dev_pm_ops sdhci_msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend,
			   sdhci_msm_runtime_resume,
			   NULL)
};

static struct platform_driver sdhci_msm_driver = {
	.probe = sdhci_msm_probe,
	.remove = sdhci_msm_remove,
	.driver = {
		   .name = "sdhci_msm",
		   .of_match_table = sdhci_msm_dt_match,
		   .pm = &sdhci_msm_pm_ops,
		   .probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

module_platform_driver(sdhci_msm_driver);

MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");