Merge tag 'mmc-v4.4' of git://git.linaro.org/people/ulf.hansson/mmc

Pull MMC updates from Ulf Hansson:

 "MMC core:
   - Add new API to set VCCQ voltage - mmc_regulator_set_vqmmc()
   - Add new ioctl to allow userspace to send multi commands
   - Wait for card busy signalling before starting SDIO requests
   - Remove MMC_CLKGATE
   - Enable tuning for DDR50 mode
   - Some code clean-up/improvements to mmc pwrseq
   - Use highest priority for eMMC restart handler
   - Add DT bindings for eMMC hardware reset support
   - Extend the mmc_send_tuning() API
   - Improve ios show for debugfs
   - A couple of code optimizations

  MMC host:
   - Some generic OF improvements
   - Various code clean-ups
   - sirf: Add support for DDR50
   - sunxi: Add support for card busy detection
   - mediatek: Use MMC_CAP_RUNTIME_RESUME
   - mediatek: Add support for eMMC HW-reset
   - mediatek: Add support for HS400
   - dw_mmc: Convert to use the new mmc_regulator_set_vqmmc() API
   - dw_mmc: Add external DMA interface support
   - dw_mmc: Some various improvements
   - dw_mmc-rockchip: MMC tuning with the clock phase framework
   - sdhci: Properly clear IRQs during resume
   - sdhci: Enable tuning for DDR50 mode
   - sdhci-of-esdhc: Use IRQ mode for card detection
   - sdhci-of-esdhc: Support both BE and LE host controller
   - sdhci-pci: Build o2micro support in the same module
   - sdhci-pci: Support for new Intel host controllers
   - sdhci-acpi: Support for new Intel host controllers"

* tag 'mmc-v4.4' of git://git.linaro.org/people/ulf.hansson/mmc: (73 commits)
  mmc: dw_mmc: fix the wrong setting for UHS-DDR50 mode
  mmc: dw_mmc: fix the CardThreshold boundary at CardThrCtl register
  mmc: dw_mmc: NULL dereference in error message
  mmc: pwrseq: Use highest priority for eMMC restart handler
  mmc: mediatek: add HS400 support
  mmc: mmc: extend the mmc_send_tuning()
  mmc: mediatek: add implement of ops->hw_reset()
  mmc: mediatek: fix got GPD checksum error interrupt when data transfer
  mmc: mediatek: change the argument "ddr" to "timing"
  mmc: mediatek: make cmd_ints_mask to const
  mmc: dt-bindings: update Mediatek MMC bindings
  mmc: core: Add DT bindings for eMMC hardware reset support
  mmc: omap_hsmmc: Enable omap_hsmmc for Keystone 2
  mmc: sdhci-acpi: Add more ACPI HIDs for Intel controllers
  mmc: sdhci-pci: Add more PCI IDs for Intel controllers
  arm: lpc18xx_defconfig: remove CONFIG_MMC_DW_IDMAC
  arm: hisi_defconfig: remove CONFIG_MMC_DW_IDMAC
  arm: exynos_defconfig: remove CONFIG_MMC_DW_IDMAC
  arc: axs10x_defconfig: remove CONFIG_MMC_DW_IDMAC
  mips: pistachio_defconfig: remove CONFIG_MMC_DW_IDMAC
  ...
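The new MMC_IOC_MULTI_CMD ioctl takes a struct mmc_ioc_multi_cmd holding num_of_cmds mmc_ioc_cmd entries and issues them back to back while the card is claimed once. A rough userspace sketch of a caller follows; the opcodes, arguments and device path are placeholders, the response-flag macros are mirrored from the kernel's mmc headers (userspace tools such as mmc-utils carry the same copies), and a real caller needs CAP_SYS_RAWIO and must open the whole block device, not a partition.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mmc/ioctl.h>

/* response/command type bits, mirrored from the kernel's mmc core headers */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_AC	(0 << 5)

int main(void)
{
	struct mmc_ioc_multi_cmd *multi;
	int fd, ret;

	/* room for the header plus two commands */
	multi = calloc(1, sizeof(*multi) + 2 * sizeof(struct mmc_ioc_cmd));
	if (!multi)
		return 1;

	multi->num_of_cmds = 2;
	/* Placeholder commands: a real user fills opcode/arg/flags, e.g. an
	 * RPMB write followed by its result read. CMD13 is illustrative only. */
	multi->cmds[0].opcode = 13;
	multi->cmds[0].flags = MMC_RSP_R1 | MMC_CMD_AC;
	multi->cmds[1].opcode = 13;
	multi->cmds[1].flags = MMC_RSP_R1 | MMC_CMD_AC;

	fd = open("/dev/mmcblk0", O_RDWR);
	if (fd < 0) {
		free(multi);
		return 1;
	}

	ret = ioctl(fd, MMC_IOC_MULTI_CMD, multi);
	if (ret)
		perror("MMC_IOC_MULTI_CMD");

	close(fd);
	free(multi);
	return ret ? 1 : 0;
}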
@@ -22,6 +22,8 @@ Optional properties:
  - voltage-ranges : two cells are required, first cell specifies minimum
    slot voltage (mV), second cell specifies maximum slot voltage (mV).
    Several ranges could be specified.
  - little-endian : If the host controller is little-endian mode, specify
    this property. The default endian mode is big-endian.

Example:
@@ -37,6 +37,7 @@ Optional properties:
- sd-uhs-sdr104: SD UHS SDR104 speed is supported
- sd-uhs-ddr50: SD UHS DDR50 speed is supported
- cap-power-off-card: powering off the card is safe
- cap-mmc-hw-reset: eMMC hardware reset is supported
- cap-sdio-irq: enable SDIO IRQ signalling on this interface
- full-pwr-cycle: full power cycle of the card is supported
- mmc-ddr-1_8v: eMMC high-speed DDR mode(1.8V I/O) is supported
@@ -17,6 +17,11 @@ Required properties:
- vmmc-supply: power to the Core
- vqmmc-supply: power to the IO

Optional properties:
- assigned-clocks: PLL of the source clock
- assigned-clock-parents: parent of source clock, used for HS400 mode to get 400Mhz source clock
- hs400-ds-delay: HS400 DS delay setting

Examples:
mmc0: mmc@11230000 {
	compatible = "mediatek,mt8173-mmc", "mediatek,mt8135-mmc";
@@ -24,9 +29,13 @@ mmc0: mmc@11230000 {
	interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_LOW>;
	vmmc-supply = <&mt6397_vemc_3v3_reg>;
	vqmmc-supply = <&mt6397_vio18_reg>;
	clocks = <&pericfg CLK_PERI_MSDC30_0>, <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
	clocks = <&pericfg CLK_PERI_MSDC30_0>,
		 <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
	clock-names = "source", "hclk";
	pinctrl-names = "default", "state_uhs";
	pinctrl-0 = <&mmc0_pins_default>;
	pinctrl-1 = <&mmc0_pins_uhs>;
	assigned-clocks = <&topckgen CLK_TOP_MSDC50_0_SEL>;
	assigned-clock-parents = <&topckgen CLK_TOP_MSDCPLL_D2>;
	hs400-ds-delay = <0x14015>;
};
@@ -6,11 +6,12 @@ and the properties used by the MMCIF device.

Required properties:

- compatible: must contain one of the following
- compatible: should be "renesas,mmcif-<soctype>", "renesas,sh-mmcif" as a
  fallback. Examples with <soctype> are:
	- "renesas,mmcif-r8a7740" for the MMCIF found in r8a7740 SoCs
	- "renesas,mmcif-r8a7790" for the MMCIF found in r8a7790 SoCs
	- "renesas,mmcif-r8a7791" for the MMCIF found in r8a7791 SoCs
	- "renesas,sh-mmcif" for the generic MMCIF
	- "renesas,mmcif-r8a7794" for the MMCIF found in r8a7794 SoCs

- clocks: reference to the functional clock
@@ -14,6 +14,19 @@ Required Properties:
	before RK3288
- "rockchip,rk3288-dw-mshc": for Rockchip RK3288

Optional Properties:
* clocks: from common clock binding: if ciu_drive and ciu_sample are
  specified in clock-names, should contain handles to these clocks.

* clock-names: Apart from the clock-names described in synopsys-dw-mshc.txt
  two more clocks "ciu-drive" and "ciu-sample" are supported. They are used
  to control the clock phases, "ciu-sample" is required for tuning high-
  speed modes.

* rockchip,default-sample-phase: The default phase to set ciu_sample at
  probing, low speeds or in case where all phases work at tuning time.
  If not specified 0 deg will be used.

Example:

	rkdwmmc0@12200000 {
@@ -75,6 +75,12 @@ Optional properties:
* vmmc-supply: The phandle to the regulator to use for vmmc. If this is
  specified we'll defer probe until we can find this regulator.

* dmas: List of DMA specifiers with the controller specific format as described
  in the generic DMA client binding. Refer to dma.txt for details.

* dma-names: request names for generic DMA client binding. Must be "rx-tx".
  Refer to dma.txt for details.

Aliases:

- All the MSHC controller nodes should be represented in the aliases node using
@@ -95,6 +101,8 @@ board specific portions as listed below.
		#size-cells = <0>;
	};

[board specific internal DMA resources]

	dwmmc0@12200000 {
		clock-frequency = <400000000>;
		clock-freq-min-max = <400000 200000000>;
@@ -107,3 +115,20 @@ board specific portions as listed below.
		cap-mmc-highspeed;
		cap-sd-highspeed;
	};

[board specific generic DMA request binding]

	dwmmc0@12200000 {
		clock-frequency = <400000000>;
		clock-freq-min-max = <400000 200000000>;
		num-slots = <1>;
		broken-cd;
		fifo-depth = <0x80>;
		card-detect-delay = <200>;
		vmmc-supply = <&buck8>;
		bus-width = <8>;
		cap-mmc-highspeed;
		cap-sd-highspeed;
		dmas = <&pdma 12>;
		dma-names = "rx-tx";
	};
@@ -72,13 +72,3 @@ Note on raw_rpmb_size_mult:
	"raw_rpmb_size_mult" is a multiple of 128kB block.
	RPMB size in byte is calculated by using the following equation:
	RPMB partition size = 128kB x raw_rpmb_size_mult

SD/MMC/SDIO Clock Gating Attribute
==================================

Read and write access is provided to following attribute.
This attribute appears only if CONFIG_MMC_CLKGATE is enabled.

	clkgate_delay	Tune the clock gating delay with desired value in milliseconds.

	echo <desired delay> > /sys/class/mmc_host/mmcX/clkgate_delay
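As a quick illustration of the raw_rpmb_size_mult equation quoted above (the value 16 below is an example, not taken from this merge): a card reporting raw_rpmb_size_mult = 16 has a 16 x 128 kB = 2 MiB RPMB partition.

/* Illustrative helper: RPMB partition size from raw_rpmb_size_mult. */
static inline unsigned long long rpmb_size_bytes(unsigned int raw_rpmb_size_mult)
{
	return 128ULL * 1024 * raw_rpmb_size_mult;	/* 128 kB units */
}
/* e.g. rpmb_size_bytes(16) == 2097152, i.e. a 2 MiB RPMB partition */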
@@ -89,7 +89,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y

@@ -95,7 +95,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y

@@ -96,7 +96,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
@@ -90,7 +90,7 @@
	regulators {
		vccio_sd: LDO_REG4 {
			regulator-name = "vccio_sd";
			regulator-min-microvolt = <3300000>;
			regulator-min-microvolt = <1800000>;
			regulator-max-microvolt = <3300000>;
			regulator-state-mem {
				regulator-off-in-suspend;
@@ -116,7 +116,12 @@
	cap-sd-highspeed;
	card-detect-delay = <200>;
	cd-gpios = <&gpio7 5 GPIO_ACTIVE_LOW>;
	rockchip,default-sample-phase = <90>;
	num-slots = <1>;
	sd-uhs-sdr12;
	sd-uhs-sdr25;
	sd-uhs-sdr50;
	sd-uhs-sdr104;
	vmmc-supply = <&vcc33_sd>;
	vqmmc-supply = <&vccio_sd>;
};

@@ -149,7 +149,9 @@
	broken-cd;
	bus-width = <8>;
	cap-mmc-highspeed;
	rockchip,default-sample-phase = <158>;
	disable-wp;
	mmc-hs200-1_8v;
	mmc-pwrseq = <&emmc_pwrseq>;
	non-removable;
	num-slots = <1>;
@@ -355,6 +357,10 @@
	num-slots = <1>;
	pinctrl-names = "default";
	pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>;
	sd-uhs-sdr12;
	sd-uhs-sdr25;
	sd-uhs-sdr50;
	sd-uhs-sdr104;
	vmmc-supply = <&vcc33_sys>;
	vqmmc-supply = <&vcc18_wl>;
};
@@ -222,8 +222,9 @@
	sdmmc: dwmmc@ff0c0000 {
		compatible = "rockchip,rk3288-dw-mshc";
		clock-freq-min-max = <400000 150000000>;
		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
		clock-names = "biu", "ciu";
		clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
			 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
		fifo-depth = <0x100>;
		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
		reg = <0xff0c0000 0x4000>;
@@ -233,8 +234,9 @@
	sdio0: dwmmc@ff0d0000 {
		compatible = "rockchip,rk3288-dw-mshc";
		clock-freq-min-max = <400000 150000000>;
		clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>;
		clock-names = "biu", "ciu";
		clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
			 <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
		fifo-depth = <0x100>;
		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
		reg = <0xff0d0000 0x4000>;
@@ -244,8 +246,9 @@
	sdio1: dwmmc@ff0e0000 {
		compatible = "rockchip,rk3288-dw-mshc";
		clock-freq-min-max = <400000 150000000>;
		clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>;
		clock-names = "biu", "ciu";
		clocks = <&cru HCLK_SDIO1>, <&cru SCLK_SDIO1>,
			 <&cru SCLK_SDIO1_DRV>, <&cru SCLK_SDIO1_SAMPLE>;
		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
		fifo-depth = <0x100>;
		interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
		reg = <0xff0e0000 0x4000>;
@@ -255,8 +258,9 @@
	emmc: dwmmc@ff0f0000 {
		compatible = "rockchip,rk3288-dw-mshc";
		clock-freq-min-max = <400000 150000000>;
		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
		clock-names = "biu", "ciu";
		clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
			 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
		clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
		fifo-depth = <0x100>;
		interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
		reg = <0xff0f0000 0x4000>;
@@ -166,7 +166,6 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_S3C=y
CONFIG_MMC_SDHCI_S3C_DMA=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_MAX77686=y

@@ -69,7 +69,6 @@ CONFIG_NOP_USB_XCEIV=y
CONFIG_MMC=y
CONFIG_RTC_CLASS=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
CONFIG_MMC_DW_PLTFM=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y

@@ -119,7 +119,6 @@ CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_MMC=y
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_PCA9532=y

@@ -257,7 +257,6 @@ CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=16
CONFIG_MMC_TEST=m
CONFIG_MMC_DW=y
CONFIG_MMC_DW_IDMAC=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_RTC_CLASS=y
@@ -45,8 +45,8 @@ static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
#define PSECS_PER_SEC 1000000000000LL

/*
 * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60

@@ -69,7 +69,7 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)

		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += delay_num * factor / 10000;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 10000);
	}

	return degrees % 360;
@@ -82,25 +82,41 @@ static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u64 delay;

	/* allow 22 to be 22.5 */
	degrees++;
	/* floor to 22.5 increment */
	degrees -= ((degrees) * 10 % 225) / 10;
	u32 delay;

	nineties = degrees / 90;
	/* 22.5 multiples */
	remainder = (degrees % 90) / 22;
	remainder = (degrees % 90);

	delay = PSECS_PER_SEC;
	do_div(delay, rate);
	/* / 360 / 22.5 */
	do_div(delay, 16);
	do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ monotonic
	 * though, so we should be OK. Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 * 1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 * .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if delay is actually 77ps):
	 * 1.3, 2.6, ..., 88.6. 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay_num = (u8) min(delay, 255ULL);
	delay = DIV_ROUND_CLOSEST(delay,
				  (rate / 1000) * 36 *
				  (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));

	delay_num = (u8) min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
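The new conversion above computes how many nominal ~60 ps delay elements are needed for the sub-90-degree remainder at the current ciu clock rate, i.e. elements = (10^12 ps/s * remainder / 360) / (rate * 60 ps), with the scaling rearranged to avoid overflow. A standalone sketch of the same arithmetic with a worked value (plain userspace C, DIV_ROUND_CLOSEST open-coded; the function name and the 150 MHz / 45 degree inputs are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60	/* assumed nominal element delay */

/* Delay elements for `remainder` degrees (0..89) at `rate` Hz, using the
 * same scaled integer math as the driver: 10000000 = PSECS_PER_SEC/10000/10,
 * and the divisor (rate/1000) * 36 * 6 folds in the /360 and /60ps steps. */
static uint32_t phase_to_delay_elements(unsigned int rate, unsigned int remainder)
{
	uint64_t num = 10000000ULL * remainder;
	uint64_t den = (uint64_t)(rate / 1000) * 36 *
		       (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10);

	return (uint32_t)((num + den / 2) / den);	/* round to closest */
}

int main(void)
{
	/* e.g. ciu clock at 150 MHz, 45 degrees past the last 90-degree step:
	 * period = 6667 ps, 45/360 of that = 833 ps, /60 ps per element = 14. */
	printf("%u\n", phase_to_delay_elements(150000000, 45));	/* prints 14 */
	return 0;
}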
@ -387,6 +387,24 @@ out:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int mmc_blk_ioctl_copy_to_user(struct mmc_ioc_cmd __user *ic_ptr,
|
||||
struct mmc_blk_ioc_data *idata)
|
||||
{
|
||||
struct mmc_ioc_cmd *ic = &idata->ic;
|
||||
|
||||
if (copy_to_user(&(ic_ptr->response), ic->response,
|
||||
sizeof(ic->response)))
|
||||
return -EFAULT;
|
||||
|
||||
if (!idata->ic.write_flag) {
|
||||
if (copy_to_user((void __user *)(unsigned long)ic->data_ptr,
|
||||
idata->buf, idata->buf_bytes))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
|
||||
u32 retries_max)
|
||||
{
|
||||
@ -447,12 +465,9 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
struct mmc_ioc_cmd __user *ic_ptr)
|
||||
static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
|
||||
struct mmc_blk_ioc_data *idata)
|
||||
{
|
||||
struct mmc_blk_ioc_data *idata;
|
||||
struct mmc_blk_data *md;
|
||||
struct mmc_card *card;
|
||||
struct mmc_command cmd = {0};
|
||||
struct mmc_data data = {0};
|
||||
struct mmc_request mrq = {NULL};
|
||||
@ -461,33 +476,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
int is_rpmb = false;
|
||||
u32 status = 0;
|
||||
|
||||
/*
|
||||
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
|
||||
* whole block device, not on a partition. This prevents overspray
|
||||
* between sibling partitions.
|
||||
*/
|
||||
if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
|
||||
return -EPERM;
|
||||
|
||||
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
|
||||
if (IS_ERR(idata))
|
||||
return PTR_ERR(idata);
|
||||
|
||||
md = mmc_blk_get(bdev->bd_disk);
|
||||
if (!md) {
|
||||
err = -EINVAL;
|
||||
goto cmd_err;
|
||||
}
|
||||
if (!card || !md || !idata)
|
||||
return -EINVAL;
|
||||
|
||||
if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
|
||||
is_rpmb = true;
|
||||
|
||||
card = md->queue.card;
|
||||
if (IS_ERR(card)) {
|
||||
err = PTR_ERR(card);
|
||||
goto cmd_done;
|
||||
}
|
||||
|
||||
cmd.opcode = idata->ic.opcode;
|
||||
cmd.arg = idata->ic.arg;
|
||||
cmd.flags = idata->ic.flags;
|
||||
@ -530,23 +524,21 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
|
||||
mrq.cmd = &cmd;
|
||||
|
||||
mmc_get_card(card);
|
||||
|
||||
err = mmc_blk_part_switch(card, md);
|
||||
if (err)
|
||||
goto cmd_rel_host;
|
||||
return err;
|
||||
|
||||
if (idata->ic.is_acmd) {
|
||||
err = mmc_app_cmd(card->host, card);
|
||||
if (err)
|
||||
goto cmd_rel_host;
|
||||
return err;
|
||||
}
|
||||
|
||||
if (is_rpmb) {
|
||||
err = mmc_set_blockcount(card, data.blocks,
|
||||
idata->ic.write_flag & (1 << 31));
|
||||
if (err)
|
||||
goto cmd_rel_host;
|
||||
return err;
|
||||
}
|
||||
|
||||
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
|
||||
@ -557,7 +549,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
pr_err("%s: ioctl_do_sanitize() failed. err = %d",
|
||||
__func__, err);
|
||||
|
||||
goto cmd_rel_host;
|
||||
return err;
|
||||
}
|
||||
|
||||
mmc_wait_for_req(card->host, &mrq);
|
||||
@ -565,14 +557,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
if (cmd.error) {
|
||||
dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
|
||||
__func__, cmd.error);
|
||||
err = cmd.error;
|
||||
goto cmd_rel_host;
|
||||
return cmd.error;
|
||||
}
|
||||
if (data.error) {
|
||||
dev_err(mmc_dev(card->host), "%s: data error %d\n",
|
||||
__func__, data.error);
|
||||
err = data.error;
|
||||
goto cmd_rel_host;
|
||||
return data.error;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -582,18 +572,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
if (idata->ic.postsleep_min_us)
|
||||
usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
|
||||
|
||||
if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
|
||||
err = -EFAULT;
|
||||
goto cmd_rel_host;
|
||||
}
|
||||
|
||||
if (!idata->ic.write_flag) {
|
||||
if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
|
||||
idata->buf, idata->buf_bytes)) {
|
||||
err = -EFAULT;
|
||||
goto cmd_rel_host;
|
||||
}
|
||||
}
|
||||
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
|
||||
|
||||
if (is_rpmb) {
|
||||
/*
|
||||
@ -607,24 +586,132 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
__func__, status, err);
|
||||
}
|
||||
|
||||
cmd_rel_host:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
|
||||
struct mmc_ioc_cmd __user *ic_ptr)
|
||||
{
|
||||
struct mmc_blk_ioc_data *idata;
|
||||
struct mmc_blk_data *md;
|
||||
struct mmc_card *card;
|
||||
int err = 0, ioc_err = 0;
|
||||
|
||||
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
|
||||
if (IS_ERR(idata))
|
||||
return PTR_ERR(idata);
|
||||
|
||||
md = mmc_blk_get(bdev->bd_disk);
|
||||
if (!md) {
|
||||
err = -EINVAL;
|
||||
goto cmd_err;
|
||||
}
|
||||
|
||||
card = md->queue.card;
|
||||
if (IS_ERR(card)) {
|
||||
err = PTR_ERR(card);
|
||||
goto cmd_done;
|
||||
}
|
||||
|
||||
mmc_get_card(card);
|
||||
|
||||
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
|
||||
|
||||
mmc_put_card(card);
|
||||
|
||||
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
|
||||
|
||||
cmd_done:
|
||||
mmc_blk_put(md);
|
||||
cmd_err:
|
||||
kfree(idata->buf);
|
||||
kfree(idata);
|
||||
return err;
|
||||
return ioc_err ? ioc_err : err;
|
||||
}
|
||||
|
||||
static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
|
||||
struct mmc_ioc_multi_cmd __user *user)
|
||||
{
|
||||
struct mmc_blk_ioc_data **idata = NULL;
|
||||
struct mmc_ioc_cmd __user *cmds = user->cmds;
|
||||
struct mmc_card *card;
|
||||
struct mmc_blk_data *md;
|
||||
int i, err = 0, ioc_err = 0;
|
||||
__u64 num_of_cmds;
|
||||
|
||||
if (copy_from_user(&num_of_cmds, &user->num_of_cmds,
|
||||
sizeof(num_of_cmds)))
|
||||
return -EFAULT;
|
||||
|
||||
if (num_of_cmds > MMC_IOC_MAX_CMDS)
|
||||
return -EINVAL;
|
||||
|
||||
idata = kcalloc(num_of_cmds, sizeof(*idata), GFP_KERNEL);
|
||||
if (!idata)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < num_of_cmds; i++) {
|
||||
idata[i] = mmc_blk_ioctl_copy_from_user(&cmds[i]);
|
||||
if (IS_ERR(idata[i])) {
|
||||
err = PTR_ERR(idata[i]);
|
||||
num_of_cmds = i;
|
||||
goto cmd_err;
|
||||
}
|
||||
}
|
||||
|
||||
md = mmc_blk_get(bdev->bd_disk);
|
||||
if (!md)
|
||||
goto cmd_err;
|
||||
|
||||
card = md->queue.card;
|
||||
if (IS_ERR(card)) {
|
||||
err = PTR_ERR(card);
|
||||
goto cmd_done;
|
||||
}
|
||||
|
||||
mmc_get_card(card);
|
||||
|
||||
for (i = 0; i < num_of_cmds && !ioc_err; i++)
|
||||
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
|
||||
|
||||
mmc_put_card(card);
|
||||
|
||||
/* copy to user if data and response */
|
||||
for (i = 0; i < num_of_cmds && !err; i++)
|
||||
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
|
||||
|
||||
cmd_done:
|
||||
mmc_blk_put(md);
|
||||
cmd_err:
|
||||
for (i = 0; i < num_of_cmds; i++) {
|
||||
kfree(idata[i]->buf);
|
||||
kfree(idata[i]);
|
||||
}
|
||||
kfree(idata);
|
||||
return ioc_err ? ioc_err : err;
|
||||
}
|
||||
|
||||
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
int ret = -EINVAL;
|
||||
if (cmd == MMC_IOC_CMD)
|
||||
ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
|
||||
return ret;
|
||||
/*
|
||||
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
|
||||
* whole block device, not on a partition. This prevents overspray
|
||||
* between sibling partitions.
|
||||
*/
|
||||
if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
|
||||
return -EPERM;
|
||||
|
||||
switch (cmd) {
|
||||
case MMC_IOC_CMD:
|
||||
return mmc_blk_ioctl_cmd(bdev,
|
||||
(struct mmc_ioc_cmd __user *)arg);
|
||||
case MMC_IOC_MULTI_CMD:
|
||||
return mmc_blk_ioctl_multi_cmd(bdev,
|
||||
(struct mmc_ioc_multi_cmd __user *)arg);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
|
@@ -1,13 +1,3 @@
#
# MMC core configuration
#

config MMC_CLKGATE
	bool "MMC host clock gating"
	help
	  This will attempt to aggressively gate the clock to the MMC card.
	  This is done to save power due to gating off the logic and bus
	  noise when the MMC card is not in use. Your host driver has to
	  support handling this in order for it to be of any use.

	  If unsure, say N.
@ -187,8 +187,6 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
|
||||
|
||||
if (mrq->done)
|
||||
mrq->done(mrq);
|
||||
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
@ -206,6 +204,23 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* For sdio rw commands we must wait for card busy otherwise some
|
||||
* sdio devices won't work properly.
|
||||
*/
|
||||
if (mmc_is_io_op(mrq->cmd->opcode) && host->ops->card_busy) {
|
||||
int tries = 500; /* Wait aprox 500ms at maximum */
|
||||
|
||||
while (host->ops->card_busy(host) && --tries)
|
||||
mmc_delay(1);
|
||||
|
||||
if (tries == 0) {
|
||||
mrq->cmd->error = -EBUSY;
|
||||
mmc_request_done(host, mrq);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
host->ops->request(host, mrq);
|
||||
}
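The hunk above makes the core poll host->ops->card_busy() for up to roughly 500 ms before issuing an SDIO read/write command. A host driver opts in simply by reporting the DAT0 line state from that callback; a hedged sketch follows, where foo_mmc_priv, FOO_PRESENT_STATE and FOO_DAT0_LVL are made-up names for illustration, not any real controller's registers.

/* Hypothetical host driver callback: return non-zero while the card is
 * still driving DAT0 low (busy). */
static int foo_mmc_card_busy(struct mmc_host *mmc)
{
	struct foo_mmc_priv *priv = mmc_priv(mmc);
	u32 state = readl(priv->base + FOO_PRESENT_STATE);

	return !(state & FOO_DAT0_LVL);		/* DAT0 low => busy */
}

static const struct mmc_host_ops foo_mmc_ops = {
	/* ... .request, .set_ios, ... */
	.card_busy = foo_mmc_card_busy,
};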
|
||||
|
||||
@ -275,7 +290,6 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
|
||||
mrq->stop->mrq = mrq;
|
||||
}
|
||||
}
|
||||
mmc_host_clk_hold(host);
|
||||
led_trigger_event(host->led, LED_FULL);
|
||||
__mmc_start_request(host, mrq);
|
||||
|
||||
@ -525,11 +539,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
|
||||
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
|
||||
bool is_first_req)
|
||||
{
|
||||
if (host->ops->pre_req) {
|
||||
mmc_host_clk_hold(host);
|
||||
if (host->ops->pre_req)
|
||||
host->ops->pre_req(host, mrq, is_first_req);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -544,11 +555,8 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
|
||||
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
|
||||
int err)
|
||||
{
|
||||
if (host->ops->post_req) {
|
||||
mmc_host_clk_hold(host);
|
||||
if (host->ops->post_req)
|
||||
host->ops->post_req(host, mrq, err);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -833,9 +841,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
|
||||
unsigned int timeout_us, limit_us;
|
||||
|
||||
timeout_us = data->timeout_ns / 1000;
|
||||
if (mmc_host_clk_rate(card->host))
|
||||
if (card->host->ios.clock)
|
||||
timeout_us += data->timeout_clks * 1000 /
|
||||
(mmc_host_clk_rate(card->host) / 1000);
|
||||
(card->host->ios.clock / 1000);
|
||||
|
||||
if (data->flags & MMC_DATA_WRITE)
|
||||
/*
|
||||
@ -1033,8 +1041,6 @@ static inline void mmc_set_ios(struct mmc_host *host)
|
||||
ios->power_mode, ios->chip_select, ios->vdd,
|
||||
ios->bus_width, ios->timing);
|
||||
|
||||
if (ios->clock > 0)
|
||||
mmc_set_ungated(host);
|
||||
host->ops->set_ios(host, ios);
|
||||
}
|
||||
|
||||
@ -1043,17 +1049,15 @@ static inline void mmc_set_ios(struct mmc_host *host)
|
||||
*/
|
||||
void mmc_set_chip_select(struct mmc_host *host, int mode)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.chip_select = mode;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sets the host clock to the highest possible frequency that
|
||||
* is below "hz".
|
||||
*/
|
||||
static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
{
|
||||
WARN_ON(hz && hz < host->f_min);
|
||||
|
||||
@ -1064,68 +1068,6 @@ static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
mmc_set_ios(host);
|
||||
}
|
||||
|
||||
void mmc_set_clock(struct mmc_host *host, unsigned int hz)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
__mmc_set_clock(host, hz);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
/*
|
||||
* This gates the clock by setting it to 0 Hz.
|
||||
*/
|
||||
void mmc_gate_clock(struct mmc_host *host)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
host->clk_old = host->ios.clock;
|
||||
host->ios.clock = 0;
|
||||
host->clk_gated = true;
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
mmc_set_ios(host);
|
||||
}
|
||||
|
||||
/*
|
||||
* This restores the clock from gating by using the cached
|
||||
* clock value.
|
||||
*/
|
||||
void mmc_ungate_clock(struct mmc_host *host)
|
||||
{
|
||||
/*
|
||||
* We should previously have gated the clock, so the clock shall
|
||||
* be 0 here! The clock may however be 0 during initialization,
|
||||
* when some request operations are performed before setting
|
||||
* the frequency. When ungate is requested in that situation
|
||||
* we just ignore the call.
|
||||
*/
|
||||
if (host->clk_old) {
|
||||
BUG_ON(host->ios.clock);
|
||||
/* This call will also set host->clk_gated to false */
|
||||
__mmc_set_clock(host, host->clk_old);
|
||||
}
|
||||
}
|
||||
|
||||
void mmc_set_ungated(struct mmc_host *host)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/*
|
||||
* We've been given a new frequency while the clock is gated,
|
||||
* so make sure we regard this as ungating it.
|
||||
*/
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
host->clk_gated = false;
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
}
|
||||
|
||||
#else
|
||||
void mmc_set_ungated(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
int mmc_execute_tuning(struct mmc_card *card)
|
||||
{
|
||||
struct mmc_host *host = card->host;
|
||||
@ -1140,9 +1082,7 @@ int mmc_execute_tuning(struct mmc_card *card)
|
||||
else
|
||||
opcode = MMC_SEND_TUNING_BLOCK;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
err = host->ops->execute_tuning(host, opcode);
|
||||
mmc_host_clk_release(host);
|
||||
|
||||
if (err)
|
||||
pr_err("%s: tuning execution failed\n", mmc_hostname(host));
|
||||
@ -1157,10 +1097,8 @@ int mmc_execute_tuning(struct mmc_card *card)
|
||||
*/
|
||||
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.bus_mode = mode;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1168,10 +1106,8 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
|
||||
*/
|
||||
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.bus_width = width;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1340,6 +1276,40 @@ struct device_node *mmc_of_find_child_device(struct mmc_host *host,
|
||||
|
||||
#ifdef CONFIG_REGULATOR
|
||||
|
||||
/**
|
||||
* mmc_ocrbitnum_to_vdd - Convert a OCR bit number to its voltage
|
||||
* @vdd_bit: OCR bit number
|
||||
* @min_uV: minimum voltage value (mV)
|
||||
* @max_uV: maximum voltage value (mV)
|
||||
*
|
||||
* This function returns the voltage range according to the provided OCR
|
||||
* bit number. If conversion is not possible a negative errno value returned.
|
||||
*/
|
||||
static int mmc_ocrbitnum_to_vdd(int vdd_bit, int *min_uV, int *max_uV)
|
||||
{
|
||||
int tmp;
|
||||
|
||||
if (!vdd_bit)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* REVISIT mmc_vddrange_to_ocrmask() may have set some
|
||||
* bits this regulator doesn't quite support ... don't
|
||||
* be too picky, most cards and regulators are OK with
|
||||
* a 0.1V range goof (it's a small error percentage).
|
||||
*/
|
||||
tmp = vdd_bit - ilog2(MMC_VDD_165_195);
|
||||
if (tmp == 0) {
|
||||
*min_uV = 1650 * 1000;
|
||||
*max_uV = 1950 * 1000;
|
||||
} else {
|
||||
*min_uV = 1900 * 1000 + tmp * 100 * 1000;
|
||||
*max_uV = *min_uV + 100 * 1000;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_regulator_get_ocrmask - return mask of supported voltages
|
||||
* @supply: regulator to use
|
||||
@ -1403,22 +1373,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
|
||||
int min_uV, max_uV;
|
||||
|
||||
if (vdd_bit) {
|
||||
int tmp;
|
||||
|
||||
/*
|
||||
* REVISIT mmc_vddrange_to_ocrmask() may have set some
|
||||
* bits this regulator doesn't quite support ... don't
|
||||
* be too picky, most cards and regulators are OK with
|
||||
* a 0.1V range goof (it's a small error percentage).
|
||||
*/
|
||||
tmp = vdd_bit - ilog2(MMC_VDD_165_195);
|
||||
if (tmp == 0) {
|
||||
min_uV = 1650 * 1000;
|
||||
max_uV = 1950 * 1000;
|
||||
} else {
|
||||
min_uV = 1900 * 1000 + tmp * 100 * 1000;
|
||||
max_uV = min_uV + 100 * 1000;
|
||||
}
|
||||
mmc_ocrbitnum_to_vdd(vdd_bit, &min_uV, &max_uV);
|
||||
|
||||
result = regulator_set_voltage(supply, min_uV, max_uV);
|
||||
if (result == 0 && !mmc->regulator_enabled) {
|
||||
@ -1439,6 +1394,84 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);
|
||||
|
||||
static int mmc_regulator_set_voltage_if_supported(struct regulator *regulator,
|
||||
int min_uV, int target_uV,
|
||||
int max_uV)
|
||||
{
|
||||
/*
|
||||
* Check if supported first to avoid errors since we may try several
|
||||
* signal levels during power up and don't want to show errors.
|
||||
*/
|
||||
if (!regulator_is_supported_voltage(regulator, min_uV, max_uV))
|
||||
return -EINVAL;
|
||||
|
||||
return regulator_set_voltage_triplet(regulator, min_uV, target_uV,
|
||||
max_uV);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_regulator_set_vqmmc - Set VQMMC as per the ios
|
||||
*
|
||||
* For 3.3V signaling, we try to match VQMMC to VMMC as closely as possible.
|
||||
* That will match the behavior of old boards where VQMMC and VMMC were supplied
|
||||
* by the same supply. The Bus Operating conditions for 3.3V signaling in the
|
||||
* SD card spec also define VQMMC in terms of VMMC.
|
||||
* If this is not possible we'll try the full 2.7-3.6V of the spec.
|
||||
*
|
||||
* For 1.2V and 1.8V signaling we'll try to get as close as possible to the
|
||||
* requested voltage. This is definitely a good idea for UHS where there's a
|
||||
* separate regulator on the card that's trying to make 1.8V and it's best if
|
||||
* we match.
|
||||
*
|
||||
* This function is expected to be used by a controller's
|
||||
* start_signal_voltage_switch() function.
|
||||
*/
|
||||
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
{
|
||||
struct device *dev = mmc_dev(mmc);
|
||||
int ret, volt, min_uV, max_uV;
|
||||
|
||||
/* If no vqmmc supply then we can't change the voltage */
|
||||
if (IS_ERR(mmc->supply.vqmmc))
|
||||
return -EINVAL;
|
||||
|
||||
switch (ios->signal_voltage) {
|
||||
case MMC_SIGNAL_VOLTAGE_120:
|
||||
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
|
||||
1100000, 1200000, 1300000);
|
||||
case MMC_SIGNAL_VOLTAGE_180:
|
||||
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
|
||||
1700000, 1800000, 1950000);
|
||||
case MMC_SIGNAL_VOLTAGE_330:
|
||||
ret = mmc_ocrbitnum_to_vdd(mmc->ios.vdd, &volt, &max_uV);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
dev_dbg(dev, "%s: found vmmc voltage range of %d-%duV\n",
|
||||
__func__, volt, max_uV);
|
||||
|
||||
min_uV = max(volt - 300000, 2700000);
|
||||
max_uV = min(max_uV + 200000, 3600000);
|
||||
|
||||
/*
|
||||
* Due to a limitation in the current implementation of
|
||||
* regulator_set_voltage_triplet() which is taking the lowest
|
||||
* voltage possible if below the target, search for a suitable
|
||||
* voltage in two steps and try to stay close to vmmc
|
||||
* with a 0.3V tolerance at first.
|
||||
*/
|
||||
if (!mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
|
||||
min_uV, volt, max_uV))
|
||||
return 0;
|
||||
|
||||
return mmc_regulator_set_voltage_if_supported(mmc->supply.vqmmc,
|
||||
2700000, volt, 3600000);
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc);
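mmc_regulator_set_vqmmc() is intended to be called from a host controller's ->start_signal_voltage_switch() hook once the vqmmc regulator has been looked up with mmc_regulator_get_supply(); dw_mmc was converted along these lines in this series. A hedged sketch of such a hook, with the foo_* names as placeholders and the pad reconfiguration left as a comment:

/* Illustrative ->start_signal_voltage_switch() using the new helper. */
static int foo_mmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int ret;

	if (IS_ERR_OR_NULL(mmc->supply.vqmmc))
		return 0;	/* no vqmmc regulator, nothing to switch */

	ret = mmc_regulator_set_vqmmc(mmc, ios);
	if (ret)
		dev_err(mmc_dev(mmc), "vqmmc switch to %d failed\n",
			ios->signal_voltage);

	/* a real driver would also reconfigure its own I/O pads here */
	return ret;
}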
|
||||
|
||||
#endif /* CONFIG_REGULATOR */
|
||||
|
||||
int mmc_regulator_get_supply(struct mmc_host *mmc)
|
||||
@ -1515,11 +1548,8 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
|
||||
int old_signal_voltage = host->ios.signal_voltage;
|
||||
|
||||
host->ios.signal_voltage = signal_voltage;
|
||||
if (host->ops->start_signal_voltage_switch) {
|
||||
mmc_host_clk_hold(host);
|
||||
if (host->ops->start_signal_voltage_switch)
|
||||
err = host->ops->start_signal_voltage_switch(host, &host->ios);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
if (err)
|
||||
host->ios.signal_voltage = old_signal_voltage;
|
||||
@ -1553,20 +1583,17 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
|
||||
pr_warn("%s: cannot verify signal voltage switch\n",
|
||||
mmc_hostname(host));
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
|
||||
cmd.opcode = SD_SWITCH_VOLTAGE;
|
||||
cmd.arg = 0;
|
||||
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
|
||||
|
||||
err = mmc_wait_for_cmd(host, &cmd, 0);
|
||||
if (err)
|
||||
goto err_command;
|
||||
return err;
|
||||
|
||||
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
|
||||
return -EIO;
|
||||
|
||||
if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
|
||||
err = -EIO;
|
||||
goto err_command;
|
||||
}
|
||||
/*
|
||||
* The card should drive cmd and dat[0:3] low immediately
|
||||
* after the response of cmd11, but wait 1 ms to be sure
|
||||
@ -1615,9 +1642,6 @@ power_cycle:
|
||||
mmc_power_cycle(host, ocr);
|
||||
}
|
||||
|
||||
err_command:
|
||||
mmc_host_clk_release(host);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -1626,10 +1650,8 @@ err_command:
|
||||
*/
|
||||
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.timing = timing;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1637,10 +1659,8 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
|
||||
*/
|
||||
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
|
||||
{
|
||||
mmc_host_clk_hold(host);
|
||||
host->ios.drv_type = drv_type;
|
||||
mmc_set_ios(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
|
||||
@ -1648,7 +1668,6 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
|
||||
{
|
||||
struct mmc_host *host = card->host;
|
||||
int host_drv_type = SD_DRIVER_TYPE_B;
|
||||
int drive_strength;
|
||||
|
||||
*drv_type = 0;
|
||||
|
||||
@ -1671,14 +1690,10 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
|
||||
* information and let the hardware specific code
|
||||
* return what is possible given the options
|
||||
*/
|
||||
mmc_host_clk_hold(host);
|
||||
drive_strength = host->ops->select_drive_strength(card, max_dtr,
|
||||
host_drv_type,
|
||||
card_drv_type,
|
||||
drv_type);
|
||||
mmc_host_clk_release(host);
|
||||
|
||||
return drive_strength;
|
||||
return host->ops->select_drive_strength(card, max_dtr,
|
||||
host_drv_type,
|
||||
card_drv_type,
|
||||
drv_type);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1697,8 +1712,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
|
||||
if (host->ios.power_mode == MMC_POWER_ON)
|
||||
return;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
|
||||
mmc_pwrseq_pre_power_on(host);
|
||||
|
||||
host->ios.vdd = fls(ocr) - 1;
|
||||
@ -1732,8 +1745,6 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
|
||||
* time required to reach a stable voltage.
|
||||
*/
|
||||
mmc_delay(10);
|
||||
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
void mmc_power_off(struct mmc_host *host)
|
||||
@ -1741,8 +1752,6 @@ void mmc_power_off(struct mmc_host *host)
|
||||
if (host->ios.power_mode == MMC_POWER_OFF)
|
||||
return;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
|
||||
mmc_pwrseq_power_off(host);
|
||||
|
||||
host->ios.clock = 0;
|
||||
@ -1758,8 +1767,6 @@ void mmc_power_off(struct mmc_host *host)
|
||||
* can be successfully turned on again.
|
||||
*/
|
||||
mmc_delay(1);
|
||||
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
|
||||
@ -1975,7 +1982,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
|
||||
*/
|
||||
timeout_clks <<= 1;
|
||||
timeout_us += (timeout_clks * 1000) /
|
||||
(mmc_host_clk_rate(card->host) / 1000);
|
||||
(card->host->ios.clock / 1000);
|
||||
|
||||
erase_timeout = timeout_us / 1000;
|
||||
|
||||
@ -2423,9 +2430,7 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
|
||||
{
|
||||
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
|
||||
return;
|
||||
mmc_host_clk_hold(host);
|
||||
host->ops->hw_reset(host);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
int mmc_hw_reset(struct mmc_host *host)
|
||||
@ -2633,10 +2638,14 @@ void mmc_start_host(struct mmc_host *host)
|
||||
host->f_init = max(freqs[0], host->f_min);
|
||||
host->rescan_disable = 0;
|
||||
host->ios.power_mode = MMC_POWER_UNDEFINED;
|
||||
|
||||
mmc_claim_host(host);
|
||||
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
|
||||
mmc_power_off(host);
|
||||
else
|
||||
mmc_power_up(host, host->ocr_avail);
|
||||
mmc_release_host(host);
|
||||
|
||||
mmc_gpiod_request_cd_irq(host);
|
||||
_mmc_detect_change(host, 0, false);
|
||||
}
|
||||
@ -2674,7 +2683,9 @@ void mmc_stop_host(struct mmc_host *host)
|
||||
|
||||
BUG_ON(host->card);
|
||||
|
||||
mmc_claim_host(host);
|
||||
mmc_power_off(host);
|
||||
mmc_release_host(host);
|
||||
}
|
||||
|
||||
int mmc_power_save_host(struct mmc_host *host)
|
||||
|
@ -40,9 +40,6 @@ void mmc_init_erase(struct mmc_card *card);
|
||||
|
||||
void mmc_set_chip_select(struct mmc_host *host, int mode);
|
||||
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
|
||||
void mmc_gate_clock(struct mmc_host *host);
|
||||
void mmc_ungate_clock(struct mmc_host *host);
|
||||
void mmc_set_ungated(struct mmc_host *host);
|
||||
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
|
||||
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
|
||||
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
|
||||
|
@ -126,6 +126,12 @@ static int mmc_ios_show(struct seq_file *s, void *data)
|
||||
case MMC_TIMING_SD_HS:
|
||||
str = "sd high-speed";
|
||||
break;
|
||||
case MMC_TIMING_UHS_SDR12:
|
||||
str = "sd uhs SDR12";
|
||||
break;
|
||||
case MMC_TIMING_UHS_SDR25:
|
||||
str = "sd uhs SDR25";
|
||||
break;
|
||||
case MMC_TIMING_UHS_SDR50:
|
||||
str = "sd uhs SDR50";
|
||||
break;
|
||||
@ -166,6 +172,25 @@ static int mmc_ios_show(struct seq_file *s, void *data)
|
||||
}
|
||||
seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str);
|
||||
|
||||
switch (ios->drv_type) {
|
||||
case MMC_SET_DRIVER_TYPE_A:
|
||||
str = "driver type A";
|
||||
break;
|
||||
case MMC_SET_DRIVER_TYPE_B:
|
||||
str = "driver type B";
|
||||
break;
|
||||
case MMC_SET_DRIVER_TYPE_C:
|
||||
str = "driver type C";
|
||||
break;
|
||||
case MMC_SET_DRIVER_TYPE_D:
|
||||
str = "driver type D";
|
||||
break;
|
||||
default:
|
||||
str = "invalid";
|
||||
break;
|
||||
}
|
||||
seq_printf(s, "driver type:\t%u (%s)\n", ios->drv_type, str);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -230,11 +255,6 @@ void mmc_add_host_debugfs(struct mmc_host *host)
|
||||
&mmc_clock_fops))
|
||||
goto err_node;
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
|
||||
root, &host->clk_delay))
|
||||
goto err_node;
|
||||
#endif
|
||||
#ifdef CONFIG_FAIL_MMC_REQUEST
|
||||
if (fail_request)
|
||||
setup_fault_attr(&fail_default_attr, fail_request);
|
||||
|
@ -61,246 +61,6 @@ void mmc_unregister_host_class(void)
|
||||
class_unregister(&mmc_host_class);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
static ssize_t clkgate_delay_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct mmc_host *host = cls_dev_to_mmc_host(dev);
|
||||
return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
|
||||
}
|
||||
|
||||
static ssize_t clkgate_delay_store(struct device *dev,
|
||||
struct device_attribute *attr, const char *buf, size_t count)
|
||||
{
|
||||
struct mmc_host *host = cls_dev_to_mmc_host(dev);
|
||||
unsigned long flags, value;
|
||||
|
||||
if (kstrtoul(buf, 0, &value))
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
host->clkgate_delay = value;
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
return count;
|
||||
}
|
||||
|
||||
/*
|
||||
* Enabling clock gating will make the core call out to the host
|
||||
* once up and once down when it performs a request or card operation
|
||||
* intermingled in any fashion. The driver will see this through
|
||||
* set_ios() operations with ios.clock field set to 0 to gate (disable)
|
||||
* the block clock, and to the old frequency to enable it again.
|
||||
*/
|
||||
static void mmc_host_clk_gate_delayed(struct mmc_host *host)
|
||||
{
|
||||
unsigned long tick_ns;
|
||||
unsigned long freq = host->ios.clock;
|
||||
unsigned long flags;
|
||||
|
||||
if (!freq) {
|
||||
pr_debug("%s: frequency set to 0 in disable function, "
|
||||
"this means the clock is already disabled.\n",
|
||||
mmc_hostname(host));
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* New requests may have appeared while we were scheduling,
|
||||
* then there is no reason to delay the check before
|
||||
* clk_disable().
|
||||
*/
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
|
||||
/*
|
||||
* Delay n bus cycles (at least 8 from MMC spec) before attempting
|
||||
* to disable the MCI block clock. The reference count may have
|
||||
* gone up again after this delay due to rescheduling!
|
||||
*/
|
||||
if (!host->clk_requests) {
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
tick_ns = DIV_ROUND_UP(1000000000, freq);
|
||||
ndelay(host->clk_delay * tick_ns);
|
||||
} else {
|
||||
/* New users appeared while waiting for this work */
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
return;
|
||||
}
|
||||
mutex_lock(&host->clk_gate_mutex);
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
if (!host->clk_requests) {
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
/* This will set host->ios.clock to 0 */
|
||||
mmc_gate_clock(host);
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
|
||||
}
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
mutex_unlock(&host->clk_gate_mutex);
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal work. Work to disable the clock at some later point.
|
||||
*/
|
||||
static void mmc_host_clk_gate_work(struct work_struct *work)
|
||||
{
|
||||
struct mmc_host *host = container_of(work, struct mmc_host,
|
||||
clk_gate_work.work);
|
||||
|
||||
mmc_host_clk_gate_delayed(host);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_hold - ungate hardware MCI clocks
|
||||
* @host: host to ungate.
|
||||
*
|
||||
* Makes sure the host ios.clock is restored to a non-zero value
|
||||
* past this call. Increase clock reference count and ungate clock
|
||||
* if we're the first user.
|
||||
*/
|
||||
void mmc_host_clk_hold(struct mmc_host *host)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
/* cancel any clock gating work scheduled by mmc_host_clk_release() */
|
||||
cancel_delayed_work_sync(&host->clk_gate_work);
|
||||
mutex_lock(&host->clk_gate_mutex);
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
if (host->clk_gated) {
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
mmc_ungate_clock(host);
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
|
||||
}
|
||||
host->clk_requests++;
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
mutex_unlock(&host->clk_gate_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_host_may_gate_card - check if this card may be gated
|
||||
* @card: card to check.
|
||||
*/
|
||||
static bool mmc_host_may_gate_card(struct mmc_card *card)
|
||||
{
|
||||
/* If there is no card we may gate it */
|
||||
if (!card)
|
||||
return true;
|
||||
/*
|
||||
* Don't gate SDIO cards! These need to be clocked at all times
|
||||
* since they may be independent systems generating interrupts
|
||||
* and other events. The clock requests counter from the core will
|
||||
* go down to zero since the core does not need it, but we will not
|
||||
* gate the clock, because there is somebody out there that may still
|
||||
* be using it.
|
||||
*/
|
||||
return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_release - gate off hardware MCI clocks
|
||||
* @host: host to gate.
|
||||
*
|
||||
* Calls the host driver with ios.clock set to zero as often as possible
|
||||
* in order to gate off hardware MCI clocks. Decrease clock reference
|
||||
* count and schedule disabling of clock.
|
||||
*/
|
||||
void mmc_host_clk_release(struct mmc_host *host)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
host->clk_requests--;
|
||||
if (mmc_host_may_gate_card(host->card) &&
|
||||
!host->clk_requests)
|
||||
schedule_delayed_work(&host->clk_gate_work,
|
||||
msecs_to_jiffies(host->clkgate_delay));
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_rate - get current clock frequency setting
|
||||
* @host: host to get the clock frequency for.
|
||||
*
|
||||
* Returns current clock frequency regardless of gating.
|
||||
*/
|
||||
unsigned int mmc_host_clk_rate(struct mmc_host *host)
|
||||
{
|
||||
unsigned long freq;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&host->clk_lock, flags);
|
||||
if (host->clk_gated)
|
||||
freq = host->clk_old;
|
||||
else
|
||||
freq = host->ios.clock;
|
||||
spin_unlock_irqrestore(&host->clk_lock, flags);
|
||||
return freq;
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_init - set up clock gating code
|
||||
* @host: host with potential clock to control
|
||||
*/
|
||||
static inline void mmc_host_clk_init(struct mmc_host *host)
|
||||
{
|
||||
host->clk_requests = 0;
|
||||
/* Hold MCI clock for 8 cycles by default */
|
||||
host->clk_delay = 8;
|
||||
/*
|
||||
* Default clock gating delay is 0ms to avoid wasting power.
|
||||
* This value can be tuned by writing into sysfs entry.
|
||||
*/
|
||||
host->clkgate_delay = 0;
|
||||
host->clk_gated = false;
|
||||
INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
|
||||
spin_lock_init(&host->clk_lock);
|
||||
mutex_init(&host->clk_gate_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* mmc_host_clk_exit - shut down clock gating code
|
||||
* @host: host with potential clock to control
|
||||
*/
|
||||
static inline void mmc_host_clk_exit(struct mmc_host *host)
|
||||
{
|
||||
/*
|
||||
* Wait for any outstanding gate and then make sure we're
|
||||
* ungated before exiting.
|
||||
*/
|
||||
if (cancel_delayed_work_sync(&host->clk_gate_work))
|
||||
mmc_host_clk_gate_delayed(host);
|
||||
if (host->clk_gated)
|
||||
mmc_host_clk_hold(host);
|
||||
/* There should be only one user now */
|
||||
WARN_ON(host->clk_requests > 1);
|
||||
}
|
||||
|
||||
static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
|
||||
{
|
||||
host->clkgate_delay_attr.show = clkgate_delay_show;
|
||||
host->clkgate_delay_attr.store = clkgate_delay_store;
|
||||
sysfs_attr_init(&host->clkgate_delay_attr.attr);
|
||||
host->clkgate_delay_attr.attr.name = "clkgate_delay";
|
||||
host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
|
||||
if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
|
||||
pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
|
||||
mmc_hostname(host));
|
||||
}
|
||||
#else
|
||||
|
||||
static inline void mmc_host_clk_init(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmc_host_clk_exit(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
void mmc_retune_enable(struct mmc_host *host)
|
||||
{
|
||||
host->can_retune = 1;
|
||||
@ -507,6 +267,8 @@ int mmc_of_parse(struct mmc_host *host)
|
||||
host->caps |= MMC_CAP_UHS_DDR50;
|
||||
if (of_property_read_bool(np, "cap-power-off-card"))
|
||||
host->caps |= MMC_CAP_POWER_OFF_CARD;
|
||||
if (of_property_read_bool(np, "cap-mmc-hw-reset"))
|
||||
host->caps |= MMC_CAP_HW_RESET;
|
||||
if (of_property_read_bool(np, "cap-sdio-irq"))
|
||||
host->caps |= MMC_CAP_SDIO_IRQ;
|
||||
if (of_property_read_bool(np, "full-pwr-cycle"))
|
||||
@ -583,8 +345,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
mmc_host_clk_init(host);
|
||||
|
||||
spin_lock_init(&host->lock);
|
||||
init_waitqueue_head(&host->wq);
|
||||
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
|
||||
@ -633,7 +393,6 @@ int mmc_add_host(struct mmc_host *host)
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
mmc_add_host_debugfs(host);
|
||||
#endif
|
||||
mmc_host_clk_sysfs_init(host);
|
||||
|
||||
mmc_start_host(host);
|
||||
register_pm_notifier(&host->pm_notify);
|
||||
@ -663,8 +422,6 @@ void mmc_remove_host(struct mmc_host *host)
|
||||
device_del(&host->class_dev);
|
||||
|
||||
led_trigger_unregister_simple(host->led);
|
||||
|
||||
mmc_host_clk_exit(host);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(mmc_remove_host);
|
||||
|
@ -1931,14 +1931,12 @@ static int mmc_reset(struct mmc_host *host)
|
||||
if (!mmc_can_reset(card))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
mmc_set_clock(host, host->f_init);
|
||||
|
||||
host->ops->hw_reset(host);
|
||||
|
||||
/* Set initial state and call mmc_set_ios */
|
||||
mmc_set_initial_state(host);
|
||||
mmc_host_clk_release(host);
|
||||
|
||||
return mmc_init_card(host, card->ocr, card);
|
||||
}
|
||||
@ -2006,14 +2004,13 @@ int mmc_attach_mmc(struct mmc_host *host)
|
||||
|
||||
mmc_release_host(host);
|
||||
err = mmc_add_card(host->card);
|
||||
mmc_claim_host(host);
|
||||
if (err)
|
||||
goto remove_card;
|
||||
|
||||
mmc_claim_host(host);
|
||||
return 0;
|
||||
|
||||
remove_card:
|
||||
mmc_release_host(host);
|
||||
mmc_remove_card(host->card);
|
||||
mmc_claim_host(host);
|
||||
host->card = NULL;
|
||||
|
@ -579,7 +579,6 @@ out:
|
||||
|
||||
return err;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__mmc_switch);
|
||||
|
||||
int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
||||
unsigned int timeout_ms)
|
||||
@@ -589,7 +588,7 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
}
EXPORT_SYMBOL_GPL(mmc_switch);

int mmc_send_tuning(struct mmc_host *host)
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error)
{
    struct mmc_request mrq = {NULL};
    struct mmc_command cmd = {0};
@@ -599,16 +598,13 @@ int mmc_send_tuning(struct mmc_host *host)
    const u8 *tuning_block_pattern;
    int size, err = 0;
    u8 *data_buf;
    u32 opcode;

    if (ios->bus_width == MMC_BUS_WIDTH_8) {
        tuning_block_pattern = tuning_blk_pattern_8bit;
        size = sizeof(tuning_blk_pattern_8bit);
        opcode = MMC_SEND_TUNING_BLOCK_HS200;
    } else if (ios->bus_width == MMC_BUS_WIDTH_4) {
        tuning_block_pattern = tuning_blk_pattern_4bit;
        size = sizeof(tuning_blk_pattern_4bit);
        opcode = MMC_SEND_TUNING_BLOCK;
    } else
        return -EINVAL;

@@ -639,6 +635,9 @@ int mmc_send_tuning(struct mmc_host *host)

    mmc_wait_for_req(host, &mrq);

    if (cmd_error)
        *cmd_error = cmd.error;

    if (cmd.error) {
        err = cmd.error;
        goto out;
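With the extra opcode and cmd_error arguments, a host driver no longer hard-codes which tuning command the core sends, and it can also learn whether the CMD line itself failed. A minimal sketch of a driver's ->execute_tuning() built on the extended helper could look as follows; struct foo_host, foo_set_sample_delay() and FOO_NUM_DELAY_STEPS are hypothetical placeholders, not part of this series.

#include <linux/mmc/host.h>

static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
    struct foo_host *host = mmc_priv(mmc);
    int step, best = -1;
    int cmd_error;

    for (step = 0; step < FOO_NUM_DELAY_STEPS; step++) {
        /* program the (made-up) sample delay line */
        foo_set_sample_delay(host, step);

        /* opcode comes from the core; cmd_error reports CMD-line failures */
        if (!mmc_send_tuning(mmc, opcode, &cmd_error))
            best = step;    /* naive: remember the last passing step */
    }

    if (best < 0)
        return -EIO;

    foo_set_sample_delay(host, best);
    return 0;
}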
|
@ -28,6 +28,9 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width);
|
||||
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
|
||||
int mmc_can_ext_csd(struct mmc_card *card);
|
||||
int mmc_switch_status_error(struct mmc_host *host, u32 status);
|
||||
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
|
||||
unsigned int timeout_ms, bool use_busy_signal, bool send_status,
|
||||
bool ignore_crc);
|
||||
|
||||
#endif
|
||||
|
||||
|
@@ -76,7 +76,7 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
    if (!pwrseq)
        return ERR_PTR(-ENOMEM);

    pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
    pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(pwrseq->reset_gpio)) {
        ret = PTR_ERR(pwrseq->reset_gpio);
        goto free;
@@ -84,11 +84,11 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,

    /*
     * register reset handler to ensure emmc reset also from
     * emergency_reboot(), priority 129 schedules it just before
     * system reboot
     * emergency_reboot(), priority 255 is the highest priority
     * so it will be executed before any system reboot handler.
     */
    pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
    pwrseq->reset_nb.priority = 129;
    pwrseq->reset_nb.priority = 255;
    register_restart_handler(&pwrseq->reset_nb);

    pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
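Bumping the notifier priority from 129 to 255 makes the eMMC hard reset run ahead of every other restart handler, so the card is reset even on emergency_reboot(). For reference, the callback registered here has roughly this shape; it is a simplified sketch of a GPIO-driven RST_n pulse, not the exact pwrseq_emmc code, and the delays and struct foo_pwrseq are illustrative.

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct foo_pwrseq {                     /* hypothetical container */
    struct notifier_block reset_nb;
    struct gpio_desc *reset_gpio;
};

static int foo_emmc_reset_nb(struct notifier_block *nb,
                             unsigned long mode, void *cmd)
{
    struct foo_pwrseq *pwrseq = container_of(nb, struct foo_pwrseq,
                                             reset_nb);

    /* Pulse RST_n: assert briefly, then give the card time to recover */
    gpiod_set_value(pwrseq->reset_gpio, 1);
    udelay(1);
    gpiod_set_value(pwrseq->reset_gpio, 0);
    udelay(200);

    return NOTIFY_DONE;
}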
|
@ -23,18 +23,21 @@ struct mmc_pwrseq_simple {
|
||||
struct mmc_pwrseq pwrseq;
|
||||
bool clk_enabled;
|
||||
struct clk *ext_clk;
|
||||
int nr_gpios;
|
||||
struct gpio_desc *reset_gpios[0];
|
||||
struct gpio_descs *reset_gpios;
|
||||
};
|
||||
|
||||
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
|
||||
int value)
|
||||
{
|
||||
int i;
|
||||
struct gpio_descs *reset_gpios = pwrseq->reset_gpios;
|
||||
int values[reset_gpios->ndescs];
|
||||
|
||||
for (i = 0; i < pwrseq->nr_gpios; i++)
|
||||
if (!IS_ERR(pwrseq->reset_gpios[i]))
|
||||
gpiod_set_value_cansleep(pwrseq->reset_gpios[i], value);
|
||||
for (i = 0; i < reset_gpios->ndescs; i++)
|
||||
values[i] = value;
|
||||
|
||||
gpiod_set_array_value_cansleep(reset_gpios->ndescs, reset_gpios->desc,
|
||||
values);
|
||||
}
|
||||
|
||||
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
|
||||
@ -75,11 +78,8 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
|
||||
{
|
||||
struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
|
||||
struct mmc_pwrseq_simple, pwrseq);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < pwrseq->nr_gpios; i++)
|
||||
if (!IS_ERR(pwrseq->reset_gpios[i]))
|
||||
gpiod_put(pwrseq->reset_gpios[i]);
|
||||
gpiod_put_array(pwrseq->reset_gpios);
|
||||
|
||||
if (!IS_ERR(pwrseq->ext_clk))
|
||||
clk_put(pwrseq->ext_clk);
|
||||
@ -98,14 +98,9 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
|
||||
struct device *dev)
|
||||
{
|
||||
struct mmc_pwrseq_simple *pwrseq;
|
||||
int i, nr_gpios, ret = 0;
|
||||
int ret = 0;
|
||||
|
||||
nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
|
||||
if (nr_gpios < 0)
|
||||
nr_gpios = 0;
|
||||
|
||||
pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
|
||||
sizeof(struct gpio_desc *), GFP_KERNEL);
|
||||
pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
|
||||
if (!pwrseq)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@ -116,22 +111,12 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
|
||||
goto free;
|
||||
}
|
||||
|
||||
for (i = 0; i < nr_gpios; i++) {
|
||||
pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
|
||||
GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(pwrseq->reset_gpios[i]) &&
|
||||
PTR_ERR(pwrseq->reset_gpios[i]) != -ENOENT &&
|
||||
PTR_ERR(pwrseq->reset_gpios[i]) != -ENOSYS) {
|
||||
ret = PTR_ERR(pwrseq->reset_gpios[i]);
|
||||
|
||||
while (i--)
|
||||
gpiod_put(pwrseq->reset_gpios[i]);
|
||||
|
||||
goto clk_put;
|
||||
}
|
||||
pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
|
||||
if (IS_ERR(pwrseq->reset_gpios)) {
|
||||
ret = PTR_ERR(pwrseq->reset_gpios);
|
||||
goto clk_put;
|
||||
}
|
||||
|
||||
pwrseq->nr_gpios = nr_gpios;
|
||||
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
|
||||
|
||||
return &pwrseq->pwrseq;
|
||||
|
@ -35,25 +35,7 @@
|
||||
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This hook just adds a quirk for all sdio devices
|
||||
*/
|
||||
static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
|
||||
{
|
||||
if (mmc_card_sdio(card))
|
||||
card->quirks |= data;
|
||||
}
|
||||
|
||||
static const struct mmc_fixup mmc_fixup_methods[] = {
|
||||
/* by default sdio devices are considered CLK_GATING broken */
|
||||
/* good cards will be whitelisted as they are tested */
|
||||
SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
|
||||
add_quirk_for_sdio_devices,
|
||||
MMC_QUIRK_BROKEN_CLK_GATING),
|
||||
|
||||
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
|
||||
remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
|
||||
|
||||
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
|
||||
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
|
||||
|
||||
|
@ -357,8 +357,6 @@ int mmc_sd_switch_hs(struct mmc_card *card)
|
||||
if (card->sw_caps.hs_max_dtr == 0)
|
||||
return 0;
|
||||
|
||||
err = -EIO;
|
||||
|
||||
status = kmalloc(64, GFP_KERNEL);
|
||||
if (!status) {
|
||||
pr_err("%s: could not allocate a buffer for "
|
||||
@ -628,9 +626,25 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
|
||||
* SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
|
||||
*/
|
||||
if (!mmc_host_is_spi(card->host) &&
|
||||
(card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
|
||||
card->sd_bus_speed == UHS_SDR104_BUS_SPEED))
|
||||
(card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
|
||||
card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
|
||||
card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
|
||||
err = mmc_execute_tuning(card);
|
||||
|
||||
/*
|
||||
* As SD Specifications Part1 Physical Layer Specification
|
||||
* Version 3.01 says, CMD19 tuning is available for unlocked
|
||||
* cards in transfer state of 1.8V signaling mode. The small
|
||||
* difference between v3.00 and 3.01 spec means that CMD19
|
||||
* tuning is also available for DDR50 mode.
|
||||
*/
|
||||
if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
|
||||
pr_warn("%s: ddr50 tuning failed\n",
|
||||
mmc_hostname(card->host));
|
||||
err = 0;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(status);
|
||||
|
||||
@ -786,9 +800,7 @@ static int mmc_sd_get_ro(struct mmc_host *host)
|
||||
if (!host->ops->get_ro)
|
||||
return -1;
|
||||
|
||||
mmc_host_clk_hold(host);
|
||||
ro = host->ops->get_ro(host);
|
||||
mmc_host_clk_release(host);
|
||||
|
||||
return ro;
|
||||
}
|
||||
@ -1231,14 +1243,13 @@ int mmc_attach_sd(struct mmc_host *host)
|
||||
|
||||
mmc_release_host(host);
|
||||
err = mmc_add_card(host->card);
|
||||
mmc_claim_host(host);
|
||||
if (err)
|
||||
goto remove_card;
|
||||
|
||||
mmc_claim_host(host);
|
||||
return 0;
|
||||
|
||||
remove_card:
|
||||
mmc_release_host(host);
|
||||
mmc_remove_card(host->card);
|
||||
host->card = NULL;
|
||||
mmc_claim_host(host);
|
||||
|
@ -897,11 +897,10 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
|
||||
*/
|
||||
static int mmc_sdio_suspend(struct mmc_host *host)
|
||||
{
|
||||
if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
|
||||
mmc_claim_host(host);
|
||||
mmc_claim_host(host);
|
||||
|
||||
if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
|
||||
sdio_disable_wide(host->card);
|
||||
mmc_release_host(host);
|
||||
}
|
||||
|
||||
if (!mmc_card_keep_power(host)) {
|
||||
mmc_power_off(host);
|
||||
@ -910,6 +909,8 @@ static int mmc_sdio_suspend(struct mmc_host *host)
|
||||
mmc_retune_needed(host);
|
||||
}
|
||||
|
||||
mmc_release_host(host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -955,13 +956,10 @@ static int mmc_sdio_resume(struct mmc_host *host)
|
||||
}
|
||||
|
||||
if (!err && host->sdio_irqs) {
|
||||
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
|
||||
if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
|
||||
wake_up_process(host->sdio_irq_thread);
|
||||
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
|
||||
mmc_host_clk_hold(host);
|
||||
else if (host->caps & MMC_CAP_SDIO_IRQ)
|
||||
host->ops->enable_sdio_irq(host, 1);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
mmc_release_host(host);
|
||||
@ -1018,15 +1016,24 @@ out:
|
||||
static int mmc_sdio_runtime_suspend(struct mmc_host *host)
|
||||
{
|
||||
/* No references to the card, cut the power to it. */
|
||||
mmc_claim_host(host);
|
||||
mmc_power_off(host);
|
||||
mmc_release_host(host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mmc_sdio_runtime_resume(struct mmc_host *host)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Restore power and re-initialize. */
|
||||
mmc_claim_host(host);
|
||||
mmc_power_up(host, host->card->ocr);
|
||||
return mmc_sdio_power_restore(host);
|
||||
ret = mmc_sdio_power_restore(host);
|
||||
mmc_release_host(host);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mmc_sdio_reset(struct mmc_host *host)
|
||||
|
@ -168,21 +168,15 @@ static int sdio_irq_thread(void *_host)
|
||||
}
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (host->caps & MMC_CAP_SDIO_IRQ) {
|
||||
mmc_host_clk_hold(host);
|
||||
if (host->caps & MMC_CAP_SDIO_IRQ)
|
||||
host->ops->enable_sdio_irq(host, 1);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
if (!kthread_should_stop())
|
||||
schedule_timeout(period);
|
||||
set_current_state(TASK_RUNNING);
|
||||
} while (!kthread_should_stop());
|
||||
|
||||
if (host->caps & MMC_CAP_SDIO_IRQ) {
|
||||
mmc_host_clk_hold(host);
|
||||
if (host->caps & MMC_CAP_SDIO_IRQ)
|
||||
host->ops->enable_sdio_irq(host, 0);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
|
||||
pr_debug("%s: IRQ thread exiting with code %d\n",
|
||||
mmc_hostname(host), ret);
|
||||
@ -208,9 +202,7 @@ static int sdio_card_irq_get(struct mmc_card *card)
|
||||
return err;
|
||||
}
|
||||
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
|
||||
mmc_host_clk_hold(host);
|
||||
host->ops->enable_sdio_irq(host, 1);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
@ -229,9 +221,7 @@ static int sdio_card_irq_put(struct mmc_card *card)
|
||||
atomic_set(&host->sdio_irq_thread_abort, 1);
|
||||
kthread_stop(host->sdio_irq_thread);
|
||||
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
|
||||
mmc_host_clk_hold(host);
|
||||
host->ops->enable_sdio_irq(host, 0);
|
||||
mmc_host_clk_release(host);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -12,6 +12,8 @@
#ifndef _MMC_SDIO_OPS_H
#define _MMC_SDIO_OPS_H

#include <linux/mmc/sdio.h>

int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr);
int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,
    unsigned addr, u8 in, u8* out);
@@ -19,5 +21,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
    unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz);
int sdio_reset(struct mmc_host *host);

static inline bool mmc_is_io_op(u32 opcode)
{
    return opcode == SD_IO_RW_DIRECT || opcode == SD_IO_RW_EXTENDED;
}

#endif
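mmc_is_io_op() gives the core a cheap way to recognise SDIO I/O commands (CMD52/CMD53), which is what the "wait for card busy signalling before starting SDIO requests" change keys on. A rough sketch of that kind of gate is shown below; it is not the literal core implementation (the real check sits in the request-start path and picks its own timeout), and the 500 ms budget is assumed.

#include <linux/delay.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
/* mmc_is_io_op() itself lives in the core-internal sdio_ops.h above */

static int foo_wait_busy_before_io(struct mmc_host *host,
                                   struct mmc_request *mrq)
{
    int tries = 500;    /* assumed budget of roughly 500 ms */

    if (!host->ops->card_busy || !mmc_is_io_op(mrq->cmd->opcode))
        return 0;

    while (host->ops->card_busy(host) && --tries)
        msleep(1);

    return tries ? 0 : -EBUSY;
}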
|
@ -67,7 +67,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
|
||||
has the effect of scrambling the addresses and formats of data
|
||||
accessed in sizes other than the datum size.
|
||||
|
||||
This is the case for the Freescale eSDHC and Nintendo Wii SDHCI.
|
||||
This is the case for the Nintendo Wii SDHCI.
|
||||
|
||||
config MMC_SDHCI_PCI
|
||||
tristate "SDHCI support on PCI bus"
|
||||
@ -140,8 +140,8 @@ config MMC_SDHCI_OF_AT91
|
||||
config MMC_SDHCI_OF_ESDHC
|
||||
tristate "SDHCI OF support for the Freescale eSDHC controller"
|
||||
depends on MMC_SDHCI_PLTFM
|
||||
depends on PPC
|
||||
select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER
|
||||
depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE
|
||||
select MMC_SDHCI_IO_ACCESSORS
|
||||
help
|
||||
This selects the Freescale eSDHC controller support.
|
||||
|
||||
@ -366,7 +366,7 @@ config MMC_OMAP
|
||||
config MMC_OMAP_HS
|
||||
tristate "TI OMAP High Speed Multimedia Card Interface support"
|
||||
depends on HAS_DMA
|
||||
depends on ARCH_OMAP2PLUS || COMPILE_TEST
|
||||
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
|
||||
help
|
||||
This selects the TI OMAP High Speed Multimedia card Interface.
|
||||
If you have an omap2plus board with a Multimedia Card slot,
|
||||
@ -473,7 +473,7 @@ config MMC_DAVINCI
|
||||
|
||||
config MMC_GOLDFISH
|
||||
tristate "goldfish qemu Multimedia Card Interface support"
|
||||
depends on GOLDFISH
|
||||
depends on GOLDFISH || COMPILE_TEST
|
||||
help
|
||||
This selects the Goldfish Multimedia card Interface emulation
|
||||
found on the Goldfish Android virtual device emulation.
|
||||
@ -615,15 +615,7 @@ config MMC_DW
|
||||
help
|
||||
This selects support for the Synopsys DesignWare Mobile Storage IP
|
||||
block, this provides host support for SD and MMC interfaces, in both
|
||||
PIO and external DMA modes.
|
||||
|
||||
config MMC_DW_IDMAC
|
||||
bool "Internal DMAC interface"
|
||||
depends on MMC_DW
|
||||
help
|
||||
This selects support for the internal DMAC block within the Synopsys
|
||||
Designware Mobile Storage IP block. This disables the external DMA
|
||||
interface.
|
||||
PIO, internal DMA mode and external DMA mode.
|
||||
|
||||
config MMC_DW_PLTFM
|
||||
tristate "Synopsys Designware MCI Support as platform device"
|
||||
@ -652,7 +644,6 @@ config MMC_DW_K3
|
||||
tristate "K3 specific extensions for Synopsys DW Memory Card Interface"
|
||||
depends on MMC_DW
|
||||
select MMC_DW_PLTFM
|
||||
select MMC_DW_IDMAC
|
||||
help
|
||||
This selects support for Hisilicon K3 SoC specific extensions to the
|
||||
Synopsys DesignWare Memory Card Interface driver. Select this option
|
||||
|
@ -9,8 +9,8 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
|
||||
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
|
||||
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
|
||||
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
|
||||
sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o
|
||||
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
|
||||
obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o
|
||||
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
|
||||
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
|
||||
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
|
||||
|
@ -446,7 +446,7 @@ out:
|
||||
return loc;
|
||||
}
|
||||
|
||||
static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
|
||||
static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
||||
{
|
||||
struct dw_mci *host = slot->host;
|
||||
struct dw_mci_exynos_priv_data *priv = host->priv;
|
||||
@ -461,7 +461,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot)
|
||||
mci_writel(host, TMOUT, ~0);
|
||||
smpl = dw_mci_exynos_move_next_clksmpl(host);
|
||||
|
||||
if (!mmc_send_tuning(mmc))
|
||||
if (!mmc_send_tuning(mmc, opcode, NULL))
|
||||
candiates |= (1 << smpl);
|
||||
|
||||
} while (start_smpl != smpl);
|
||||
|
@ -59,6 +59,8 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
|
||||
host->pdata = pdev->dev.platform_data;
|
||||
|
||||
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
/* Get registers' physical base address */
|
||||
host->phy_regs = (void *)(regs->start);
|
||||
host->regs = devm_ioremap_resource(&pdev->dev, regs);
|
||||
if (IS_ERR(host->regs))
|
||||
return PTR_ERR(host->regs);
|
||||
|
@ -13,12 +13,19 @@
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/dw_mmc.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "dw_mmc.h"
|
||||
#include "dw_mmc-pltfm.h"
|
||||
|
||||
#define RK3288_CLKGEN_DIV 2
|
||||
|
||||
struct dw_mci_rockchip_priv_data {
|
||||
struct clk *drv_clk;
|
||||
struct clk *sample_clk;
|
||||
int default_sample_phase;
|
||||
};
|
||||
|
||||
static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
|
||||
{
|
||||
*cmdr |= SDMMC_CMD_USE_HOLD_REG;
|
||||
@ -33,6 +40,7 @@ static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
|
||||
|
||||
static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
|
||||
{
|
||||
struct dw_mci_rockchip_priv_data *priv = host->priv;
|
||||
int ret;
|
||||
unsigned int cclkin;
|
||||
u32 bus_hz;
|
||||
@ -66,6 +74,158 @@ static void dw_mci_rk3288_set_ios(struct dw_mci *host, struct mmc_ios *ios)
|
||||
/* force dw_mci_setup_bus() */
|
||||
host->current_speed = 0;
|
||||
}
|
||||
|
||||
/* Make sure we use phases which we can enumerate with */
|
||||
if (!IS_ERR(priv->sample_clk))
|
||||
clk_set_phase(priv->sample_clk, priv->default_sample_phase);
|
||||
}
|
||||
|
||||
#define NUM_PHASES 360
|
||||
#define TUNING_ITERATION_TO_PHASE(i) (DIV_ROUND_UP((i) * 360, NUM_PHASES))
|
||||
|
||||
static int dw_mci_rk3288_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
|
||||
{
|
||||
struct dw_mci *host = slot->host;
|
||||
struct dw_mci_rockchip_priv_data *priv = host->priv;
|
||||
struct mmc_host *mmc = slot->mmc;
|
||||
int ret = 0;
|
||||
int i;
|
||||
bool v, prev_v = 0, first_v;
|
||||
struct range_t {
|
||||
int start;
|
||||
int end; /* inclusive */
|
||||
};
|
||||
struct range_t *ranges;
|
||||
unsigned int range_count = 0;
|
||||
int longest_range_len = -1;
|
||||
int longest_range = -1;
|
||||
int middle_phase;
|
||||
|
||||
if (IS_ERR(priv->sample_clk)) {
|
||||
dev_err(host->dev, "Tuning clock (sample_clk) not defined.\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
ranges = kmalloc_array(NUM_PHASES / 2 + 1, sizeof(*ranges), GFP_KERNEL);
|
||||
if (!ranges)
|
||||
return -ENOMEM;
|
||||
|
||||
/* Try each phase and extract good ranges */
|
||||
for (i = 0; i < NUM_PHASES; ) {
|
||||
clk_set_phase(priv->sample_clk, TUNING_ITERATION_TO_PHASE(i));
|
||||
|
||||
v = !mmc_send_tuning(mmc, opcode, NULL);
|
||||
|
||||
if (i == 0)
|
||||
first_v = v;
|
||||
|
||||
if ((!prev_v) && v) {
|
||||
range_count++;
|
||||
ranges[range_count-1].start = i;
|
||||
}
|
||||
if (v) {
|
||||
ranges[range_count-1].end = i;
|
||||
i++;
|
||||
} else if (i == NUM_PHASES - 1) {
|
||||
/* No extra skipping rules if we're at the end */
|
||||
i++;
|
||||
} else {
|
||||
/*
|
||||
* No need to check too close to an invalid
|
||||
* one since testing bad phases is slow. Skip
|
||||
* 20 degrees.
|
||||
*/
|
||||
i += DIV_ROUND_UP(20 * NUM_PHASES, 360);
|
||||
|
||||
/* Always test the last one */
|
||||
if (i >= NUM_PHASES)
|
||||
i = NUM_PHASES - 1;
|
||||
}
|
||||
|
||||
prev_v = v;
|
||||
}
|
||||
|
||||
if (range_count == 0) {
|
||||
dev_warn(host->dev, "All phases bad!");
|
||||
ret = -EIO;
|
||||
goto free;
|
||||
}
|
||||
|
||||
/* wrap around case, merge the end points */
|
||||
if ((range_count > 1) && first_v && v) {
|
||||
ranges[0].start = ranges[range_count-1].start;
|
||||
range_count--;
|
||||
}
|
||||
|
||||
if (ranges[0].start == 0 && ranges[0].end == NUM_PHASES - 1) {
|
||||
clk_set_phase(priv->sample_clk, priv->default_sample_phase);
|
||||
dev_info(host->dev, "All phases work, using default phase %d.",
|
||||
priv->default_sample_phase);
|
||||
goto free;
|
||||
}
|
||||
|
||||
/* Find the longest range */
|
||||
for (i = 0; i < range_count; i++) {
|
||||
int len = (ranges[i].end - ranges[i].start + 1);
|
||||
|
||||
if (len < 0)
|
||||
len += NUM_PHASES;
|
||||
|
||||
if (longest_range_len < len) {
|
||||
longest_range_len = len;
|
||||
longest_range = i;
|
||||
}
|
||||
|
||||
dev_dbg(host->dev, "Good phase range %d-%d (%d len)\n",
|
||||
TUNING_ITERATION_TO_PHASE(ranges[i].start),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[i].end),
|
||||
len
|
||||
);
|
||||
}
|
||||
|
||||
dev_dbg(host->dev, "Best phase range %d-%d (%d len)\n",
|
||||
TUNING_ITERATION_TO_PHASE(ranges[longest_range].start),
|
||||
TUNING_ITERATION_TO_PHASE(ranges[longest_range].end),
|
||||
longest_range_len
|
||||
);
|
||||
|
||||
middle_phase = ranges[longest_range].start + longest_range_len / 2;
|
||||
middle_phase %= NUM_PHASES;
|
||||
dev_info(host->dev, "Successfully tuned phase to %d\n",
|
||||
TUNING_ITERATION_TO_PHASE(middle_phase));
|
||||
|
||||
clk_set_phase(priv->sample_clk,
|
||||
TUNING_ITERATION_TO_PHASE(middle_phase));
|
||||
|
||||
free:
|
||||
kfree(ranges);
|
||||
return ret;
|
||||
}
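Worked example of the selection above: with NUM_PHASES set to 360, tuning iteration i maps one-to-one onto i degrees, and the post-failure skip of 20 degrees advances i by DIV_ROUND_UP(20 * 360, 360) = 20 iterations. If the passing phases then form a single window from iteration 100 through 199, the longest range has length 100 and middle_phase becomes 100 + 100 / 2 = 150, so the sample clock is left at a 150-degree phase.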
|
||||
|
||||
static int dw_mci_rk3288_parse_dt(struct dw_mci *host)
|
||||
{
|
||||
struct device_node *np = host->dev->of_node;
|
||||
struct dw_mci_rockchip_priv_data *priv;
|
||||
|
||||
priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
if (of_property_read_u32(np, "rockchip,default-sample-phase",
|
||||
&priv->default_sample_phase))
|
||||
priv->default_sample_phase = 0;
|
||||
|
||||
priv->drv_clk = devm_clk_get(host->dev, "ciu-drive");
|
||||
if (IS_ERR(priv->drv_clk))
|
||||
dev_dbg(host->dev, "ciu_drv not available\n");
|
||||
|
||||
priv->sample_clk = devm_clk_get(host->dev, "ciu-sample");
|
||||
if (IS_ERR(priv->sample_clk))
|
||||
dev_dbg(host->dev, "ciu_sample not available\n");
|
||||
|
||||
host->priv = priv;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dw_mci_rockchip_init(struct dw_mci *host)
|
||||
@ -95,6 +255,8 @@ static const struct dw_mci_drv_data rk3288_drv_data = {
|
||||
.caps = dw_mci_rk3288_dwmmc_caps,
|
||||
.prepare_command = dw_mci_rockchip_prepare_command,
|
||||
.set_ios = dw_mci_rk3288_set_ios,
|
||||
.execute_tuning = dw_mci_rk3288_execute_tuning,
|
||||
.parse_dt = dw_mci_rk3288_parse_dt,
|
||||
.setup_clock = dw_mci_rk3288_setup_clock,
|
||||
.init = dw_mci_rockchip_init,
|
||||
};
|
||||
|
@ -56,7 +56,6 @@
|
||||
#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
|
||||
#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */
|
||||
|
||||
#ifdef CONFIG_MMC_DW_IDMAC
|
||||
#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
|
||||
SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
|
||||
SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
|
||||
@ -102,7 +101,6 @@ struct idmac_desc {
|
||||
|
||||
/* Each descriptor can transfer up to 4KB of data in chained mode */
|
||||
#define DW_MCI_DESC_DATA_LENGTH 0x1000
|
||||
#endif /* CONFIG_MMC_DW_IDMAC */
|
||||
|
||||
static bool dw_mci_reset(struct dw_mci *host);
|
||||
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset);
|
||||
@ -407,7 +405,6 @@ static int dw_mci_get_dma_dir(struct mmc_data *data)
|
||||
return DMA_FROM_DEVICE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_DW_IDMAC
|
||||
static void dw_mci_dma_cleanup(struct dw_mci *host)
|
||||
{
|
||||
struct mmc_data *data = host->data;
|
||||
@ -445,12 +442,21 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host)
|
||||
mci_writel(host, BMOD, temp);
|
||||
}
|
||||
|
||||
static void dw_mci_idmac_complete_dma(struct dw_mci *host)
|
||||
static void dw_mci_dmac_complete_dma(void *arg)
|
||||
{
|
||||
struct dw_mci *host = arg;
|
||||
struct mmc_data *data = host->data;
|
||||
|
||||
dev_vdbg(host->dev, "DMA complete\n");
|
||||
|
||||
if ((host->use_dma == TRANS_MODE_EDMAC) &&
|
||||
data && (data->flags & MMC_DATA_READ))
|
||||
/* Invalidate cache after read */
|
||||
dma_sync_sg_for_cpu(mmc_dev(host->cur_slot->mmc),
|
||||
data->sg,
|
||||
data->sg_len,
|
||||
DMA_FROM_DEVICE);
|
||||
|
||||
host->dma_ops->cleanup(host);
|
||||
|
||||
/*
|
||||
@ -564,7 +570,7 @@ static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data,
|
||||
wmb(); /* drain writebuffer */
|
||||
}
|
||||
|
||||
static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
|
||||
static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
|
||||
{
|
||||
u32 temp;
|
||||
|
||||
@ -589,6 +595,8 @@ static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
|
||||
|
||||
/* Start it running */
|
||||
mci_writel(host, PLDMND, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dw_mci_idmac_init(struct dw_mci *host)
|
||||
@ -669,10 +677,110 @@ static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
|
||||
.init = dw_mci_idmac_init,
|
||||
.start = dw_mci_idmac_start_dma,
|
||||
.stop = dw_mci_idmac_stop_dma,
|
||||
.complete = dw_mci_idmac_complete_dma,
|
||||
.complete = dw_mci_dmac_complete_dma,
|
||||
.cleanup = dw_mci_dma_cleanup,
|
||||
};
|
||||
|
||||
static void dw_mci_edmac_stop_dma(struct dw_mci *host)
|
||||
{
|
||||
dmaengine_terminate_all(host->dms->ch);
|
||||
}
|
||||
|
||||
static int dw_mci_edmac_start_dma(struct dw_mci *host,
|
||||
unsigned int sg_len)
|
||||
{
|
||||
struct dma_slave_config cfg;
|
||||
struct dma_async_tx_descriptor *desc = NULL;
|
||||
struct scatterlist *sgl = host->data->sg;
|
||||
const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
|
||||
u32 sg_elems = host->data->sg_len;
|
||||
u32 fifoth_val;
|
||||
u32 fifo_offset = host->fifo_reg - host->regs;
|
||||
int ret = 0;
|
||||
|
||||
/* Set external dma config: burst size, burst width */
|
||||
cfg.dst_addr = (dma_addr_t)(host->phy_regs + fifo_offset);
|
||||
cfg.src_addr = cfg.dst_addr;
|
||||
cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
|
||||
|
||||
/* Match burst msize with external dma config */
|
||||
fifoth_val = mci_readl(host, FIFOTH);
|
||||
cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
|
||||
cfg.src_maxburst = cfg.dst_maxburst;
|
||||
|
||||
if (host->data->flags & MMC_DATA_WRITE)
|
||||
cfg.direction = DMA_MEM_TO_DEV;
|
||||
else
|
||||
cfg.direction = DMA_DEV_TO_MEM;
|
||||
|
||||
ret = dmaengine_slave_config(host->dms->ch, &cfg);
|
||||
if (ret) {
|
||||
dev_err(host->dev, "Failed to config edmac.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
|
||||
sg_len, cfg.direction,
|
||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||
if (!desc) {
|
||||
dev_err(host->dev, "Can't prepare slave sg.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* Set dw_mci_dmac_complete_dma as callback */
|
||||
desc->callback = dw_mci_dmac_complete_dma;
|
||||
desc->callback_param = (void *)host;
|
||||
dmaengine_submit(desc);
|
||||
|
||||
/* Flush cache before write */
|
||||
if (host->data->flags & MMC_DATA_WRITE)
|
||||
dma_sync_sg_for_device(mmc_dev(host->cur_slot->mmc), sgl,
|
||||
sg_elems, DMA_TO_DEVICE);
|
||||
|
||||
dma_async_issue_pending(host->dms->ch);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dw_mci_edmac_init(struct dw_mci *host)
|
||||
{
|
||||
/* Request external dma channel */
|
||||
host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
|
||||
if (!host->dms)
|
||||
return -ENOMEM;
|
||||
|
||||
host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx");
|
||||
if (!host->dms->ch) {
|
||||
dev_err(host->dev, "Failed to get external DMA channel.\n");
|
||||
kfree(host->dms);
|
||||
host->dms = NULL;
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dw_mci_edmac_exit(struct dw_mci *host)
|
||||
{
|
||||
if (host->dms) {
|
||||
if (host->dms->ch) {
|
||||
dma_release_channel(host->dms->ch);
|
||||
host->dms->ch = NULL;
|
||||
}
|
||||
kfree(host->dms);
|
||||
host->dms = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
|
||||
.init = dw_mci_edmac_init,
|
||||
.exit = dw_mci_edmac_exit,
|
||||
.start = dw_mci_edmac_start_dma,
|
||||
.stop = dw_mci_edmac_stop_dma,
|
||||
.complete = dw_mci_dmac_complete_dma,
|
||||
.cleanup = dw_mci_dma_cleanup,
|
||||
};
|
||||
#endif /* CONFIG_MMC_DW_IDMAC */
|
||||
|
||||
static int dw_mci_pre_dma_transfer(struct dw_mci *host,
|
||||
struct mmc_data *data,
|
||||
@ -752,7 +860,6 @@ static void dw_mci_post_req(struct mmc_host *mmc,
|
||||
|
||||
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
|
||||
{
|
||||
#ifdef CONFIG_MMC_DW_IDMAC
|
||||
unsigned int blksz = data->blksz;
|
||||
const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
|
||||
u32 fifo_width = 1 << host->data_shift;
|
||||
@ -760,6 +867,10 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
|
||||
u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
|
||||
int idx = ARRAY_SIZE(mszs) - 1;
|
||||
|
||||
/* pio should ship this scenario */
|
||||
if (!host->use_dma)
|
||||
return;
|
||||
|
||||
tx_wmark = (host->fifo_depth) / 2;
|
||||
tx_wmark_invers = host->fifo_depth - tx_wmark;
|
||||
|
||||
@ -788,7 +899,6 @@ static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
|
||||
done:
|
||||
fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
|
||||
mci_writel(host, FIFOTH, fifoth_val);
|
||||
#endif
|
||||
}
|
||||
|
||||
static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data)
|
||||
@ -850,10 +960,12 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
|
||||
|
||||
host->using_dma = 1;
|
||||
|
||||
dev_vdbg(host->dev,
|
||||
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
|
||||
(unsigned long)host->sg_cpu, (unsigned long)host->sg_dma,
|
||||
sg_len);
|
||||
if (host->use_dma == TRANS_MODE_IDMAC)
|
||||
dev_vdbg(host->dev,
|
||||
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
|
||||
(unsigned long)host->sg_cpu,
|
||||
(unsigned long)host->sg_dma,
|
||||
sg_len);
|
||||
|
||||
/*
|
||||
* Decide the MSIZE and RX/TX Watermark.
|
||||
@ -875,7 +987,11 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
|
||||
mci_writel(host, INTMASK, temp);
|
||||
spin_unlock_irqrestore(&host->irq_lock, irqflags);
|
||||
|
||||
host->dma_ops->start(host, sg_len);
|
||||
if (host->dma_ops->start(host, sg_len)) {
|
||||
/* We can't do DMA */
|
||||
dev_err(host->dev, "%s: failed to start DMA.\n", __func__);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1177,6 +1293,7 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
|
||||
/* DDR mode set */
|
||||
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
|
||||
ios->timing == MMC_TIMING_UHS_DDR50 ||
|
||||
ios->timing == MMC_TIMING_MMC_HS400)
|
||||
regs |= ((0x1 << slot->id) << 16);
|
||||
else
|
||||
@ -1279,7 +1396,6 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
const struct dw_mci_drv_data *drv_data = host->drv_data;
|
||||
u32 uhs;
|
||||
u32 v18 = SDMMC_UHS_18V << slot->id;
|
||||
int min_uv, max_uv;
|
||||
int ret;
|
||||
|
||||
if (drv_data && drv_data->switch_voltage)
|
||||
@@ -1291,22 +1407,18 @@ static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
     * does no harm but you need to set the regulator directly. Try both.
     */
    uhs = mci_readl(host, UHS_REG);
    if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
        min_uv = 2700000;
        max_uv = 3600000;
    if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
        uhs &= ~v18;
    } else {
        min_uv = 1700000;
        max_uv = 1950000;
    else
        uhs |= v18;
    }

    if (!IS_ERR(mmc->supply.vqmmc)) {
        ret = regulator_set_voltage(mmc->supply.vqmmc, min_uv, max_uv);
        ret = mmc_regulator_set_vqmmc(mmc, ios);

        if (ret) {
            dev_dbg(&mmc->class_dev,
                    "Regulator set error %d: %d - %d\n",
                    ret, min_uv, max_uv);
                    "Regulator set error %d - %s V\n",
                    ret, uhs & v18 ? "1.8" : "3.3");
            return ret;
        }
    }
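The min_uv/max_uv bookkeeping removed here is exactly what the new core helper takes over: mmc_regulator_set_vqmmc() maps ios->signal_voltage onto a regulator request on mmc->supply.vqmmc. The sketch below shows only the general shape of such a helper with illustrative voltage windows; the in-tree helper is more careful (it also tries to respect the VMMC supply and the regulator's constraints), so treat this as an outline, not the actual core code.

#include <linux/mmc/host.h>
#include <linux/regulator/consumer.h>

static int foo_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios)
{
    if (IS_ERR(mmc->supply.vqmmc))
        return -EINVAL;

    switch (ios->signal_voltage) {
    case MMC_SIGNAL_VOLTAGE_120:
        return regulator_set_voltage(mmc->supply.vqmmc,
                                     1100000, 1300000);
    case MMC_SIGNAL_VOLTAGE_180:
        return regulator_set_voltage(mmc->supply.vqmmc,
                                     1700000, 1950000);
    default:    /* MMC_SIGNAL_VOLTAGE_330 */
        return regulator_set_voltage(mmc->supply.vqmmc,
                                     2700000, 3600000);
    }
}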
@ -1427,7 +1539,7 @@ static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
int err = -EINVAL;
|
||||
|
||||
if (drv_data && drv_data->execute_tuning)
|
||||
err = drv_data->execute_tuning(slot);
|
||||
err = drv_data->execute_tuning(slot, opcode);
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -2343,15 +2455,17 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
|
||||
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_DW_IDMAC
|
||||
/* Handle DMA interrupts */
|
||||
if (host->use_dma != TRANS_MODE_IDMAC)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
/* Handle IDMA interrupts */
|
||||
if (host->dma_64bit_address == 1) {
|
||||
pending = mci_readl(host, IDSTS64);
|
||||
if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
|
||||
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
|
||||
SDMMC_IDMAC_INT_RI);
|
||||
mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
|
||||
host->dma_ops->complete(host);
|
||||
host->dma_ops->complete((void *)host);
|
||||
}
|
||||
} else {
|
||||
pending = mci_readl(host, IDSTS);
|
||||
@ -2359,10 +2473,9 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
|
||||
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
|
||||
SDMMC_IDMAC_INT_RI);
|
||||
mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
|
||||
host->dma_ops->complete(host);
|
||||
host->dma_ops->complete((void *)host);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
@ -2471,13 +2584,21 @@ static int dw_mci_init_slot(struct dw_mci *host, unsigned int id)
|
||||
goto err_host_allocated;
|
||||
|
||||
/* Useful defaults if platform data is unset. */
|
||||
if (host->use_dma) {
|
||||
if (host->use_dma == TRANS_MODE_IDMAC) {
|
||||
mmc->max_segs = host->ring_size;
|
||||
mmc->max_blk_size = 65536;
|
||||
mmc->max_seg_size = 0x1000;
|
||||
mmc->max_req_size = mmc->max_seg_size * host->ring_size;
|
||||
mmc->max_blk_count = mmc->max_req_size / 512;
|
||||
} else if (host->use_dma == TRANS_MODE_EDMAC) {
|
||||
mmc->max_segs = 64;
|
||||
mmc->max_blk_size = 65536;
|
||||
mmc->max_blk_count = 65535;
|
||||
mmc->max_req_size =
|
||||
mmc->max_blk_size * mmc->max_blk_count;
|
||||
mmc->max_seg_size = mmc->max_req_size;
|
||||
} else {
|
||||
/* TRANS_MODE_PIO */
|
||||
mmc->max_segs = 64;
|
||||
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
|
||||
mmc->max_blk_count = 512;
|
||||
@ -2517,38 +2638,74 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
|
||||
static void dw_mci_init_dma(struct dw_mci *host)
|
||||
{
|
||||
int addr_config;
|
||||
/* Check ADDR_CONFIG bit in HCON to find IDMAC address bus width */
|
||||
addr_config = (mci_readl(host, HCON) >> 27) & 0x01;
|
||||
struct device *dev = host->dev;
|
||||
struct device_node *np = dev->of_node;
|
||||
|
||||
if (addr_config == 1) {
|
||||
/* host supports IDMAC in 64-bit address mode */
|
||||
host->dma_64bit_address = 1;
|
||||
dev_info(host->dev, "IDMAC supports 64-bit address mode.\n");
|
||||
if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
|
||||
dma_set_coherent_mask(host->dev, DMA_BIT_MASK(64));
|
||||
/*
|
||||
* Check tansfer mode from HCON[17:16]
|
||||
* Clear the ambiguous description of dw_mmc databook:
|
||||
* 2b'00: No DMA Interface -> Actually means using Internal DMA block
|
||||
* 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
|
||||
* 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
|
||||
* 2b'11: Non DW DMA Interface -> pio only
|
||||
* Compared to DesignWare DMA Interface, Generic DMA Interface has a
|
||||
* simpler request/acknowledge handshake mechanism and both of them
|
||||
* are regarded as external dma master for dw_mmc.
|
||||
*/
|
||||
host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
|
||||
if (host->use_dma == DMA_INTERFACE_IDMA) {
|
||||
host->use_dma = TRANS_MODE_IDMAC;
|
||||
} else if (host->use_dma == DMA_INTERFACE_DWDMA ||
|
||||
host->use_dma == DMA_INTERFACE_GDMA) {
|
||||
host->use_dma = TRANS_MODE_EDMAC;
|
||||
} else {
|
||||
/* host supports IDMAC in 32-bit address mode */
|
||||
host->dma_64bit_address = 0;
|
||||
dev_info(host->dev, "IDMAC supports 32-bit address mode.\n");
|
||||
}
|
||||
|
||||
/* Alloc memory for sg translation */
|
||||
host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
|
||||
&host->sg_dma, GFP_KERNEL);
|
||||
if (!host->sg_cpu) {
|
||||
dev_err(host->dev, "%s: could not alloc DMA memory\n",
|
||||
__func__);
|
||||
goto no_dma;
|
||||
}
|
||||
|
||||
/* Determine which DMA interface to use */
|
||||
#ifdef CONFIG_MMC_DW_IDMAC
|
||||
host->dma_ops = &dw_mci_idmac_ops;
|
||||
dev_info(host->dev, "Using internal DMA controller.\n");
|
||||
#endif
|
||||
if (host->use_dma == TRANS_MODE_IDMAC) {
|
||||
/*
|
||||
* Check ADDR_CONFIG bit in HCON to find
|
||||
* IDMAC address bus width
|
||||
*/
|
||||
addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
|
||||
|
||||
if (!host->dma_ops)
|
||||
goto no_dma;
|
||||
if (addr_config == 1) {
|
||||
/* host supports IDMAC in 64-bit address mode */
|
||||
host->dma_64bit_address = 1;
|
||||
dev_info(host->dev,
|
||||
"IDMAC supports 64-bit address mode.\n");
|
||||
if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
|
||||
dma_set_coherent_mask(host->dev,
|
||||
DMA_BIT_MASK(64));
|
||||
} else {
|
||||
/* host supports IDMAC in 32-bit address mode */
|
||||
host->dma_64bit_address = 0;
|
||||
dev_info(host->dev,
|
||||
"IDMAC supports 32-bit address mode.\n");
|
||||
}
|
||||
|
||||
/* Alloc memory for sg translation */
|
||||
host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE,
|
||||
&host->sg_dma, GFP_KERNEL);
|
||||
if (!host->sg_cpu) {
|
||||
dev_err(host->dev,
|
||||
"%s: could not alloc DMA memory\n",
|
||||
__func__);
|
||||
goto no_dma;
|
||||
}
|
||||
|
||||
host->dma_ops = &dw_mci_idmac_ops;
|
||||
dev_info(host->dev, "Using internal DMA controller.\n");
|
||||
} else {
|
||||
/* TRANS_MODE_EDMAC: check dma bindings again */
|
||||
if ((of_property_count_strings(np, "dma-names") < 0) ||
|
||||
(!of_find_property(np, "dmas", NULL))) {
|
||||
goto no_dma;
|
||||
}
|
||||
host->dma_ops = &dw_mci_edmac_ops;
|
||||
dev_info(host->dev, "Using external DMA controller.\n");
|
||||
}
|
||||
|
||||
if (host->dma_ops->init && host->dma_ops->start &&
|
||||
host->dma_ops->stop && host->dma_ops->cleanup) {
|
||||
@ -2562,12 +2719,11 @@ static void dw_mci_init_dma(struct dw_mci *host)
|
||||
goto no_dma;
|
||||
}
|
||||
|
||||
host->use_dma = 1;
|
||||
return;
|
||||
|
||||
no_dma:
|
||||
dev_info(host->dev, "Using PIO mode.\n");
|
||||
host->use_dma = 0;
|
||||
host->use_dma = TRANS_MODE_PIO;
|
||||
}
|
||||
|
||||
static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
|
||||
@ -2650,10 +2806,9 @@ static bool dw_mci_reset(struct dw_mci *host)
|
||||
}
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_MMC_DW_IDMAC)
|
||||
/* It is also recommended that we reset and reprogram idmac */
|
||||
dw_mci_idmac_reset(host);
|
||||
#endif
|
||||
if (host->use_dma == TRANS_MODE_IDMAC)
|
||||
/* It is also recommended that we reset and reprogram idmac */
|
||||
dw_mci_idmac_reset(host);
|
||||
|
||||
ret = true;
|
||||
|
||||
@ -2890,7 +3045,7 @@ int dw_mci_probe(struct dw_mci *host)
|
||||
* Get the host data width - this assumes that HCON has been set with
|
||||
* the correct values.
|
||||
*/
|
||||
i = (mci_readl(host, HCON) >> 7) & 0x7;
|
||||
i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
|
||||
if (!i) {
|
||||
host->push_data = dw_mci_push_data16;
|
||||
host->pull_data = dw_mci_pull_data16;
|
||||
@ -2972,7 +3127,7 @@ int dw_mci_probe(struct dw_mci *host)
|
||||
if (host->pdata->num_slots)
|
||||
host->num_slots = host->pdata->num_slots;
|
||||
else
|
||||
host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1;
|
||||
host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
|
||||
|
||||
/*
|
||||
* Enable interrupts for command done, data over, data empty,
|
||||
@ -3067,6 +3222,9 @@ EXPORT_SYMBOL(dw_mci_remove);
|
||||
*/
|
||||
int dw_mci_suspend(struct dw_mci *host)
|
||||
{
|
||||
if (host->use_dma && host->dma_ops->exit)
|
||||
host->dma_ops->exit(host);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(dw_mci_suspend);
|
||||
|
@ -148,6 +148,15 @@
|
||||
#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \
|
||||
((r) & 0xFFF) << 16 | \
|
||||
((t) & 0xFFF))
|
||||
/* HCON register defines */
|
||||
#define DMA_INTERFACE_IDMA (0x0)
|
||||
#define DMA_INTERFACE_DWDMA (0x1)
|
||||
#define DMA_INTERFACE_GDMA (0x2)
|
||||
#define DMA_INTERFACE_NODMA (0x3)
|
||||
#define SDMMC_GET_TRANS_MODE(x) (((x)>>16) & 0x3)
|
||||
#define SDMMC_GET_SLOT_NUM(x) ((((x)>>1) & 0x1F) + 1)
|
||||
#define SDMMC_GET_HDATA_WIDTH(x) (((x)>>7) & 0x7)
|
||||
#define SDMMC_GET_ADDR_CONFIG(x) (((x)>>27) & 0x1)
|
||||
/* Internal DMAC interrupt defines */
|
||||
#define SDMMC_IDMAC_INT_AI BIT(9)
|
||||
#define SDMMC_IDMAC_INT_NI BIT(8)
|
||||
@ -163,7 +172,7 @@
|
||||
/* Version ID register define */
|
||||
#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
|
||||
/* Card read threshold */
|
||||
#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x))
|
||||
#define SDMMC_SET_RD_THLD(v, x) (((v) & 0xFFF) << 16 | (x))
|
||||
#define SDMMC_UHS_18V BIT(0)
|
||||
/* All ctrl reset bits */
|
||||
#define SDMMC_CTRL_ALL_RESET_FLAGS \
|
||||
@ -281,7 +290,7 @@ struct dw_mci_drv_data {
|
||||
void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
|
||||
void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
|
||||
int (*parse_dt)(struct dw_mci *host);
|
||||
int (*execute_tuning)(struct dw_mci_slot *slot);
|
||||
int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
|
||||
int (*prepare_hs400_tuning)(struct dw_mci *host,
|
||||
struct mmc_ios *ios);
|
||||
int (*switch_voltage)(struct mmc_host *mmc,
|
||||
|
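The new HCON accessors above replace open-coded shifts scattered through dw_mmc.c. As a worked decode with a made-up HCON value that has only bits 7 and 16 set: SDMMC_GET_TRANS_MODE() returns 1, i.e. DMA_INTERFACE_DWDMA, which dw_mci_init_dma() maps to TRANS_MODE_EDMAC, while the other helpers report a 32-bit host data width, one slot and 32-bit IDMAC addressing. In code form (illustrative only; assumes <linux/mmc/dw_mmc.h> and the dw_mmc.h defines above):

static void foo_show_hcon_fields(struct dw_mci *host)
{
    u32 hcon = BIT(16) | BIT(7);            /* hypothetical HCON value */

    dev_info(host->dev, "mode %d width-code %d slots %d addr64 %d\n",
             SDMMC_GET_TRANS_MODE(hcon),    /* 1 -> DMA_INTERFACE_DWDMA */
             SDMMC_GET_HDATA_WIDTH(hcon),   /* 1 -> 32-bit data width */
             SDMMC_GET_SLOT_NUM(hcon),      /* 1 slot */
             SDMMC_GET_ADDR_CONFIG(hcon));  /* 0 -> 32-bit IDMAC addresses */
}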
@ -1511,6 +1511,7 @@ static const struct of_device_id mmc_spi_of_match_table[] = {
|
||||
{ .compatible = "mmc-spi-slot", },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mmc_spi_of_match_table);
|
||||
|
||||
static struct spi_driver mmc_spi_driver = {
|
||||
.driver = {
|
||||
|
@ -711,6 +711,7 @@ static const struct of_device_id moxart_mmc_match[] = {
|
||||
{ .compatible = "faraday,ftsdc010" },
|
||||
{ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, moxart_mmc_match);
|
||||
|
||||
static struct platform_driver moxart_mmc_driver = {
|
||||
.probe = moxart_probe,
|
||||
|
@ -26,6 +26,7 @@
|
||||
#include <linux/pm.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include <linux/mmc/card.h>
|
||||
@ -64,6 +65,7 @@
|
||||
#define SDC_RESP2 0x48
|
||||
#define SDC_RESP3 0x4c
|
||||
#define SDC_BLK_NUM 0x50
|
||||
#define EMMC_IOCON 0x7c
|
||||
#define SDC_ACMD_RESP 0x80
|
||||
#define MSDC_DMA_SA 0x90
|
||||
#define MSDC_DMA_CTRL 0x98
|
||||
@ -71,6 +73,8 @@
|
||||
#define MSDC_PATCH_BIT 0xb0
|
||||
#define MSDC_PATCH_BIT1 0xb4
|
||||
#define MSDC_PAD_TUNE 0xec
|
||||
#define PAD_DS_TUNE 0x188
|
||||
#define EMMC50_CFG0 0x208
|
||||
|
||||
/*--------------------------------------------------------------------------*/
|
||||
/* Register Mask */
|
||||
@ -87,6 +91,7 @@
|
||||
#define MSDC_CFG_CKSTB (0x1 << 7) /* R */
|
||||
#define MSDC_CFG_CKDIV (0xff << 8) /* RW */
|
||||
#define MSDC_CFG_CKMOD (0x3 << 16) /* RW */
|
||||
#define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */
|
||||
|
||||
/* MSDC_IOCON mask */
|
||||
#define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */
|
||||
@ -204,6 +209,17 @@
|
||||
#define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */
|
||||
#define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */
|
||||
|
||||
#define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */
|
||||
#define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */
|
||||
|
||||
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
|
||||
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
|
||||
#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
|
||||
|
||||
#define EMMC50_CFG_PADCMD_LATCHCK (0x1 << 0) /* RW */
|
||||
#define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */
|
||||
#define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */
|
||||
|
||||
#define REQ_CMD_EIO (0x1 << 0)
|
||||
#define REQ_CMD_TMO (0x1 << 1)
|
||||
#define REQ_DAT_ERR (0x1 << 2)
|
||||
@ -219,6 +235,7 @@
|
||||
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
|
||||
#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */
|
||||
|
||||
#define PAD_DELAY_MAX 32 /* PAD delay cells */
|
||||
/*--------------------------------------------------------------------------*/
|
||||
/* Descriptor Structure */
|
||||
/*--------------------------------------------------------------------------*/
|
||||
@ -265,6 +282,14 @@ struct msdc_save_para {
|
||||
u32 pad_tune;
|
||||
u32 patch_bit0;
|
||||
u32 patch_bit1;
|
||||
u32 pad_ds_tune;
|
||||
u32 emmc50_cfg0;
|
||||
};
|
||||
|
||||
struct msdc_delay_phase {
|
||||
u8 maxlen;
|
||||
u8 start;
|
||||
u8 final_phase;
|
||||
};
|
||||
|
||||
struct msdc_host {
|
||||
@ -297,8 +322,9 @@ struct msdc_host {
|
||||
u32 mclk; /* mmc subsystem clock frequency */
|
||||
u32 src_clk_freq; /* source clock frequency */
|
||||
u32 sclk; /* SD/MS bus clock frequency */
|
||||
bool ddr;
|
||||
unsigned char timing;
|
||||
bool vqmmc_enabled;
|
||||
u32 hs400_ds_delay;
|
||||
struct msdc_save_para save_para; /* used when gate HCLK */
|
||||
};
|
||||
|
||||
@ -353,7 +379,10 @@ static void msdc_reset_hw(struct msdc_host *host)
|
||||
static void msdc_cmd_next(struct msdc_host *host,
|
||||
struct mmc_request *mrq, struct mmc_command *cmd);
|
||||
|
||||
static u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
|
||||
static const u32 cmd_ints_mask = MSDC_INTEN_CMDRDY | MSDC_INTEN_RSPCRCERR |
|
||||
MSDC_INTEN_CMDTMO | MSDC_INTEN_ACMDRDY |
|
||||
MSDC_INTEN_ACMDCRCERR | MSDC_INTEN_ACMDTMO;
|
||||
static const u32 data_ints_mask = MSDC_INTEN_XFER_COMPL | MSDC_INTEN_DATTMO |
|
||||
MSDC_INTEN_DATCRCERR | MSDC_INTEN_DMA_BDCSERR |
|
||||
MSDC_INTEN_DMA_GPDCSERR | MSDC_INTEN_DMA_PROTECT;
|
||||
|
||||
@ -485,7 +514,7 @@ static void msdc_ungate_clock(struct msdc_host *host)
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
|
||||
static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
|
||||
{
|
||||
u32 mode;
|
||||
u32 flags;
|
||||
@ -501,8 +530,15 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
|
||||
|
||||
flags = readl(host->base + MSDC_INTEN);
|
||||
sdr_clr_bits(host->base + MSDC_INTEN, flags);
|
||||
if (ddr) { /* may need to modify later */
|
||||
mode = 0x2; /* ddr mode and use divisor */
|
||||
sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE);
|
||||
if (timing == MMC_TIMING_UHS_DDR50 ||
|
||||
timing == MMC_TIMING_MMC_DDR52 ||
|
||||
timing == MMC_TIMING_MMC_HS400) {
|
||||
if (timing == MMC_TIMING_MMC_HS400)
|
||||
mode = 0x3;
|
||||
else
|
||||
mode = 0x2; /* ddr mode and use divisor */
|
||||
|
||||
if (hz >= (host->src_clk_freq >> 2)) {
|
||||
div = 0; /* mean div = 1/4 */
|
||||
sclk = host->src_clk_freq >> 2; /* sclk = clk / 4 */
|
||||
@ -511,6 +547,14 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
|
||||
sclk = (host->src_clk_freq >> 2) / div;
|
||||
div = (div >> 1);
|
||||
}
|
||||
|
||||
if (timing == MMC_TIMING_MMC_HS400 &&
|
||||
hz >= (host->src_clk_freq >> 1)) {
|
||||
sdr_set_bits(host->base + MSDC_CFG,
|
||||
MSDC_CFG_HS400_CK_MODE);
|
||||
sclk = host->src_clk_freq >> 1;
|
||||
div = 0; /* div is ignore when bit18 is set */
|
||||
}
|
||||
} else if (hz >= host->src_clk_freq) {
|
||||
mode = 0x1; /* no divisor */
|
||||
div = 0;
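Worked example for the HS400 branch above: with an 800 MHz source clock and an HS400 request for 400 MHz, the first test (hz >= src/4) picks div = 0 and sclk = 200 MHz, but because hz >= src/2 the HS400 check then sets MSDC_CFG_HS400_CK_MODE, forces sclk to src/2 = 400 MHz and leaves the divisor field ignored.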
|
||||
@ -532,12 +576,12 @@ static void msdc_set_mclk(struct msdc_host *host, int ddr, u32 hz)
|
||||
cpu_relax();
|
||||
host->sclk = sclk;
|
||||
host->mclk = hz;
|
||||
host->ddr = ddr;
|
||||
host->timing = timing;
|
||||
/* need because clk changed. */
|
||||
msdc_set_timeout(host, host->timeout_ns, host->timeout_clks);
|
||||
sdr_set_bits(host->base + MSDC_INTEN, flags);
|
||||
|
||||
dev_dbg(host->dev, "sclk: %d, ddr: %d\n", host->sclk, ddr);
|
||||
dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->sclk, timing);
|
||||
}
|
||||
|
||||
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
|
||||
@ -725,11 +769,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
|
||||
if (done)
|
||||
return true;
|
||||
|
||||
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY |
|
||||
MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO |
|
||||
MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR |
|
||||
MSDC_INTEN_ACMDTMO);
|
||||
writel(cmd->arg, host->base + SDC_ARG);
|
||||
sdr_clr_bits(host->base + MSDC_INTEN, cmd_ints_mask);
|
||||
|
||||
if (cmd->flags & MMC_RSP_PRESENT) {
|
||||
if (cmd->flags & MMC_RSP_136) {
|
||||
@ -819,10 +859,7 @@ static void msdc_start_command(struct msdc_host *host,
|
||||
rawcmd = msdc_cmd_prepare_raw_cmd(host, mrq, cmd);
|
||||
mod_delayed_work(system_wq, &host->req_timeout, DAT_TIMEOUT);
|
||||
|
||||
sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CMDRDY |
|
||||
MSDC_INTEN_RSPCRCERR | MSDC_INTEN_CMDTMO |
|
||||
MSDC_INTEN_ACMDRDY | MSDC_INTEN_ACMDCRCERR |
|
||||
MSDC_INTEN_ACMDTMO);
|
||||
sdr_set_bits(host->base + MSDC_INTEN, cmd_ints_mask);
|
||||
writel(cmd->arg, host->base + SDC_ARG);
|
||||
writel(rawcmd, host->base + SDC_CMD);
|
||||
}
|
||||
@ -896,7 +933,7 @@ static void msdc_data_xfer_next(struct msdc_host *host,
|
||||
struct mmc_request *mrq, struct mmc_data *data)
|
||||
{
|
||||
if (mmc_op_multi(mrq->cmd->opcode) && mrq->stop && !mrq->stop->error &&
|
||||
(!data->bytes_xfered || !mrq->sbc))
|
||||
!mrq->sbc)
|
||||
msdc_start_command(host, mrq, mrq->stop);
|
||||
else
|
||||
msdc_request_done(host, mrq);
|
||||
@ -942,6 +979,8 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events,
|
||||
|
||||
if (events & MSDC_INT_DATTMO)
|
||||
data->error = -ETIMEDOUT;
|
||||
else if (events & MSDC_INT_DATCRCERR)
|
||||
data->error = -EILSEQ;
|
||||
|
||||
dev_err(host->dev, "%s: cmd=%d; blocks=%d",
|
||||
__func__, mrq->cmd->opcode, data->blocks);
|
||||
@ -1113,10 +1152,12 @@ static void msdc_init_hw(struct msdc_host *host)
|
||||
|
||||
writel(0, host->base + MSDC_PAD_TUNE);
|
||||
writel(0, host->base + MSDC_IOCON);
|
||||
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 1);
|
||||
writel(0x403c004f, host->base + MSDC_PATCH_BIT);
|
||||
sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0);
|
||||
writel(0x403c0046, host->base + MSDC_PATCH_BIT);
|
||||
sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1);
|
||||
writel(0xffff0089, host->base + MSDC_PATCH_BIT1);
|
||||
sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL);
|
||||
|
||||
/* Configure to enable SDIO mode.
|
||||
* it's must otherwise sdio cmd5 failed
|
||||
*/
|
||||
@ -1148,11 +1189,14 @@ static void msdc_init_gpd_bd(struct msdc_host *host, struct msdc_dma *dma)
|
||||
struct mt_bdma_desc *bd = dma->bd;
|
||||
int i;
|
||||
|
||||
memset(gpd, 0, sizeof(struct mt_gpdma_desc));
|
||||
memset(gpd, 0, sizeof(struct mt_gpdma_desc) * 2);
|
||||
|
||||
gpd->gpd_info = GPDMA_DESC_BDP; /* hwo, cs, bd pointer */
|
||||
gpd->ptr = (u32)dma->bd_addr; /* physical address */
|
||||
|
||||
/* gpd->next is must set for desc DMA
|
||||
* That's why must alloc 2 gpd structure.
|
||||
*/
|
||||
gpd->next = (u32)dma->gpd_addr + sizeof(struct mt_gpdma_desc);
|
||||
memset(bd, 0, sizeof(struct mt_bdma_desc) * MAX_BD_NUM);
|
||||
for (i = 0; i < (MAX_BD_NUM - 1); i++)
|
||||
bd[i].next = (u32)dma->bd_addr + sizeof(*bd) * (i + 1);
|
||||
@ -1162,20 +1206,16 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
{
|
||||
struct msdc_host *host = mmc_priv(mmc);
|
||||
int ret;
|
||||
u32 ddr = 0;
|
||||
|
||||
pm_runtime_get_sync(host->dev);
|
||||
|
||||
if (ios->timing == MMC_TIMING_UHS_DDR50 ||
|
||||
ios->timing == MMC_TIMING_MMC_DDR52)
|
||||
ddr = 1;
|
||||
|
||||
msdc_set_buswidth(host, ios->bus_width);
|
||||
|
||||
/* Suspend/Resume will do power off/on */
|
||||
switch (ios->power_mode) {
|
||||
case MMC_POWER_UP:
|
||||
if (!IS_ERR(mmc->supply.vmmc)) {
|
||||
msdc_init_hw(host);
|
||||
ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
|
||||
ios->vdd);
|
||||
if (ret) {
|
||||
@ -1206,14 +1246,207 @@ static void msdc_ops_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
break;
|
||||
}
|
||||
|
||||
if (host->mclk != ios->clock || host->ddr != ddr)
|
||||
msdc_set_mclk(host, ddr, ios->clock);
|
||||
if (host->mclk != ios->clock || host->timing != ios->timing)
|
||||
msdc_set_mclk(host, ios->timing, ios->clock);
|
||||
|
||||
end:
|
||||
pm_runtime_mark_last_busy(host->dev);
|
||||
pm_runtime_put_autosuspend(host->dev);
|
||||
}
|
||||
|
||||
static u32 test_delay_bit(u32 delay, u32 bit)
|
||||
{
|
||||
bit %= PAD_DELAY_MAX;
|
||||
return delay & (1 << bit);
|
||||
}
|
||||
|
||||
static int get_delay_len(u32 delay, u32 start_bit)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < (PAD_DELAY_MAX - start_bit); i++) {
|
||||
if (test_delay_bit(delay, start_bit + i) == 0)
|
||||
return i;
|
||||
}
|
||||
return PAD_DELAY_MAX - start_bit;
|
||||
}
|
||||
|
||||
static struct msdc_delay_phase get_best_delay(struct msdc_host *host, u32 delay)
|
||||
{
|
||||
int start = 0, len = 0;
|
||||
int start_final = 0, len_final = 0;
|
||||
u8 final_phase = 0xff;
|
||||
struct msdc_delay_phase delay_phase;
|
||||
|
||||
if (delay == 0) {
|
||||
dev_err(host->dev, "phase error: [map:%x]\n", delay);
|
||||
delay_phase.final_phase = final_phase;
|
||||
return delay_phase;
|
||||
}
|
||||
|
||||
while (start < PAD_DELAY_MAX) {
|
||||
len = get_delay_len(delay, start);
|
||||
if (len_final < len) {
|
||||
start_final = start;
|
||||
len_final = len;
|
||||
}
|
||||
start += len ? len : 1;
|
||||
if (len >= 8 && start_final < 4)
|
||||
break;
|
||||
}
|
||||
|
||||
/* The rule is that to find the smallest delay cell */
|
||||
if (start_final == 0)
|
||||
final_phase = (start_final + len_final / 3) % PAD_DELAY_MAX;
|
||||
else
|
||||
final_phase = (start_final + len_final / 2) % PAD_DELAY_MAX;
|
||||
dev_info(host->dev, "phase: [map:%x] [maxlen:%d] [final:%d]\n",
|
||||
delay, len_final, final_phase);
|
||||
|
||||
delay_phase.maxlen = len_final;
|
||||
delay_phase.start = start_final;
|
||||
delay_phase.final_phase = final_phase;
|
||||
return delay_phase;
|
||||
}
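Worked example: if the per-cell tuning pass map comes out as delay = 0x0003ff00, the passing cells are 8 through 17, so len_final = 10 and start_final = 8. Because start_final is non-zero, the final phase is (8 + 10 / 2) % 32 = 13, i.e. the middle of the longest passing window; a window that starts at cell 0 would instead be sampled a third of the way in so that the chosen delay stays on the small-delay side.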
|
||||
|
||||
static int msdc_tune_response(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct msdc_host *host = mmc_priv(mmc);
|
||||
u32 rise_delay = 0, fall_delay = 0;
|
||||
struct msdc_delay_phase final_rise_delay, final_fall_delay;
|
||||
u8 final_delay, final_maxlen;
|
||||
int cmd_err;
|
||||
int i;
|
||||
|
||||
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
|
||||
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE,
|
||||
MSDC_PAD_TUNE_CMDRDLY, i);
|
||||
mmc_send_tuning(mmc, opcode, &cmd_err);
|
||||
if (!cmd_err)
|
||||
rise_delay |= (1 << i);
|
||||
}
|
||||
|
||||
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
|
||||
for (i = 0; i < PAD_DELAY_MAX; i++) {
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE,
|
||||
MSDC_PAD_TUNE_CMDRDLY, i);
|
||||
mmc_send_tuning(mmc, opcode, &cmd_err);
|
||||
if (!cmd_err)
|
||||
fall_delay |= (1 << i);
|
||||
}
|
||||
|
||||
final_rise_delay = get_best_delay(host, rise_delay);
|
||||
final_fall_delay = get_best_delay(host, fall_delay);
|
||||
|
||||
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
|
||||
if (final_maxlen == final_rise_delay.maxlen) {
|
||||
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
|
||||
final_rise_delay.final_phase);
|
||||
final_delay = final_rise_delay.final_phase;
|
||||
} else {
|
||||
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL);
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY,
|
||||
final_fall_delay.final_phase);
|
||||
final_delay = final_fall_delay.final_phase;
|
||||
}
|
||||
|
||||
return final_delay == 0xff ? -EIO : 0;
|
||||
}
|
||||
|
||||
static int msdc_tune_data(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct msdc_host *host = mmc_priv(mmc);
|
||||
u32 rise_delay = 0, fall_delay = 0;
|
||||
struct msdc_delay_phase final_rise_delay, final_fall_delay;
|
||||
u8 final_delay, final_maxlen;
|
||||
int i, ret;
|
||||
|
||||
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
|
||||
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
|
||||
for (i = 0 ; i < PAD_DELAY_MAX; i++) {
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE,
|
||||
MSDC_PAD_TUNE_DATRRDLY, i);
|
||||
ret = mmc_send_tuning(mmc, opcode, NULL);
|
||||
if (!ret)
|
||||
rise_delay |= (1 << i);
|
||||
}
|
||||
|
||||
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
|
||||
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
|
||||
for (i = 0; i < PAD_DELAY_MAX; i++) {
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE,
|
||||
MSDC_PAD_TUNE_DATRRDLY, i);
|
||||
ret = mmc_send_tuning(mmc, opcode, NULL);
|
||||
if (!ret)
|
||||
fall_delay |= (1 << i);
|
||||
}
|
||||
|
||||
final_rise_delay = get_best_delay(host, rise_delay);
|
||||
final_fall_delay = get_best_delay(host, fall_delay);
|
||||
|
||||
final_maxlen = max(final_rise_delay.maxlen, final_fall_delay.maxlen);
|
||||
/* Rising edge is more stable, prefer to use it */
|
||||
if (final_rise_delay.maxlen >= 10)
|
||||
final_maxlen = final_rise_delay.maxlen;
|
||||
if (final_maxlen == final_rise_delay.maxlen) {
|
||||
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
|
||||
sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE,
|
||||
MSDC_PAD_TUNE_DATRRDLY,
|
||||
final_rise_delay.final_phase);
|
||||
final_delay = final_rise_delay.final_phase;
|
||||
} else {
|
||||
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL);
|
||||
sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL);
|
||||
sdr_set_field(host->base + MSDC_PAD_TUNE,
|
||||
MSDC_PAD_TUNE_DATRRDLY,
|
||||
final_fall_delay.final_phase);
|
||||
final_delay = final_fall_delay.final_phase;
|
||||
}
|
||||
|
||||
return final_delay == 0xff ? -EIO : 0;
|
||||
}
|
||||
|
||||
static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct msdc_host *host = mmc_priv(mmc);
|
||||
int ret;
|
||||
|
||||
pm_runtime_get_sync(host->dev);
|
||||
ret = msdc_tune_response(mmc, opcode);
|
||||
if (ret == -EIO) {
|
||||
dev_err(host->dev, "Tune response fail!\n");
|
||||
goto out;
|
||||
}
|
||||
ret = msdc_tune_data(mmc, opcode);
|
||||
if (ret == -EIO)
|
||||
dev_err(host->dev, "Tune data fail!\n");
|
||||
|
||||
out:
|
||||
pm_runtime_mark_last_busy(host->dev);
|
||||
pm_runtime_put_autosuspend(host->dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
{
|
||||
struct msdc_host *host = mmc_priv(mmc);
|
||||
|
||||
writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void msdc_hw_reset(struct mmc_host *mmc)
|
||||
{
|
||||
struct msdc_host *host = mmc_priv(mmc);
|
||||
|
||||
sdr_set_bits(host->base + EMMC_IOCON, 1);
|
||||
udelay(10); /* 10us is enough */
|
||||
sdr_clr_bits(host->base + EMMC_IOCON, 1);
|
||||
}
|
||||
|
||||
static struct mmc_host_ops mt_msdc_ops = {
|
||||
.post_req = msdc_post_req,
|
||||
.pre_req = msdc_pre_req,
|
||||
@ -1221,6 +1454,9 @@ static struct mmc_host_ops mt_msdc_ops = {
|
||||
.set_ios = msdc_ops_set_ios,
|
||||
.start_signal_voltage_switch = msdc_ops_switch_volt,
|
||||
.card_busy = msdc_card_busy,
|
||||
.execute_tuning = msdc_execute_tuning,
|
||||
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
|
||||
.hw_reset = msdc_hw_reset,
|
||||
};
|
||||
|
||||
static int msdc_drv_probe(struct platform_device *pdev)
|
||||
@ -1294,6 +1530,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
||||
goto host_free;
|
||||
}
|
||||
|
||||
if (!of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
|
||||
&host->hs400_ds_delay))
|
||||
dev_dbg(&pdev->dev, "hs400-ds-delay: %x\n",
|
||||
host->hs400_ds_delay);
|
||||
|
||||
host->dev = &pdev->dev;
|
||||
host->mmc = mmc;
|
||||
host->src_clk_freq = clk_get_rate(host->src_clk);
|
||||
@ -1302,6 +1543,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
||||
mmc->f_min = host->src_clk_freq / (4 * 255);
|
||||
|
||||
mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
|
||||
mmc->caps |= MMC_CAP_RUNTIME_RESUME;
|
||||
/* MMC core transfer sizes tunable parameters */
|
||||
mmc->max_segs = MAX_BD_NUM;
|
||||
mmc->max_seg_size = BDMA_DESC_BUFLEN;
|
||||
@ -1313,7 +1555,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
||||
|
||||
host->timeout_clks = 3 * 1048576;
|
||||
host->dma.gpd = dma_alloc_coherent(&pdev->dev,
|
||||
sizeof(struct mt_gpdma_desc),
|
||||
2 * sizeof(struct mt_gpdma_desc),
|
||||
&host->dma.gpd_addr, GFP_KERNEL);
|
||||
host->dma.bd = dma_alloc_coherent(&pdev->dev,
|
||||
MAX_BD_NUM * sizeof(struct mt_bdma_desc),
|
||||
@ -1354,7 +1596,7 @@ release:
|
||||
release_mem:
|
||||
if (host->dma.gpd)
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(struct mt_gpdma_desc),
|
||||
2 * sizeof(struct mt_gpdma_desc),
|
||||
host->dma.gpd, host->dma.gpd_addr);
|
||||
if (host->dma.bd)
|
||||
dma_free_coherent(&pdev->dev,
|
||||
@ -1403,6 +1645,8 @@ static void msdc_save_reg(struct msdc_host *host)
|
||||
host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE);
|
||||
host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT);
|
||||
host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1);
|
||||
host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE);
|
||||
host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0);
|
||||
}
|
||||
|
||||
static void msdc_restore_reg(struct msdc_host *host)
|
||||
@ -1413,6 +1657,8 @@ static void msdc_restore_reg(struct msdc_host *host)
|
||||
writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE);
|
||||
writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT);
|
||||
writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1);
|
||||
writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE);
|
||||
writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0);
|
||||
}
|
||||
|
||||
static int msdc_runtime_suspend(struct device *dev)
|
||||
|
@ -1490,6 +1490,7 @@ static const struct of_device_id mmc_omap_match[] = {
|
||||
{ .compatible = "ti,omap2420-mmc", },
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, mmc_omap_match);
|
||||
#endif
|
||||
|
||||
static struct platform_driver mmc_omap_driver = {
|
||||
|
@ -207,7 +207,9 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = {
|
||||
.caps2 = MMC_CAP2_HC_ERASE_SZ,
|
||||
.flags = SDHCI_ACPI_RUNTIME_PM,
|
||||
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
|
||||
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC,
|
||||
.quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
|
||||
SDHCI_QUIRK2_STOP_WITH_TC |
|
||||
SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
|
||||
.probe_slot = sdhci_acpi_emmc_probe_slot,
|
||||
};
|
||||
|
||||
@ -239,6 +241,9 @@ struct sdhci_acpi_uid_slot {
|
||||
};
|
||||
|
||||
static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
|
||||
{ "80865ACA", NULL, &sdhci_acpi_slot_int_sd },
|
||||
{ "80865ACC", NULL, &sdhci_acpi_slot_int_emmc },
|
||||
{ "80865AD0", NULL, &sdhci_acpi_slot_int_sdio },
|
||||
{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc },
|
||||
{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd },
|
||||
{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd },
|
||||
@ -247,11 +252,15 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
|
||||
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
|
||||
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
|
||||
{ "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
|
||||
{ "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd },
|
||||
{ "PNP0D40" },
|
||||
{ },
|
||||
};
|
||||
|
||||
static const struct acpi_device_id sdhci_acpi_ids[] = {
|
||||
{ "80865ACA" },
|
||||
{ "80865ACC" },
|
||||
{ "80865AD0" },
|
||||
{ "80860F14" },
|
||||
{ "80860F16" },
|
||||
{ "INT33BB" },
|
||||
|
@ -273,7 +273,7 @@ static int sdhci_bcm_kona_probe(struct platform_device *pdev)
|
||||
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
|
||||
|
||||
dev_dbg(dev, "is_8bit=%c\n",
|
||||
(host->mmc->caps | MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
|
||||
(host->mmc->caps & MMC_CAP_8_BIT_DATA) ? 'Y' : 'N');
|
||||
|
||||
ret = sdhci_bcm_kona_sd_reset(host);
|
||||
if (ret)
|
||||
|
@ -759,7 +759,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
|
||||
min = ESDHC_TUNE_CTRL_MIN;
|
||||
while (min < ESDHC_TUNE_CTRL_MAX) {
|
||||
esdhc_prepare_tuning(host, min);
|
||||
if (!mmc_send_tuning(host->mmc))
|
||||
if (!mmc_send_tuning(host->mmc, opcode, NULL))
|
||||
break;
|
||||
min += ESDHC_TUNE_CTRL_STEP;
|
||||
}
|
||||
@ -768,7 +768,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
|
||||
max = min + ESDHC_TUNE_CTRL_STEP;
|
||||
while (max < ESDHC_TUNE_CTRL_MAX) {
|
||||
esdhc_prepare_tuning(host, max);
|
||||
if (mmc_send_tuning(host->mmc)) {
|
||||
if (mmc_send_tuning(host->mmc, opcode, NULL)) {
|
||||
max -= ESDHC_TUNE_CTRL_STEP;
|
||||
break;
|
||||
}
|
||||
@ -778,7 +778,7 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode)
|
||||
/* use average delay to get the best timing */
|
||||
avg = (min + max) / 2;
|
||||
esdhc_prepare_tuning(host, avg);
|
||||
ret = mmc_send_tuning(host->mmc);
|
||||
ret = mmc_send_tuning(host->mmc, opcode, NULL);
|
||||
esdhc_post_tuning(host);
|
||||
|
||||
dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n",
|
||||
|
@ -24,6 +24,8 @@
|
||||
SDHCI_QUIRK_PIO_NEEDS_DELAY | \
|
||||
SDHCI_QUIRK_NO_HISPD_BIT)
|
||||
|
||||
#define ESDHC_PROCTL 0x28
|
||||
|
||||
#define ESDHC_SYSTEM_CONTROL 0x2c
|
||||
#define ESDHC_CLOCK_MASK 0x0000fff0
|
||||
#define ESDHC_PREDIV_SHIFT 8
|
||||
|
@ -373,7 +373,7 @@ retry:
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = mmc_send_tuning(mmc);
|
||||
rc = mmc_send_tuning(mmc, opcode, NULL);
|
||||
if (!rc) {
|
||||
/* Tuning is successful at this tuning point */
|
||||
tuned_phases[tuned_phase_cnt++] = phase;
|
||||
|
@ -111,7 +111,6 @@ static int sdhci_at91_probe(struct platform_device *pdev)
|
||||
if (ret < 0) {
|
||||
dev_err(&pdev->dev, "failed to set gck");
|
||||
goto hclock_disable_unprepare;
|
||||
return -EINVAL;
|
||||
}
|
||||
/*
|
||||
* We need to check if we have the requested rate for gck because in
|
||||
|
@ -24,122 +24,324 @@
|
||||
|
||||
#define VENDOR_V_22 0x12
|
||||
#define VENDOR_V_23 0x13
|
||||
static u32 esdhc_readl(struct sdhci_host *host, int reg)
|
||||
|
||||
struct sdhci_esdhc {
|
||||
u8 vendor_ver;
|
||||
u8 spec_ver;
|
||||
};
|
||||
|
||||
/**
|
||||
* esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register
|
||||
* to make it compatible with SD spec.
|
||||
*
|
||||
* @host: pointer to sdhci_host
|
||||
* @spec_reg: SD spec register address
|
||||
* @value: 32bit eSDHC register value on spec_reg address
|
||||
*
|
||||
* In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
|
||||
* registers are 32 bits. There are differences in register size, register
|
||||
* address, register function, bit position and function between eSDHC spec
|
||||
* and SD spec.
|
||||
*
|
||||
* Return a fixed up register value
|
||||
*/
|
||||
static u32 esdhc_readl_fixup(struct sdhci_host *host,
|
||||
int spec_reg, u32 value)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_esdhc *esdhc = pltfm_host->priv;
|
||||
u32 ret;
|
||||
|
||||
ret = in_be32(host->ioaddr + reg);
|
||||
/*
|
||||
* The bit of ADMA flag in eSDHC is not compatible with standard
|
||||
* SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is
|
||||
* supported by eSDHC.
|
||||
* And for many FSL eSDHC controller, the reset value of field
|
||||
* SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA,
|
||||
* SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA,
|
||||
* only those with a vendor version greater than 2.2/0x12 support ADMA.
* For FSL eSDHC, reads must be 4-byte aligned, so use 0xFC to read the
* vendor version number; 0xFE is SDHCI_HOST_VERSION.
|
||||
*/
|
||||
if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) {
|
||||
u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
|
||||
tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
|
||||
if (tmp > VENDOR_V_22)
|
||||
ret |= SDHCI_CAN_DO_ADMA2;
|
||||
if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) {
|
||||
if (esdhc->vendor_ver > VENDOR_V_22) {
|
||||
ret = value | SDHCI_CAN_DO_ADMA2;
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = value;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u16 esdhc_readw(struct sdhci_host *host, int reg)
|
||||
static u16 esdhc_readw_fixup(struct sdhci_host *host,
|
||||
int spec_reg, u32 value)
|
||||
{
|
||||
u16 ret;
|
||||
int base = reg & ~0x3;
|
||||
int shift = (reg & 0x2) * 8;
|
||||
int shift = (spec_reg & 0x2) * 8;
|
||||
|
||||
if (unlikely(reg == SDHCI_HOST_VERSION))
|
||||
ret = in_be32(host->ioaddr + base) & 0xffff;
|
||||
if (spec_reg == SDHCI_HOST_VERSION)
|
||||
ret = value & 0xffff;
|
||||
else
|
||||
ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;
|
||||
ret = (value >> shift) & 0xffff;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u8 esdhc_readb(struct sdhci_host *host, int reg)
|
||||
static u8 esdhc_readb_fixup(struct sdhci_host *host,
|
||||
int spec_reg, u32 value)
|
||||
{
|
||||
int base = reg & ~0x3;
|
||||
int shift = (reg & 0x3) * 8;
|
||||
u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
|
||||
u8 ret;
|
||||
u8 dma_bits;
|
||||
int shift = (spec_reg & 0x3) * 8;
|
||||
|
||||
ret = (value >> shift) & 0xff;
|
||||
|
||||
/*
|
||||
* "DMA select" locates at offset 0x28 in SD specification, but on
|
||||
* P5020 or P3041, it locates at 0x29.
|
||||
*/
|
||||
if (reg == SDHCI_HOST_CONTROL) {
|
||||
u32 dma_bits;
|
||||
|
||||
dma_bits = in_be32(host->ioaddr + reg);
|
||||
if (spec_reg == SDHCI_HOST_CONTROL) {
|
||||
/* DMA select is 22,23 bits in Protocol Control Register */
|
||||
dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
|
||||
|
||||
dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK;
|
||||
/* fixup the result */
|
||||
ret &= ~SDHCI_CTRL_DMA_MASK;
|
||||
ret |= dma_bits;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* esdhc_write*_fixup - Fixup the SD spec register value so that it could be
|
||||
* written into eSDHC register.
|
||||
*
|
||||
* @host: pointer to sdhci_host
|
||||
* @spec_reg: SD spec register address
|
||||
* @value: 8/16/32bit SD spec register value that would be written
|
||||
* @old_value: 32bit eSDHC register value on spec_reg address
|
||||
*
|
||||
* In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC
|
||||
* registers are 32 bits. There are differences in register size, register
|
||||
* address, register function, bit position and function between eSDHC spec
|
||||
* and SD spec.
|
||||
*
|
||||
* Return a fixed up register value
|
||||
*/
|
||||
static u32 esdhc_writel_fixup(struct sdhci_host *host,
|
||||
int spec_reg, u32 value, u32 old_value)
|
||||
{
|
||||
u32 ret;
|
||||
|
||||
/*
|
||||
* Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
|
||||
* when SYSCTL[RSTD] is set for some special operations.
|
||||
* It has no impact on other operations.
|
||||
*/
|
||||
if (spec_reg == SDHCI_INT_ENABLE)
|
||||
ret = value | SDHCI_INT_BLK_GAP;
|
||||
else
|
||||
ret = value;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void esdhc_writel(struct sdhci_host *host, u32 val, int reg)
|
||||
static u32 esdhc_writew_fixup(struct sdhci_host *host,
|
||||
int spec_reg, u16 value, u32 old_value)
|
||||
{
|
||||
/*
|
||||
* Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE]
* when SYSCTL[RSTD] is set for some special operations.
* It has no impact on other operations.
|
||||
*/
|
||||
if (reg == SDHCI_INT_ENABLE)
|
||||
val |= SDHCI_INT_BLK_GAP;
|
||||
sdhci_be32bs_writel(host, val, reg);
|
||||
}
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
int shift = (spec_reg & 0x2) * 8;
|
||||
u32 ret;
|
||||
|
||||
static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
|
||||
{
|
||||
if (reg == SDHCI_BLOCK_SIZE) {
|
||||
switch (spec_reg) {
|
||||
case SDHCI_TRANSFER_MODE:
|
||||
/*
|
||||
* Postpone this write, we must do it together with a
|
||||
* command write that is down below. Return old value.
|
||||
*/
|
||||
pltfm_host->xfer_mode_shadow = value;
|
||||
return old_value;
|
||||
case SDHCI_COMMAND:
|
||||
ret = (value << 16) | pltfm_host->xfer_mode_shadow;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = old_value & (~(0xffff << shift));
|
||||
ret |= (value << shift);
|
||||
|
||||
if (spec_reg == SDHCI_BLOCK_SIZE) {
|
||||
/*
|
||||
* The two last DMA bits are reserved, and the first one is used for
* the non-standard blksz of 4096 bytes that we don't support
* yet. So clear the DMA boundary bits.
|
||||
*/
|
||||
val &= ~SDHCI_MAKE_BLKSZ(0x7, 0);
|
||||
ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0));
|
||||
}
|
||||
sdhci_be32bs_writew(host, val, reg);
|
||||
return ret;
|
||||
}
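The write fixups above exist because eSDHC only supports 32-bit register accesses, so an 8- or 16-bit SD-spec write has to be folded into the containing 32-bit word by a read-modify-write (and SDHCI_TRANSFER_MODE is shadowed until the command register is written). Here is a small stand-alone sketch of that folding; the regs[] array and the offsets used in main() are made up for illustration and do not model real eSDHC registers.

/* Illustrative read-modify-write for a device that only does 32-bit
 * accesses: merge a 16-bit value at byte offset 'reg' into the
 * containing 32-bit word. The regs[] array stands in for MMIO.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t regs[16];			/* fake 64-byte register file */

static void write16_as_32(int reg, uint16_t val)
{
	int base = reg & ~0x3;			/* containing 32-bit word */
	int shift = (reg & 0x2) * 8;		/* 0 or 16 */
	uint32_t old = regs[base / 4];

	old &= ~(0xffffu << shift);		/* clear the target half-word */
	old |= (uint32_t)val << shift;		/* fold in the new value */
	regs[base / 4] = old;
}

int main(void)
{
	regs[1] = 0x11223344;
	write16_as_32(0x06, 0xbeef);		/* upper half of word at 0x04 */
	printf("0x%08x\n", regs[1]);		/* prints 0xbeef3344 */
	return 0;
}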
static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
|
||||
static u32 esdhc_writeb_fixup(struct sdhci_host *host,
|
||||
int spec_reg, u8 value, u32 old_value)
|
||||
{
|
||||
u32 ret;
|
||||
u32 dma_bits;
|
||||
u8 tmp;
|
||||
int shift = (spec_reg & 0x3) * 8;
|
||||
|
||||
/*
|
||||
* eSDHC doesn't have a standard power control register, so we do
|
||||
* nothing here to avoid incorrect operation.
|
||||
*/
|
||||
if (spec_reg == SDHCI_POWER_CONTROL)
|
||||
return old_value;
|
||||
/*
|
||||
* "DMA select" location is offset 0x28 in SD specification, but on
|
||||
* P5020 or P3041, it's located at 0x29.
|
||||
*/
|
||||
if (reg == SDHCI_HOST_CONTROL) {
|
||||
u32 dma_bits;
|
||||
|
||||
if (spec_reg == SDHCI_HOST_CONTROL) {
|
||||
/*
|
||||
* If host control register is not standard, exit
|
||||
* this function
|
||||
*/
|
||||
if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL)
|
||||
return;
|
||||
return old_value;
|
||||
|
||||
/* DMA select is 22,23 bits in Protocol Control Register */
|
||||
dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
|
||||
clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
|
||||
dma_bits);
|
||||
val &= ~SDHCI_CTRL_DMA_MASK;
|
||||
val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
|
||||
dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5;
|
||||
ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits;
|
||||
tmp = (value & (~SDHCI_CTRL_DMA_MASK)) |
|
||||
(old_value & SDHCI_CTRL_DMA_MASK);
|
||||
ret = (ret & (~0xff)) | tmp;
|
||||
|
||||
/* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */
|
||||
ret &= ~ESDHC_HOST_CONTROL_RES;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
|
||||
if (reg == SDHCI_HOST_CONTROL)
|
||||
val &= ~ESDHC_HOST_CONTROL_RES;
|
||||
sdhci_be32bs_writeb(host, val, reg);
|
||||
ret = (old_value & (~(0xff << shift))) | (value << shift);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 esdhc_be_readl(struct sdhci_host *host, int reg)
|
||||
{
|
||||
u32 ret;
|
||||
u32 value;
|
||||
|
||||
value = ioread32be(host->ioaddr + reg);
|
||||
ret = esdhc_readl_fixup(host, reg, value);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u32 esdhc_le_readl(struct sdhci_host *host, int reg)
|
||||
{
|
||||
u32 ret;
|
||||
u32 value;
|
||||
|
||||
value = ioread32(host->ioaddr + reg);
|
||||
ret = esdhc_readl_fixup(host, reg, value);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u16 esdhc_be_readw(struct sdhci_host *host, int reg)
|
||||
{
|
||||
u16 ret;
|
||||
u32 value;
|
||||
int base = reg & ~0x3;
|
||||
|
||||
value = ioread32be(host->ioaddr + base);
|
||||
ret = esdhc_readw_fixup(host, reg, value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u16 esdhc_le_readw(struct sdhci_host *host, int reg)
|
||||
{
|
||||
u16 ret;
|
||||
u32 value;
|
||||
int base = reg & ~0x3;
|
||||
|
||||
value = ioread32(host->ioaddr + base);
|
||||
ret = esdhc_readw_fixup(host, reg, value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u8 esdhc_be_readb(struct sdhci_host *host, int reg)
|
||||
{
|
||||
u8 ret;
|
||||
u32 value;
|
||||
int base = reg & ~0x3;
|
||||
|
||||
value = ioread32be(host->ioaddr + base);
|
||||
ret = esdhc_readb_fixup(host, reg, value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static u8 esdhc_le_readb(struct sdhci_host *host, int reg)
|
||||
{
|
||||
u8 ret;
|
||||
u32 value;
|
||||
int base = reg & ~0x3;
|
||||
|
||||
value = ioread32(host->ioaddr + base);
|
||||
ret = esdhc_readb_fixup(host, reg, value);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
value = esdhc_writel_fixup(host, reg, val, 0);
|
||||
iowrite32be(value, host->ioaddr + reg);
|
||||
}
|
||||
|
||||
static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg)
|
||||
{
|
||||
u32 value;
|
||||
|
||||
value = esdhc_writel_fixup(host, reg, val, 0);
|
||||
iowrite32(value, host->ioaddr + reg);
|
||||
}
|
||||
|
||||
static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg)
|
||||
{
|
||||
int base = reg & ~0x3;
|
||||
u32 value;
|
||||
u32 ret;
|
||||
|
||||
value = ioread32be(host->ioaddr + base);
|
||||
ret = esdhc_writew_fixup(host, reg, val, value);
|
||||
if (reg != SDHCI_TRANSFER_MODE)
|
||||
iowrite32be(ret, host->ioaddr + base);
|
||||
}
|
||||
|
||||
static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg)
|
||||
{
|
||||
int base = reg & ~0x3;
|
||||
u32 value;
|
||||
u32 ret;
|
||||
|
||||
value = ioread32(host->ioaddr + base);
|
||||
ret = esdhc_writew_fixup(host, reg, val, value);
|
||||
if (reg != SDHCI_TRANSFER_MODE)
|
||||
iowrite32(ret, host->ioaddr + base);
|
||||
}
|
||||
|
||||
static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg)
|
||||
{
|
||||
int base = reg & ~0x3;
|
||||
u32 value;
|
||||
u32 ret;
|
||||
|
||||
value = ioread32be(host->ioaddr + base);
|
||||
ret = esdhc_writeb_fixup(host, reg, val, value);
|
||||
iowrite32be(ret, host->ioaddr + base);
|
||||
}
|
||||
|
||||
static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg)
|
||||
{
|
||||
int base = reg & ~0x3;
|
||||
u32 value;
|
||||
u32 ret;
|
||||
|
||||
value = ioread32(host->ioaddr + base);
|
||||
ret = esdhc_writeb_fixup(host, reg, val, value);
|
||||
iowrite32(ret, host->ioaddr + base);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -149,19 +351,17 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
|
||||
* For Continue, apply soft reset for data(SYSCTL[RSTD]);
|
||||
* and re-issue the entire read transaction from beginning.
|
||||
*/
|
||||
static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
|
||||
static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
|
||||
{
|
||||
u32 tmp;
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_esdhc *esdhc = pltfm_host->priv;
|
||||
bool applicable;
|
||||
dma_addr_t dmastart;
|
||||
dma_addr_t dmanow;
|
||||
|
||||
tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
|
||||
tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
|
||||
|
||||
applicable = (intmask & SDHCI_INT_DATA_END) &&
|
||||
(intmask & SDHCI_INT_BLK_GAP) &&
|
||||
(tmp == VENDOR_V_23);
|
||||
(intmask & SDHCI_INT_BLK_GAP) &&
|
||||
(esdhc->vendor_ver == VENDOR_V_23);
|
||||
if (!applicable)
|
||||
return;
|
||||
|
||||
@ -179,7 +379,11 @@ static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask)
|
||||
|
||||
static int esdhc_of_enable_dma(struct sdhci_host *host)
|
||||
{
|
||||
setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP);
|
||||
u32 value;
|
||||
|
||||
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
|
||||
value |= ESDHC_DMA_SNOOP;
|
||||
sdhci_writel(host, value, ESDHC_DMA_SYSCTL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -199,6 +403,8 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)
|
||||
|
||||
static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_esdhc *esdhc = pltfm_host->priv;
|
||||
int pre_div = 1;
|
||||
int div = 1;
|
||||
u32 temp;
|
||||
@ -209,9 +415,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
return;
|
||||
|
||||
/* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */
|
||||
temp = esdhc_readw(host, SDHCI_HOST_VERSION);
|
||||
temp = (temp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
|
||||
if (temp < VENDOR_V_23)
|
||||
if (esdhc->vendor_ver < VENDOR_V_23)
|
||||
pre_div = 2;
|
||||
|
||||
/* Workaround to reduce the clock frequency for p1010 esdhc */
|
||||
@ -247,39 +451,26 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
|
||||
mdelay(1);
|
||||
}
|
||||
|
||||
static void esdhc_of_platform_init(struct sdhci_host *host)
|
||||
{
|
||||
u32 vvn;
|
||||
|
||||
vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS);
|
||||
vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT;
|
||||
if (vvn == VENDOR_V_22)
|
||||
host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
|
||||
|
||||
if (vvn > VENDOR_V_22)
|
||||
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
|
||||
}
|
||||
|
||||
static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width)
|
||||
{
|
||||
u32 ctrl;
|
||||
|
||||
ctrl = sdhci_readl(host, ESDHC_PROCTL);
|
||||
ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK);
|
||||
switch (width) {
|
||||
case MMC_BUS_WIDTH_8:
|
||||
ctrl = ESDHC_CTRL_8BITBUS;
|
||||
ctrl |= ESDHC_CTRL_8BITBUS;
|
||||
break;
|
||||
|
||||
case MMC_BUS_WIDTH_4:
|
||||
ctrl = ESDHC_CTRL_4BITBUS;
|
||||
ctrl |= ESDHC_CTRL_4BITBUS;
|
||||
break;
|
||||
|
||||
default:
|
||||
ctrl = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL,
|
||||
ESDHC_CTRL_BUSWIDTH_MASK, ctrl);
|
||||
sdhci_writel(host, ctrl, ESDHC_PROCTL);
|
||||
}
|
||||
|
||||
static void esdhc_reset(struct sdhci_host *host, u8 mask)
|
||||
@ -290,32 +481,13 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
|
||||
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
|
||||
}
|
||||
|
||||
static const struct sdhci_ops sdhci_esdhc_ops = {
|
||||
.read_l = esdhc_readl,
|
||||
.read_w = esdhc_readw,
|
||||
.read_b = esdhc_readb,
|
||||
.write_l = esdhc_writel,
|
||||
.write_w = esdhc_writew,
|
||||
.write_b = esdhc_writeb,
|
||||
.set_clock = esdhc_of_set_clock,
|
||||
.enable_dma = esdhc_of_enable_dma,
|
||||
.get_max_clock = esdhc_of_get_max_clock,
|
||||
.get_min_clock = esdhc_of_get_min_clock,
|
||||
.platform_init = esdhc_of_platform_init,
|
||||
.adma_workaround = esdhci_of_adma_workaround,
|
||||
.set_bus_width = esdhc_pltfm_set_bus_width,
|
||||
.reset = esdhc_reset,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static u32 esdhc_proctl;
|
||||
static int esdhc_of_suspend(struct device *dev)
|
||||
{
|
||||
struct sdhci_host *host = dev_get_drvdata(dev);
|
||||
|
||||
esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL);
|
||||
esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL);
|
||||
|
||||
return sdhci_suspend_host(host);
|
||||
}
|
||||
@ -328,9 +500,8 @@ static int esdhc_of_resume(struct device *dev)
|
||||
if (ret == 0) {
|
||||
/* Isn't this already done by sdhci_resume_host() ? --rmk */
|
||||
esdhc_of_enable_dma(host);
|
||||
sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
|
||||
sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -343,37 +514,103 @@ static const struct dev_pm_ops esdhc_pmops = {
|
||||
#define ESDHC_PMOPS NULL
|
||||
#endif
|
||||
|
||||
static const struct sdhci_pltfm_data sdhci_esdhc_pdata = {
|
||||
/*
|
||||
* card detection could be handled via GPIO
|
||||
* eSDHC cannot support End Attribute in NOP ADMA descriptor
|
||||
*/
|
||||
static const struct sdhci_ops sdhci_esdhc_be_ops = {
|
||||
.read_l = esdhc_be_readl,
|
||||
.read_w = esdhc_be_readw,
|
||||
.read_b = esdhc_be_readb,
|
||||
.write_l = esdhc_be_writel,
|
||||
.write_w = esdhc_be_writew,
|
||||
.write_b = esdhc_be_writeb,
|
||||
.set_clock = esdhc_of_set_clock,
|
||||
.enable_dma = esdhc_of_enable_dma,
|
||||
.get_max_clock = esdhc_of_get_max_clock,
|
||||
.get_min_clock = esdhc_of_get_min_clock,
|
||||
.adma_workaround = esdhc_of_adma_workaround,
|
||||
.set_bus_width = esdhc_pltfm_set_bus_width,
|
||||
.reset = esdhc_reset,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
};
|
||||
|
||||
static const struct sdhci_ops sdhci_esdhc_le_ops = {
|
||||
.read_l = esdhc_le_readl,
|
||||
.read_w = esdhc_le_readw,
|
||||
.read_b = esdhc_le_readb,
|
||||
.write_l = esdhc_le_writel,
|
||||
.write_w = esdhc_le_writew,
|
||||
.write_b = esdhc_le_writeb,
|
||||
.set_clock = esdhc_of_set_clock,
|
||||
.enable_dma = esdhc_of_enable_dma,
|
||||
.get_max_clock = esdhc_of_get_max_clock,
|
||||
.get_min_clock = esdhc_of_get_min_clock,
|
||||
.adma_workaround = esdhc_of_adma_workaround,
|
||||
.set_bus_width = esdhc_pltfm_set_bus_width,
|
||||
.reset = esdhc_reset,
|
||||
.set_uhs_signaling = sdhci_set_uhs_signaling,
|
||||
};
|
||||
|
||||
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
|
||||
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
|
||||
| SDHCI_QUIRK_NO_CARD_NO_RESET
|
||||
| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
|
||||
.ops = &sdhci_esdhc_ops,
|
||||
.ops = &sdhci_esdhc_be_ops,
|
||||
};
|
||||
|
||||
static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = {
|
||||
.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION
|
||||
| SDHCI_QUIRK_NO_CARD_NO_RESET
|
||||
| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
|
||||
.ops = &sdhci_esdhc_le_ops,
|
||||
};
|
||||
|
||||
static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host;
|
||||
struct sdhci_esdhc *esdhc;
|
||||
u16 host_ver;
|
||||
|
||||
pltfm_host = sdhci_priv(host);
|
||||
esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc),
|
||||
GFP_KERNEL);
|
||||
|
||||
host_ver = sdhci_readw(host, SDHCI_HOST_VERSION);
|
||||
esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >>
|
||||
SDHCI_VENDOR_VER_SHIFT;
|
||||
esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK;
|
||||
|
||||
pltfm_host->priv = esdhc;
|
||||
}
|
||||
|
||||
static int sdhci_esdhc_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct sdhci_host *host;
|
||||
struct device_node *np;
|
||||
int ret;
|
||||
|
||||
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0);
|
||||
np = pdev->dev.of_node;
|
||||
|
||||
if (of_get_property(np, "little-endian", NULL))
|
||||
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0);
|
||||
else
|
||||
host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0);
|
||||
|
||||
if (IS_ERR(host))
|
||||
return PTR_ERR(host);
|
||||
|
||||
esdhc_init(pdev, host);
|
||||
|
||||
sdhci_get_of_property(pdev);
|
||||
|
||||
np = pdev->dev.of_node;
|
||||
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,p1020-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,t1040-esdhc"))
|
||||
of_device_is_compatible(np, "fsl,t1040-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
|
||||
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
|
||||
|
||||
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
|
||||
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
|
||||
|
||||
if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
|
||||
/*
|
||||
* Freescale messed up with P2020 as it has a non-standard
|
||||
|
@ -444,11 +444,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)
|
||||
else
|
||||
scratch &= ~0x47;
|
||||
|
||||
ret = pci_write_config_byte(chip->pdev, 0xAE, scratch);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
return pci_write_config_byte(chip->pdev, 0xAE, scratch);
|
||||
}
|
||||
|
||||
static int jmicron_probe(struct sdhci_pci_chip *chip)
|
||||
@ -1112,6 +1108,62 @@ static const struct pci_device_id pci_ids[] = {
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_DNV_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXT_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXT_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_BXT_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_APL_EMMC,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_APL_SDIO,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_INTEL,
|
||||
.device = PCI_DEVICE_ID_INTEL_APL_SD,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
|
||||
},
|
||||
|
||||
{
|
||||
.vendor = PCI_VENDOR_ID_O2,
|
||||
.device = PCI_DEVICE_ID_O2_8120,
|
@ -60,7 +60,7 @@ static void o2_pci_led_enable(struct sdhci_pci_chip *chip)
|
||||
|
||||
}
|
||||
|
||||
void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
|
||||
static void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
|
||||
{
|
||||
u32 scratch_32;
|
||||
int ret;
|
||||
@ -145,7 +145,6 @@ void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip)
|
||||
scratch_32 |= 0x00080000;
|
||||
pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init);
|
||||
|
||||
int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
|
||||
{
|
||||
@ -179,7 +178,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot);
|
||||
|
||||
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
|
||||
{
|
||||
@ -385,11 +383,9 @@ int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe);
|
||||
|
||||
int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip)
|
||||
{
|
||||
sdhci_pci_o2_probe(chip);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume);
|
||||
|
@ -64,8 +64,6 @@
|
||||
#define O2_SD_VENDOR_SETTING 0x110
|
||||
#define O2_SD_VENDOR_SETTING2 0x1C8
|
||||
|
||||
extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip);
|
||||
|
||||
extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot);
|
||||
|
||||
extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip);
|
||||
|
@ -24,6 +24,13 @@
|
||||
#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
|
||||
#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
|
||||
#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
|
||||
#define PCI_DEVICE_ID_INTEL_DNV_EMMC 0x19db
|
||||
#define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca
|
||||
#define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc
|
||||
#define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0
|
||||
#define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca
|
||||
#define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc
|
||||
#define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0
|
||||
|
||||
/*
|
||||
* PCI registers
|
||||
|
@ -71,9 +71,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
struct sdhci_host *host = platform_get_drvdata(pdev);
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
const __be32 *clk;
|
||||
u32 bus_width;
|
||||
int size;
|
||||
|
||||
if (of_get_property(np, "sdhci,auto-cmd12", NULL))
|
||||
host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
|
||||
@ -101,9 +99,7 @@ void sdhci_get_of_property(struct platform_device *pdev)
|
||||
of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
|
||||
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
|
||||
|
||||
clk = of_get_property(np, "clock-frequency", &size);
|
||||
if (clk && size == sizeof(*clk) && *clk)
|
||||
pltfm_host->clock = be32_to_cpup(clk);
|
||||
of_property_read_u32(np, "clock-frequency", &pltfm_host->clock);
|
||||
|
||||
if (of_find_property(np, "keep-power-in-suspend", NULL))
|
||||
host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
|
||||
|
@ -50,7 +50,8 @@ static u32 sdhci_sirf_readl_le(struct sdhci_host *host, int reg)
|
||||
if (unlikely((reg == SDHCI_CAPABILITIES_1) &&
|
||||
(host->mmc->caps & MMC_CAP_UHS_SDR50))) {
|
||||
/* fake CAP_1 register */
|
||||
val = SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
|
||||
val = SDHCI_SUPPORT_DDR50 |
|
||||
SDHCI_SUPPORT_SDR50 | SDHCI_USE_SDR50_TUNING;
|
||||
}
|
||||
|
||||
if (unlikely(reg == SDHCI_SLOT_INT_STATUS)) {
|
||||
@ -97,7 +98,7 @@ retry:
|
||||
clock_setting | phase,
|
||||
SDHCI_CLK_DELAY_SETTING);
|
||||
|
||||
if (!mmc_send_tuning(mmc)) {
|
||||
if (!mmc_send_tuning(mmc, opcode, NULL)) {
|
||||
/* Tuning is successful at this tuning point */
|
||||
tuned_phase_cnt++;
|
||||
dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
|
||||
|
@ -1895,9 +1895,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
tuning_count = host->tuning_count;
|
||||
|
||||
/*
|
||||
* The Host Controller needs tuning only in case of SDR104 mode
|
||||
* and for SDR50 mode when Use Tuning for SDR50 is set in the
|
||||
* Capabilities register.
|
||||
* The Host Controller needs tuning in case of SDR104 and DDR50
|
||||
* mode, and for SDR50 mode when Use Tuning for SDR50 is set in
|
||||
* the Capabilities register.
|
||||
* If the Host Controller supports the HS200 mode then the
|
||||
* tuning function has to be executed.
|
||||
*/
|
||||
@ -1917,6 +1917,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
break;
|
||||
|
||||
case MMC_TIMING_UHS_SDR104:
|
||||
case MMC_TIMING_UHS_DDR50:
|
||||
break;
|
||||
|
||||
case MMC_TIMING_UHS_SDR50:
|
||||
@ -2716,17 +2717,6 @@ int sdhci_resume_host(struct sdhci_host *host)
|
||||
host->ops->enable_dma(host);
|
||||
}
|
||||
|
||||
if (!device_may_wakeup(mmc_dev(host->mmc))) {
|
||||
ret = request_threaded_irq(host->irq, sdhci_irq,
|
||||
sdhci_thread_irq, IRQF_SHARED,
|
||||
mmc_hostname(host->mmc), host);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
sdhci_disable_irq_wakeups(host);
|
||||
disable_irq_wake(host->irq);
|
||||
}
|
||||
|
||||
if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
|
||||
(host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
|
||||
/* Card keeps power but host controller does not */
|
||||
@ -2739,6 +2729,17 @@ int sdhci_resume_host(struct sdhci_host *host)
|
||||
mmiowb();
|
||||
}
|
||||
|
||||
if (!device_may_wakeup(mmc_dev(host->mmc))) {
|
||||
ret = request_threaded_irq(host->irq, sdhci_irq,
|
||||
sdhci_thread_irq, IRQF_SHARED,
|
||||
mmc_hostname(host->mmc), host);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
sdhci_disable_irq_wakeups(host);
|
||||
disable_irq_wake(host->irq);
|
||||
}
|
||||
|
||||
sdhci_enable_card_detection(host);
|
||||
|
||||
return ret;
|
||||
|
@ -873,6 +873,13 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||
spin_unlock_irqrestore(&host->lock, iflags);
|
||||
}
|
||||
|
||||
static int sunxi_mmc_card_busy(struct mmc_host *mmc)
|
||||
{
|
||||
struct sunxi_mmc_host *host = mmc_priv(mmc);
|
||||
|
||||
return !!(mmc_readl(host, REG_STAS) & SDXC_CARD_DATA_BUSY);
|
||||
}
|
||||
|
||||
static const struct of_device_id sunxi_mmc_of_match[] = {
|
||||
{ .compatible = "allwinner,sun4i-a10-mmc", },
|
||||
{ .compatible = "allwinner,sun5i-a13-mmc", },
|
||||
@ -888,6 +895,7 @@ static struct mmc_host_ops sunxi_mmc_ops = {
|
||||
.get_cd = mmc_gpio_get_cd,
|
||||
.enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
|
||||
.hw_reset = sunxi_mmc_hw_reset,
|
||||
.card_busy = sunxi_mmc_card_busy,
|
||||
};
|
||||
|
||||
static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
|
||||
|
@ -563,7 +563,7 @@ static void add_offloaded_reg(struct vub300_mmc_host *vub300,
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
__add_offloaded_reg_to_fifo(vub300, register_access, func);
|
||||
}
|
||||
|
||||
@ -1372,7 +1372,7 @@ static void download_offload_pseudocode(struct vub300_mmc_host *vub300)
|
||||
l += snprintf(vub300->vub_name + l,
|
||||
sizeof(vub300->vub_name) - l, "_%04X%04X",
|
||||
sf->vendor, sf->device);
|
||||
};
|
||||
}
|
||||
snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin");
|
||||
dev_info(&vub300->udev->dev, "requesting offload firmware %s\n",
|
||||
vub300->vub_name);
|
||||
@ -1893,7 +1893,7 @@ static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300,
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
}
|
||||
if (vub300->total_offload_count == 0)
|
||||
return 0;
|
||||
else if (vub300->fn[func].offload_count == 0)
|
||||
|
@ -809,7 +809,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||
cmd->error = -EINVAL;
|
||||
|
||||
goto done;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -269,7 +269,6 @@ struct mmc_card {
|
||||
/* for byte mode */
|
||||
#define MMC_QUIRK_NONSTD_SDIO (1<<2) /* non-standard SDIO card attached */
|
||||
/* (missing CIA registers) */
|
||||
#define MMC_QUIRK_BROKEN_CLK_GATING (1<<3) /* clock gating the sdio bus will make card fail */
|
||||
#define MMC_QUIRK_NONSTD_FUNC_IF (1<<4) /* SDIO card has nonstd function interfaces */
|
||||
#define MMC_QUIRK_DISABLE_CD (1<<5) /* disconnect CD/DAT[3] resistor */
|
||||
#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
|
||||
|
@ -152,10 +152,8 @@ extern int mmc_app_cmd(struct mmc_host *, struct mmc_card *);
|
||||
extern int mmc_wait_for_app_cmd(struct mmc_host *, struct mmc_card *,
|
||||
struct mmc_command *, int);
|
||||
extern void mmc_start_bkops(struct mmc_card *card, bool from_exception);
|
||||
extern int __mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int, bool,
|
||||
bool, bool);
|
||||
extern int mmc_switch(struct mmc_card *, u8, u8, u8, unsigned int);
|
||||
extern int mmc_send_tuning(struct mmc_host *host);
|
||||
extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
|
||||
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
|
||||
|
||||
#define MMC_ERASE_ARG 0x00000000
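With the extra opcode and cmd_error arguments in the mmc_send_tuning() prototype above, a host driver's tuning loop can forward the tuning command it was given and distinguish command failures from data failures. The following is a hedged sketch of such a loop, not code from this series: everything prefixed foo_ (the host structure, FOO_NUM_PHASES, foo_set_sample_phase) is hypothetical, while mmc_send_tuning(), mmc_priv() and the ->execute_tuning() signature come from the sources in this pull.

/* Hypothetical host driver: scan sample phases with the extended
 * mmc_send_tuning(mmc, opcode, &cmd_error) and keep the last phase
 * that passed. foo_* names are illustrative, not a real driver.
 */
#include <linux/errno.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>

#define FOO_NUM_PHASES	8

struct foo_host {
	void __iomem *base;
};

static void foo_set_sample_phase(struct foo_host *host, int phase)
{
	/* program the sample-clock phase in controller registers */
}

static int foo_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct foo_host *host = mmc_priv(mmc);
	int phase, cmd_error, best = -1;

	for (phase = 0; phase < FOO_NUM_PHASES; phase++) {
		foo_set_sample_phase(host, phase);
		if (!mmc_send_tuning(mmc, opcode, &cmd_error))
			best = phase;	/* naive: remember the last good phase */
	}
	if (best < 0)
		return -EIO;

	foo_set_sample_phase(host, best);
	return 0;
}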
@ -16,6 +16,7 @@
|
||||
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/mmc/core.h>
|
||||
#include <linux/dmaengine.h>
|
||||
|
||||
#define MAX_MCI_SLOTS 2
|
||||
|
||||
@ -40,6 +41,17 @@ enum {
|
||||
|
||||
struct mmc_data;
|
||||
|
||||
enum {
|
||||
TRANS_MODE_PIO = 0,
|
||||
TRANS_MODE_IDMAC,
|
||||
TRANS_MODE_EDMAC
|
||||
};
|
||||
|
||||
struct dw_mci_dma_slave {
|
||||
struct dma_chan *ch;
|
||||
enum dma_transfer_direction direction;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct dw_mci - MMC controller state shared between all slots
|
||||
* @lock: Spinlock protecting the queue and associated data.
|
||||
@ -154,7 +166,14 @@ struct dw_mci {
|
||||
dma_addr_t sg_dma;
|
||||
void *sg_cpu;
|
||||
const struct dw_mci_dma_ops *dma_ops;
|
||||
/* For idmac */
|
||||
unsigned int ring_size;
|
||||
|
||||
/* For edmac */
|
||||
struct dw_mci_dma_slave *dms;
|
||||
/* Registers's physical base address */
|
||||
void *phy_regs;
|
||||
|
||||
u32 cmd_status;
|
||||
u32 data_status;
|
||||
u32 stop_cmdr;
|
||||
@ -208,8 +227,8 @@ struct dw_mci {
|
||||
struct dw_mci_dma_ops {
|
||||
/* DMA Ops */
|
||||
int (*init)(struct dw_mci *host);
|
||||
void (*start)(struct dw_mci *host, unsigned int sg_len);
|
||||
void (*complete)(struct dw_mci *host);
|
||||
int (*start)(struct dw_mci *host, unsigned int sg_len);
|
||||
void (*complete)(void *host);
|
||||
void (*stop)(struct dw_mci *host);
|
||||
void (*cleanup)(struct dw_mci *host);
|
||||
void (*exit)(struct dw_mci *host);
|
||||
|
@ -292,18 +292,6 @@ struct mmc_host {
|
||||
|
||||
mmc_pm_flag_t pm_caps; /* supported pm features */
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
int clk_requests; /* internal reference counter */
|
||||
unsigned int clk_delay; /* number of MCI clk hold cycles */
|
||||
bool clk_gated; /* clock gated */
|
||||
struct delayed_work clk_gate_work; /* delayed clock gate */
|
||||
unsigned int clk_old; /* old clock value cache */
|
||||
spinlock_t clk_lock; /* lock for clk fields */
|
||||
struct mutex clk_gate_mutex; /* mutex for clock gating */
|
||||
struct device_attribute clkgate_delay_attr;
|
||||
unsigned long clkgate_delay;
|
||||
#endif
|
||||
|
||||
/* host specific block data */
|
||||
unsigned int max_seg_size; /* see blk_queue_max_segment_size */
|
||||
unsigned short max_segs; /* see blk_queue_max_segments */
|
||||
@ -423,6 +411,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply);
|
||||
int mmc_regulator_set_ocr(struct mmc_host *mmc,
|
||||
struct regulator *supply,
|
||||
unsigned short vdd_bit);
|
||||
int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios);
|
||||
#else
|
||||
static inline int mmc_regulator_get_ocrmask(struct regulator *supply)
|
||||
{
|
||||
@ -435,6 +424,12 @@ static inline int mmc_regulator_set_ocr(struct mmc_host *mmc,
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
int mmc_regulator_get_supply(struct mmc_host *mmc);
|
||||
@ -479,26 +474,6 @@ static inline int mmc_host_packed_wr(struct mmc_host *host)
|
||||
return host->caps2 & MMC_CAP2_PACKED_WR;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MMC_CLKGATE
|
||||
void mmc_host_clk_hold(struct mmc_host *host);
|
||||
void mmc_host_clk_release(struct mmc_host *host);
|
||||
unsigned int mmc_host_clk_rate(struct mmc_host *host);
|
||||
|
||||
#else
|
||||
static inline void mmc_host_clk_hold(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void mmc_host_clk_release(struct mmc_host *host)
|
||||
{
|
||||
}
|
||||
|
||||
static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
|
||||
{
|
||||
return host->ios.clock;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline int mmc_card_hs(struct mmc_card *card)
|
||||
{
|
||||
return card->host->ios.timing == MMC_TIMING_SD_HS ||
|
||||
|
@ -45,8 +45,24 @@ struct mmc_ioc_cmd {
};
#define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr

#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
/**
 * struct mmc_ioc_multi_cmd - multi command information
 * @num_of_cmds: Number of commands to send. Must be equal to or less than
 *	MMC_IOC_MAX_CMDS.
 * @cmds: Array of commands with length equal to 'num_of_cmds'
 */
struct mmc_ioc_multi_cmd {
	__u64 num_of_cmds;
	struct mmc_ioc_cmd cmds[0];
};

#define MMC_IOC_CMD _IOWR(MMC_BLOCK_MAJOR, 0, struct mmc_ioc_cmd)
/*
 * MMC_IOC_MULTI_CMD: Used to send an array of MMC commands described by
 * the structure mmc_ioc_multi_cmd. The MMC driver will issue all
 * commands in the array in sequence to the card.
 */
#define MMC_IOC_MULTI_CMD _IOWR(MMC_BLOCK_MAJOR, 1, struct mmc_ioc_multi_cmd)
/*
 * Since this ioctl is only meant to enhance (and not replace) normal access
 * to the mmc bus device, an upper data transfer limit of MMC_IOC_MAX_BYTES
@ -54,4 +70,5 @@ struct mmc_ioc_cmd {
 * block device operations.
 */
#define MMC_IOC_MAX_BYTES (512L * 256)
#define MMC_IOC_MAX_CMDS 255
#endif /* LINUX_MMC_IOCTL_H */
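From userspace, the new MMC_IOC_MULTI_CMD takes a variable-length struct mmc_ioc_multi_cmd followed by num_of_cmds entries of struct mmc_ioc_cmd. Below is a minimal usage sketch; the /dev/mmcblk0 path, the RCA value of 1 and the choice of two CMD13 (SEND_STATUS) commands are just assumptions, and the response-flag constant is spelled out locally because it mirrors the kernel's MMC_RSP_R1 definition rather than coming from this header.

/* Illustrative only: issue two SEND_STATUS (CMD13) commands in one
 * MMC_IOC_MULTI_CMD ioctl. Device path and RCA value are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/major.h>
#include <linux/mmc/ioctl.h>

#define MMC_SEND_STATUS	13
#define MMC_RSP_R1	0x15	/* present | crc | opcode, as in mmc/core.h */

int main(void)
{
	int fd = open("/dev/mmcblk0", O_RDWR);
	struct mmc_ioc_multi_cmd *multi;
	int i;

	if (fd < 0)
		return 1;

	/* one header plus two trailing mmc_ioc_cmd entries */
	multi = calloc(1, sizeof(*multi) + 2 * sizeof(struct mmc_ioc_cmd));
	multi->num_of_cmds = 2;
	for (i = 0; i < 2; i++) {
		multi->cmds[i].opcode = MMC_SEND_STATUS;
		multi->cmds[i].arg = 1 << 16;	/* RCA 1, an assumption */
		multi->cmds[i].flags = MMC_RSP_R1;
	}

	if (ioctl(fd, MMC_IOC_MULTI_CMD, multi) == 0)
		printf("status: 0x%08x 0x%08x\n",
		       multi->cmds[0].response[0], multi->cmds[1].response[0]);

	free(multi);
	close(fd);
	return 0;
}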