/*
 * Amlogic SD/eMMC driver for the GX/S905 family SoCs
 *
 * Copyright (c) 2016 BayLibre, SAS.
 * Author: Kevin Hilman <khilman@baylibre.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/regulator/consumer.h>
#include <linux/interrupt.h>
#include <linux/bitfield.h>

#define DRIVER_NAME "meson-gx-mmc"

#define SD_EMMC_CLOCK 0x0
#define   CLK_DIV_MASK GENMASK(5, 0)
#define   CLK_DIV_MAX 63
#define   CLK_SRC_MASK GENMASK(7, 6)
#define   CLK_SRC_XTAL 0   /* external crystal */
#define   CLK_SRC_PLL 1    /* FCLK_DIV2 */
#define   CLK_CORE_PHASE_MASK GENMASK(9, 8)
#define   CLK_TX_PHASE_MASK GENMASK(11, 10)
#define   CLK_RX_PHASE_MASK GENMASK(13, 12)
#define   CLK_PHASE_0 0
#define   CLK_PHASE_90 1
#define   CLK_PHASE_180 2
#define   CLK_PHASE_270 3
#define   CLK_ALWAYS_ON BIT(24)

#define SD_EMMC_DELAY 0x4
#define SD_EMMC_ADJUST 0x8
#define SD_EMMC_CALOUT 0x10

#define SD_EMMC_START 0x40
#define   START_DESC_INIT BIT(0)
#define   START_DESC_BUSY BIT(1)
#define   START_DESC_ADDR_MASK GENMASK(31, 2)

#define SD_EMMC_CFG 0x44
#define   CFG_BUS_WIDTH_MASK GENMASK(1, 0)
#define   CFG_BUS_WIDTH_1 0x0
#define   CFG_BUS_WIDTH_4 0x1
#define   CFG_BUS_WIDTH_8 0x2
#define   CFG_DDR BIT(2)
#define   CFG_BLK_LEN_MASK GENMASK(7, 4)
#define   CFG_RESP_TIMEOUT_MASK GENMASK(11, 8)
#define   CFG_RC_CC_MASK GENMASK(15, 12)
#define   CFG_STOP_CLOCK BIT(22)
#define   CFG_CLK_ALWAYS_ON BIT(18)
#define   CFG_CHK_DS BIT(20)
#define   CFG_AUTO_CLK BIT(23)

#define SD_EMMC_STATUS 0x48
#define   STATUS_BUSY BIT(31)

#define SD_EMMC_IRQ_EN 0x4c
#define   IRQ_EN_MASK GENMASK(13, 0)
#define   IRQ_RXD_ERR_MASK GENMASK(7, 0)
#define   IRQ_TXD_ERR BIT(8)
#define   IRQ_DESC_ERR BIT(9)
#define   IRQ_RESP_ERR BIT(10)
#define   IRQ_RESP_TIMEOUT BIT(11)
#define   IRQ_DESC_TIMEOUT BIT(12)
#define   IRQ_END_OF_CHAIN BIT(13)
#define   IRQ_RESP_STATUS BIT(14)
#define   IRQ_SDIO BIT(15)

#define SD_EMMC_CMD_CFG 0x50
#define SD_EMMC_CMD_ARG 0x54
#define SD_EMMC_CMD_DAT 0x58
#define SD_EMMC_CMD_RSP 0x5c
#define SD_EMMC_CMD_RSP1 0x60
#define SD_EMMC_CMD_RSP2 0x64
#define SD_EMMC_CMD_RSP3 0x68

#define SD_EMMC_RXD 0x94
#define SD_EMMC_TXD 0x94
#define SD_EMMC_LAST_REG SD_EMMC_TXD

#define SD_EMMC_CFG_BLK_SIZE 512 /* internal buffer max: 512 bytes */
#define SD_EMMC_CFG_RESP_TIMEOUT 256 /* in clock cycles */
#define SD_EMMC_CMD_TIMEOUT 1024 /* in ms */
#define SD_EMMC_CMD_TIMEOUT_DATA 4096 /* in ms */
#define SD_EMMC_CFG_CMD_GAP 16 /* in clock cycles */
#define SD_EMMC_DESC_BUF_LEN PAGE_SIZE

#define SD_EMMC_PRE_REQ_DONE BIT(0)
#define SD_EMMC_DESC_CHAIN_MODE BIT(1)

#define MUX_CLK_NUM_PARENTS 2

struct meson_tuning_params {
	u8 core_phase;
	u8 tx_phase;
	u8 rx_phase;
};

struct sd_emmc_desc {
	u32 cmd_cfg;
	u32 cmd_arg;
	u32 cmd_data;
	u32 cmd_resp;
};

struct meson_host {
	struct device *dev;
	struct mmc_host *mmc;
	struct mmc_command *cmd;

	spinlock_t lock;
	void __iomem *regs;
	struct clk *core_clk;
	struct clk_mux mux;
	struct clk *mux_clk;
	unsigned long current_clock;

	struct clk_divider cfg_div;
	struct clk *cfg_div_clk;

	unsigned int bounce_buf_size;
	void *bounce_buf;
	dma_addr_t bounce_dma_addr;
	struct sd_emmc_desc *descs;
	dma_addr_t descs_dma_addr;

	struct meson_tuning_params tp;
	bool vqmmc_enabled;
};

#define CMD_CFG_LENGTH_MASK GENMASK(8, 0)
#define CMD_CFG_BLOCK_MODE BIT(9)
#define CMD_CFG_R1B BIT(10)
#define CMD_CFG_END_OF_CHAIN BIT(11)
#define CMD_CFG_TIMEOUT_MASK GENMASK(15, 12)
#define CMD_CFG_NO_RESP BIT(16)
#define CMD_CFG_NO_CMD BIT(17)
#define CMD_CFG_DATA_IO BIT(18)
#define CMD_CFG_DATA_WR BIT(19)
#define CMD_CFG_RESP_NOCRC BIT(20)
#define CMD_CFG_RESP_128 BIT(21)
#define CMD_CFG_RESP_NUM BIT(22)
#define CMD_CFG_DATA_NUM BIT(23)
#define CMD_CFG_CMD_INDEX_MASK GENMASK(29, 24)
#define CMD_CFG_ERROR BIT(30)
#define CMD_CFG_OWNER BIT(31)

#define CMD_DATA_MASK GENMASK(31, 2)
#define CMD_DATA_BIG_ENDIAN BIT(1)
#define CMD_DATA_SRAM BIT(0)
#define CMD_RESP_MASK GENMASK(31, 1)
#define CMD_RESP_SRAM BIT(0)

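/*
 * Convert the data timeout from nanoseconds to milliseconds; the value is
 * later programmed as ilog2(ms) into CMD_CFG_TIMEOUT_MASK, so round it up
 * to a power of two and cap it at 2^15 ms.
 */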
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
{
	unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;

	if (!timeout)
		return SD_EMMC_CMD_TIMEOUT_DATA;

	timeout = roundup_pow_of_two(timeout);

	return min(timeout, 32768U); /* max. 2^15 ms */
}

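/*
 * Pick the command to issue next within the same request: the data command
 * after a successful CMD23, or the stop command if a multi-block transfer
 * had no sbc or ended in error.
 */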
static struct mmc_command *meson_mmc_get_next_command(struct mmc_command *cmd)
{
	if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error)
		return cmd->mrq->cmd;
	else if (mmc_op_multi(cmd->opcode) &&
		 (!cmd->mrq->sbc || cmd->error || cmd->data->error))
		return cmd->mrq->stop;
	else
		return NULL;
}

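/*
 * Decide per request whether descriptor chain mode can be used; it requires
 * every scatterlist entry to be 8-byte aligned, otherwise the transfer falls
 * back to the bounce buffer.
 */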
static void meson_mmc_get_transfer_mode(struct mmc_host *mmc,
					struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;
	struct scatterlist *sg;
	int i;
	bool use_desc_chain_mode = true;

	/*
	 * Broken SDIO with AP6255-based WiFi on Khadas VIM Pro has been
	 * reported. For some strange reason this occurs in descriptor
	 * chain mode only. So let's fall back to bounce buffer mode
	 * for command SD_IO_RW_EXTENDED.
	 */
	if (mrq->cmd->opcode == SD_IO_RW_EXTENDED)
		return;

	for_each_sg(data->sg, sg, data->sg_len, i)
		/* check for 8 byte alignment */
		if (sg->offset & 7) {
			WARN_ONCE(1, "unaligned scatterlist buffer\n");
			use_desc_chain_mode = false;
			break;
		}

	if (use_desc_chain_mode)
		data->host_cookie |= SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_desc_chain_mode(const struct mmc_data *data)
{
	return data->host_cookie & SD_EMMC_DESC_CHAIN_MODE;
}

static inline bool meson_mmc_bounce_buf_read(const struct mmc_data *data)
{
	return data && data->flags & MMC_DATA_READ &&
	       !meson_mmc_desc_chain_mode(data);
}

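/* ->pre_req/->post_req hooks: map/unmap the scatterlist for chain mode */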
static void meson_mmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	if (!data)
		return;

	meson_mmc_get_transfer_mode(mmc, mrq);
	data->host_cookie |= SD_EMMC_PRE_REQ_DONE;

	if (!meson_mmc_desc_chain_mode(data))
		return;

	data->sg_count = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len,
				    mmc_get_dma_dir(data));
	if (!data->sg_count)
		dev_err(mmc_dev(mmc), "dma_map_sg failed");
}

static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
			       int err)
{
	struct mmc_data *data = mrq->data;

	if (data && meson_mmc_desc_chain_mode(data) && data->sg_count)
		dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
			     mmc_get_dma_dir(data));
}

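/*
 * Program a new MMC clock rate: the divider clock is only changed while
 * CFG_STOP_CLOCK is set, and the clock is left stopped when a rate of 0
 * is requested.
 */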
static int meson_mmc_clk_set(struct meson_host *host, unsigned long clk_rate)
{
	struct mmc_host *mmc = host->mmc;
	int ret;
	u32 cfg;

	if (clk_rate) {
		if (WARN_ON(clk_rate > mmc->f_max))
			clk_rate = mmc->f_max;
		else if (WARN_ON(clk_rate < mmc->f_min))
			clk_rate = mmc->f_min;
	}

	if (clk_rate == host->current_clock)
		return 0;

	/* stop clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	if (!(cfg & CFG_STOP_CLOCK)) {
		cfg |= CFG_STOP_CLOCK;
		writel(cfg, host->regs + SD_EMMC_CFG);
	}

	dev_dbg(host->dev, "change clock rate %u -> %lu\n",
		mmc->actual_clock, clk_rate);

	if (!clk_rate) {
		mmc->actual_clock = 0;
		host->current_clock = 0;
		/* return with clock being stopped */
		return 0;
	}

	ret = clk_set_rate(host->cfg_div_clk, clk_rate);
	if (ret) {
		dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
			clk_rate, ret);
		return ret;
	}

	mmc->actual_clock = clk_get_rate(host->cfg_div_clk);
	host->current_clock = clk_rate;

	if (clk_rate != mmc->actual_clock)
		dev_dbg(host->dev,
			"divider requested rate %lu != actual rate %u\n",
			clk_rate, mmc->actual_clock);

	/* (re)start clock */
	cfg = readl(host->regs + SD_EMMC_CFG);
	cfg &= ~CFG_STOP_CLOCK;
	writel(cfg, host->regs + SD_EMMC_CFG);

	return 0;
}

/*
 * The SD/eMMC IP block has an internal mux and divider used for
 * generating the MMC clock. Use the clock framework to create and
 * manage these clocks.
 */
static int meson_mmc_clk_init(struct meson_host *host)
{
	struct clk_init_data init;
	char clk_name[32];
	int i, ret = 0;
	const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
	const char *clk_div_parents[1];
	u32 clk_reg;

	/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
	clk_reg = 0;
	clk_reg |= CLK_ALWAYS_ON;
	clk_reg |= CLK_DIV_MASK;
	clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	writel(clk_reg, host->regs + SD_EMMC_CLOCK);

	/* get the mux parents */
	for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
		struct clk *clk;
		char name[16];

		snprintf(name, sizeof(name), "clkin%d", i);
		clk = devm_clk_get(host->dev, name);
		if (IS_ERR(clk)) {
			if (clk != ERR_PTR(-EPROBE_DEFER))
				dev_err(host->dev, "Missing clock %s\n", name);
			return PTR_ERR(clk);
		}

		mux_parent_names[i] = __clk_get_name(clk);
	}

	/* create the mux */
	snprintf(clk_name, sizeof(clk_name), "%s#mux", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_mux_ops;
	init.flags = 0;
	init.parent_names = mux_parent_names;
	init.num_parents = MUX_CLK_NUM_PARENTS;

	host->mux.reg = host->regs + SD_EMMC_CLOCK;
	host->mux.shift = __bf_shf(CLK_SRC_MASK);
	host->mux.mask = CLK_SRC_MASK >> host->mux.shift;
	host->mux.flags = 0;
	host->mux.table = NULL;
	host->mux.hw.init = &init;

	host->mux_clk = devm_clk_register(host->dev, &host->mux.hw);
	if (WARN_ON(IS_ERR(host->mux_clk)))
		return PTR_ERR(host->mux_clk);

	/* create the divider */
	snprintf(clk_name, sizeof(clk_name), "%s#div", dev_name(host->dev));
	init.name = clk_name;
	init.ops = &clk_divider_ops;
	init.flags = CLK_SET_RATE_PARENT;
	clk_div_parents[0] = __clk_get_name(host->mux_clk);
	init.parent_names = clk_div_parents;
	init.num_parents = ARRAY_SIZE(clk_div_parents);

	host->cfg_div.reg = host->regs + SD_EMMC_CLOCK;
	host->cfg_div.shift = __bf_shf(CLK_DIV_MASK);
	host->cfg_div.width = __builtin_popcountl(CLK_DIV_MASK);
	host->cfg_div.hw.init = &init;
	host->cfg_div.flags = CLK_DIVIDER_ONE_BASED |
		CLK_DIVIDER_ROUND_CLOSEST;

	host->cfg_div_clk = devm_clk_register(host->dev, &host->cfg_div.hw);
	if (WARN_ON(PTR_ERR_OR_ZERO(host->cfg_div_clk)))
		return PTR_ERR(host->cfg_div_clk);

	ret = clk_prepare_enable(host->cfg_div_clk);
	if (ret)
		return ret;

	/* Get the nearest minimum clock to 400KHz */
	host->mmc->f_min = clk_round_rate(host->cfg_div_clk, 400000);

	ret = meson_mmc_clk_set(host, host->mmc->f_min);
	if (ret)
		clk_disable_unprepare(host->cfg_div_clk);

	return ret;
}

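/* Apply the current core/tx/rx phase settings with the MMC clock stopped */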
static void meson_mmc_set_tuning_params(struct mmc_host *mmc)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 regval;

	/* stop clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval |= CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);

	regval = readl(host->regs + SD_EMMC_CLOCK);
	regval &= ~CLK_CORE_PHASE_MASK;
	regval |= FIELD_PREP(CLK_CORE_PHASE_MASK, host->tp.core_phase);
	regval &= ~CLK_TX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_TX_PHASE_MASK, host->tp.tx_phase);
	regval &= ~CLK_RX_PHASE_MASK;
	regval |= FIELD_PREP(CLK_RX_PHASE_MASK, host->tp.rx_phase);
	writel(regval, host->regs + SD_EMMC_CLOCK);

	/* start clock */
	regval = readl(host->regs + SD_EMMC_CFG);
	regval &= ~CFG_STOP_CLOCK;
	writel(regval, host->regs + SD_EMMC_CFG);
}

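/* ->set_ios: handle power, bus width, DDR timing and clock changes */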
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 bus_width, val;
	int err;

	/*
	 * GPIO regulator, only controls switching between 1v8 and
	 * 3v3, doesn't support MMC_POWER_OFF, MMC_POWER_ON.
	 */
	switch (ios->power_mode) {
	case MMC_POWER_OFF:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
			regulator_disable(mmc->supply.vqmmc);
			host->vqmmc_enabled = false;
		}

		break;

	case MMC_POWER_UP:
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;

	case MMC_POWER_ON:
		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
			int ret = regulator_enable(mmc->supply.vqmmc);

			if (ret < 0)
				dev_err(host->dev,
					"failed to enable vqmmc regulator\n");
			else
				host->vqmmc_enabled = true;
		}

		break;
	}

	/* Bus width */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_1:
		bus_width = CFG_BUS_WIDTH_1;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = CFG_BUS_WIDTH_4;
		break;
	case MMC_BUS_WIDTH_8:
		bus_width = CFG_BUS_WIDTH_8;
		break;
	default:
		dev_err(host->dev, "Invalid ios->bus_width: %u. Setting to 4.\n",
			ios->bus_width);
		bus_width = CFG_BUS_WIDTH_4;
	}

	val = readl(host->regs + SD_EMMC_CFG);
	val &= ~CFG_BUS_WIDTH_MASK;
	val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);

	val &= ~CFG_DDR;
	if (ios->timing == MMC_TIMING_UHS_DDR50 ||
	    ios->timing == MMC_TIMING_MMC_DDR52 ||
	    ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_DDR;

	val &= ~CFG_CHK_DS;
	if (ios->timing == MMC_TIMING_MMC_HS400)
		val |= CFG_CHK_DS;

	err = meson_mmc_clk_set(host, ios->clock);
	if (err)
		dev_err(host->dev, "Failed to set clock: %d\n", err);

	writel(val, host->regs + SD_EMMC_CFG);
	dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
}

static void meson_mmc_request_done(struct mmc_host *mmc,
				   struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);

	host->cmd = NULL;
	mmc_request_done(host->mmc, mrq);
}

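/* Update the CFG block-length field, which holds log2 of the block size */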
static void meson_mmc_set_blksz(struct mmc_host *mmc, unsigned int blksz)
{
	struct meson_host *host = mmc_priv(mmc);
	u32 cfg, blksz_old;

	cfg = readl(host->regs + SD_EMMC_CFG);
	blksz_old = FIELD_GET(CFG_BLK_LEN_MASK, cfg);

	if (!is_power_of_2(blksz))
		dev_err(host->dev, "blksz %u is not a power of 2\n", blksz);

	blksz = ilog2(blksz);

	/* check if block-size matches, if not update */
	if (blksz == blksz_old)
		return;

	dev_dbg(host->dev, "%s: update blk_len %d -> %d\n", __func__,
		blksz_old, blksz);

	cfg &= ~CFG_BLK_LEN_MASK;
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, blksz);
	writel(cfg, host->regs + SD_EMMC_CFG);
}

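/* Translate the MMC_RSP_* flags of a command into CMD_CFG response bits */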
static void meson_mmc_set_response_bits(struct mmc_command *cmd, u32 *cmd_cfg)
{
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136)
			*cmd_cfg |= CMD_CFG_RESP_128;
		*cmd_cfg |= CMD_CFG_RESP_NUM;

		if (!(cmd->flags & MMC_RSP_CRC))
			*cmd_cfg |= CMD_CFG_RESP_NOCRC;

		if (cmd->flags & MMC_RSP_BUSY)
			*cmd_cfg |= CMD_CFG_R1B;
	} else {
		*cmd_cfg |= CMD_CFG_NO_RESP;
	}
}

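/*
 * Build one descriptor per scatterlist entry and kick the DMA engine; only
 * the first descriptor carries the command, the last one is flagged as end
 * of chain.
 */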
static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
{
	struct meson_host *host = mmc_priv(mmc);
	struct sd_emmc_desc *desc = host->descs;
	struct mmc_data *data = host->cmd->data;
	struct scatterlist *sg;
	u32 start;
	int i;

	if (data->flags & MMC_DATA_WRITE)
		cmd_cfg |= CMD_CFG_DATA_WR;

	if (data->blocks > 1) {
		cmd_cfg |= CMD_CFG_BLOCK_MODE;
		meson_mmc_set_blksz(mmc, data->blksz);
	}

	for_each_sg(data->sg, sg, data->sg_count, i) {
		unsigned int len = sg_dma_len(sg);

		if (data->blocks > 1)
			len /= data->blksz;

		desc[i].cmd_cfg = cmd_cfg;
		desc[i].cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, len);
		if (i > 0)
			desc[i].cmd_cfg |= CMD_CFG_NO_CMD;
		desc[i].cmd_arg = host->cmd->arg;
		desc[i].cmd_resp = 0;
		desc[i].cmd_data = sg_dma_address(sg);
	}
	desc[data->sg_count - 1].cmd_cfg |= CMD_CFG_END_OF_CHAIN;

	dma_wmb(); /* ensure descriptor is written before kicked */
	start = host->descs_dma_addr | START_DESC_BUSY;
	writel(start, host->regs + SD_EMMC_START);
}

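/*
 * Issue a single command, either via the descriptor chain for pre-mapped
 * data or through the bounce buffer and command registers.
 */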
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);
	struct mmc_data *data = cmd->data;
	u32 cmd_cfg = 0, cmd_data = 0;
	unsigned int xfer_bytes = 0;

	/* Setup descriptors */
	dma_rmb();

	host->cmd = cmd;

	cmd_cfg |= FIELD_PREP(CMD_CFG_CMD_INDEX_MASK, cmd->opcode);
	cmd_cfg |= CMD_CFG_OWNER;  /* owned by CPU */

	meson_mmc_set_response_bits(cmd, &cmd_cfg);

	/* data? */
	if (data) {
		data->bytes_xfered = 0;
		cmd_cfg |= CMD_CFG_DATA_IO;
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(meson_mmc_get_timeout_msecs(data)));

		if (meson_mmc_desc_chain_mode(data)) {
			meson_mmc_desc_chain_transfer(mmc, cmd_cfg);
			return;
		}

		if (data->blocks > 1) {
			cmd_cfg |= CMD_CFG_BLOCK_MODE;
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK,
					      data->blocks);
			meson_mmc_set_blksz(mmc, data->blksz);
		} else {
			cmd_cfg |= FIELD_PREP(CMD_CFG_LENGTH_MASK, data->blksz);
		}

		xfer_bytes = data->blksz * data->blocks;
		if (data->flags & MMC_DATA_WRITE) {
			cmd_cfg |= CMD_CFG_DATA_WR;
			WARN_ON(xfer_bytes > host->bounce_buf_size);
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buf, xfer_bytes);
			dma_wmb();
		}

		cmd_data = host->bounce_dma_addr & CMD_DATA_MASK;
	} else {
		cmd_cfg |= FIELD_PREP(CMD_CFG_TIMEOUT_MASK,
				      ilog2(SD_EMMC_CMD_TIMEOUT));
	}

	/* Last descriptor */
	cmd_cfg |= CMD_CFG_END_OF_CHAIN;
	writel(cmd_cfg, host->regs + SD_EMMC_CMD_CFG);
	writel(cmd_data, host->regs + SD_EMMC_CMD_DAT);
	writel(0, host->regs + SD_EMMC_CMD_RSP);
	wmb(); /* ensure descriptor is written before kicked */
	writel(cmd->arg, host->regs + SD_EMMC_CMD_ARG);
}

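/*
 * ->request: run the pre/post mapping here for requests the core did not
 * prepare, then issue CMD23 (if present) followed by the command.
 */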
static void meson_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct meson_host *host = mmc_priv(mmc);
	bool needs_pre_post_req = mrq->data &&
			!(mrq->data->host_cookie & SD_EMMC_PRE_REQ_DONE);

	if (needs_pre_post_req) {
		meson_mmc_get_transfer_mode(mmc, mrq);
		if (!meson_mmc_desc_chain_mode(mrq->data))
			needs_pre_post_req = false;
	}

	if (needs_pre_post_req)
		meson_mmc_pre_req(mmc, mrq);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	meson_mmc_start_cmd(mmc, mrq->sbc ?: mrq->cmd);

	if (needs_pre_post_req)
		meson_mmc_post_req(mmc, mrq, 0);
}

static void meson_mmc_read_resp(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct meson_host *host = mmc_priv(mmc);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP3);
		cmd->resp[1] = readl(host->regs + SD_EMMC_CMD_RSP2);
		cmd->resp[2] = readl(host->regs + SD_EMMC_CMD_RSP1);
		cmd->resp[3] = readl(host->regs + SD_EMMC_CMD_RSP);
	} else if (cmd->flags & MMC_RSP_PRESENT) {
		cmd->resp[0] = readl(host->regs + SD_EMMC_CMD_RSP);
	}
}

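/*
 * Hard IRQ handler: check and ack the enabled status bits, record errors,
 * and either complete the request or wake the threaded handler for bounce
 * buffer reads and follow-up commands.
 */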
static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *cmd;
	struct mmc_data *data;
	u32 irq_en, status, raw_status;
	irqreturn_t ret = IRQ_HANDLED;

	if (WARN_ON(!host))
		return IRQ_NONE;

	cmd = host->cmd;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;

	spin_lock(&host->lock);

	irq_en = readl(host->regs + SD_EMMC_IRQ_EN);
	raw_status = readl(host->regs + SD_EMMC_STATUS);
	status = raw_status & irq_en;

	if (!status) {
		dev_warn(host->dev, "Spurious IRQ! status=0x%08x, irq_en=0x%08x\n",
			 raw_status, irq_en);
		ret = IRQ_NONE;
		goto out;
	}

	meson_mmc_read_resp(host->mmc, cmd);

	cmd->error = 0;
	if (status & IRQ_RXD_ERR_MASK) {
		dev_dbg(host->dev, "Unhandled IRQ: RXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_TXD_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: TXD error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_DESC_ERR)
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor error\n");
	if (status & IRQ_RESP_ERR) {
		dev_dbg(host->dev, "Unhandled IRQ: Response error\n");
		cmd->error = -EILSEQ;
	}
	if (status & IRQ_RESP_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Response timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_DESC_TIMEOUT) {
		dev_dbg(host->dev, "Unhandled IRQ: Descriptor timeout\n");
		cmd->error = -ETIMEDOUT;
	}
	if (status & IRQ_SDIO)
		dev_dbg(host->dev, "Unhandled IRQ: SDIO.\n");

	if (status & (IRQ_END_OF_CHAIN | IRQ_RESP_STATUS)) {
		if (data && !cmd->error)
			data->bytes_xfered = data->blksz * data->blocks;
		if (meson_mmc_bounce_buf_read(data) ||
		    meson_mmc_get_next_command(cmd))
			ret = IRQ_WAKE_THREAD;
	} else {
		dev_warn(host->dev, "Unknown IRQ! status=0x%04x: MMC CMD%u arg=0x%08x flags=0x%08x stop=%d\n",
			 status, cmd->opcode, cmd->arg,
			 cmd->flags, cmd->mrq->stop ? 1 : 0);
		if (cmd->data) {
			struct mmc_data *data = cmd->data;

			dev_warn(host->dev, "\tblksz %u blocks %u flags 0x%08x (%s%s)",
				 data->blksz, data->blocks, data->flags,
				 data->flags & MMC_DATA_WRITE ? "write" : "",
				 data->flags & MMC_DATA_READ ? "read" : "");
		}
	}

out:
	/* ack all (enabled) interrupts */
	writel(status, host->regs + SD_EMMC_STATUS);

	if (ret == IRQ_HANDLED)
		meson_mmc_request_done(host->mmc, cmd->mrq);

	spin_unlock(&host->lock);

	return ret;
}

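/*
 * Threaded IRQ handler: copy read data out of the bounce buffer and issue
 * the next command of the request, if any.
 */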
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
{
	struct meson_host *host = dev_id;
	struct mmc_command *next_cmd, *cmd = host->cmd;
	struct mmc_data *data;
	unsigned int xfer_bytes;

	if (WARN_ON(!cmd))
		return IRQ_NONE;

	data = cmd->data;
	if (meson_mmc_bounce_buf_read(data)) {
		xfer_bytes = data->blksz * data->blocks;
		WARN_ON(xfer_bytes > host->bounce_buf_size);
		sg_copy_from_buffer(data->sg, data->sg_len,
				    host->bounce_buf, xfer_bytes);
	}

	next_cmd = meson_mmc_get_next_command(cmd);
	if (next_cmd)
		meson_mmc_start_cmd(host->mmc, next_cmd);
	else
		meson_mmc_request_done(host->mmc, cmd->mrq);

	return IRQ_HANDLED;
}

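/* Tuning: step the rx clock phase until mmc_send_tuning() succeeds */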
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
	struct meson_tuning_params tp_old = host->tp;
	int ret = -EINVAL, i, cmd_error;

	dev_info(mmc_dev(mmc), "(re)tuning...\n");

	for (i = CLK_PHASE_0; i <= CLK_PHASE_270; i++) {
		host->tp.rx_phase = i;
		/* exclude the active parameter set if retuning */
		if (!memcmp(&tp_old, &host->tp, sizeof(tp_old)) &&
		    mmc->doing_retune)
			continue;
		meson_mmc_set_tuning_params(mmc);
		ret = mmc_send_tuning(mmc, opcode, &cmd_error);
		if (!ret)
			break;
	}

	return ret;
}

/*
 * NOTE: we only need this until the GPIO/pinctrl driver can handle
 * interrupts. For now, the MMC core will use this for polling.
 */
static int meson_mmc_get_cd(struct mmc_host *mmc)
{
	int status = mmc_gpio_get_cd(mmc);

	if (status == -ENOSYS)
		return 1; /* assume present */

	return status;
}

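/* Reset SD_EMMC_CFG to sane defaults: response timeout, command gap, block length */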
static void meson_mmc_cfg_init(struct meson_host *host)
{
	u32 cfg = 0;

	cfg |= FIELD_PREP(CFG_RESP_TIMEOUT_MASK,
			  ilog2(SD_EMMC_CFG_RESP_TIMEOUT));
	cfg |= FIELD_PREP(CFG_RC_CC_MASK, ilog2(SD_EMMC_CFG_CMD_GAP));
	cfg |= FIELD_PREP(CFG_BLK_LEN_MASK, ilog2(SD_EMMC_CFG_BLK_SIZE));

	writel(cfg, host->regs + SD_EMMC_CFG);
}

static const struct mmc_host_ops meson_mmc_ops = {
	.request	= meson_mmc_request,
	.set_ios	= meson_mmc_set_ios,
	.get_cd		= meson_mmc_get_cd,
	.pre_req	= meson_mmc_pre_req,
	.post_req	= meson_mmc_post_req,
	.execute_tuning	= meson_mmc_execute_tuning,
};

static int meson_mmc_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct meson_host *host;
	struct mmc_host *mmc;
	int ret, irq;

	mmc = mmc_alloc_host(sizeof(struct meson_host), &pdev->dev);
	if (!mmc)
		return -ENOMEM;
	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, host);

	spin_lock_init(&host->lock);

	/* Get regulators and the supported OCR mask */
	host->vqmmc_enabled = false;
	ret = mmc_regulator_get_supply(mmc);
	if (ret == -EPROBE_DEFER)
		goto free_host;

	ret = mmc_of_parse(mmc);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_warn(&pdev->dev, "error parsing DT: %d\n", ret);
		goto free_host;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(host->regs)) {
		ret = PTR_ERR(host->regs);
		goto free_host;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "failed to get interrupt resource.\n");
		ret = -EINVAL;
		goto free_host;
	}

	host->core_clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(host->core_clk)) {
		ret = PTR_ERR(host->core_clk);
		goto free_host;
	}

	ret = clk_prepare_enable(host->core_clk);
	if (ret)
		goto free_host;

	host->tp.core_phase = CLK_PHASE_180;
	host->tp.tx_phase = CLK_PHASE_0;
	host->tp.rx_phase = CLK_PHASE_0;

	ret = meson_mmc_clk_init(host);
	if (ret)
		goto err_core_clk;

	/* set config to sane default */
	meson_mmc_cfg_init(host);

	/* Stop execution */
	writel(0, host->regs + SD_EMMC_START);

	/* clear, ack, enable all interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_STATUS);
	writel(IRQ_EN_MASK, host->regs + SD_EMMC_IRQ_EN);

	ret = devm_request_threaded_irq(&pdev->dev, irq, meson_mmc_irq,
					meson_mmc_irq_thread, IRQF_SHARED,
					NULL, host);
	if (ret)
		goto err_div_clk;

	mmc->caps |= MMC_CAP_CMD23;
	mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
	mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
	mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
	mmc->max_seg_size = mmc->max_req_size;

	/* data bounce buffer */
	host->bounce_buf_size = mmc->max_req_size;
	host->bounce_buf =
		dma_alloc_coherent(host->dev, host->bounce_buf_size,
				   &host->bounce_dma_addr, GFP_KERNEL);
	if (host->bounce_buf == NULL) {
		dev_err(host->dev, "Unable to allocate DMA bounce buffer.\n");
		ret = -ENOMEM;
		goto err_div_clk;
	}

	host->descs = dma_alloc_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
					 &host->descs_dma_addr, GFP_KERNEL);
	if (!host->descs) {
		dev_err(host->dev, "Allocating descriptor DMA buffer failed\n");
		ret = -ENOMEM;
		goto err_bounce_buf;
	}

	mmc->ops = &meson_mmc_ops;
	mmc_add_host(mmc);

	return 0;

err_bounce_buf:
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);
err_div_clk:
	clk_disable_unprepare(host->cfg_div_clk);
err_core_clk:
	clk_disable_unprepare(host->core_clk);
free_host:
	mmc_free_host(mmc);
	return ret;
}

static int meson_mmc_remove(struct platform_device *pdev)
{
	struct meson_host *host = dev_get_drvdata(&pdev->dev);

	mmc_remove_host(host->mmc);

	/* disable interrupts */
	writel(0, host->regs + SD_EMMC_IRQ_EN);

	dma_free_coherent(host->dev, SD_EMMC_DESC_BUF_LEN,
			  host->descs, host->descs_dma_addr);
	dma_free_coherent(host->dev, host->bounce_buf_size,
			  host->bounce_buf, host->bounce_dma_addr);

	clk_disable_unprepare(host->cfg_div_clk);
	clk_disable_unprepare(host->core_clk);

	mmc_free_host(host->mmc);

	return 0;
}

static const struct of_device_id meson_mmc_of_match[] = {
	{ .compatible = "amlogic,meson-gx-mmc", },
	{ .compatible = "amlogic,meson-gxbb-mmc", },
	{ .compatible = "amlogic,meson-gxl-mmc", },
	{ .compatible = "amlogic,meson-gxm-mmc", },
	{}
};
MODULE_DEVICE_TABLE(of, meson_mmc_of_match);

static struct platform_driver meson_mmc_driver = {
	.probe		= meson_mmc_probe,
	.remove		= meson_mmc_remove,
	.driver		= {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(meson_mmc_of_match),
	},
};

module_platform_driver(meson_mmc_driver);

MODULE_DESCRIPTION("Amlogic S905*/GX* SD/eMMC driver");
MODULE_AUTHOR("Kevin Hilman <khilman@baylibre.com>");
MODULE_LICENSE("GPL v2");