/*
* Shared part of driver for MMC/SDHC controller on Cavium OCTEON and
* ThunderX SOCs.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2012-2017 Cavium Inc.
* Authors:
* David Daney <david.daney@cavium.com>
* Peter Swain <pswain@cavium.com>
* Steven J. Hill <steven.hill@cavium.com>
* Jan Glauber <jglauber@cavium.com>
*/
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/scatterlist.h>
#include <linux/time.h>
#include "cavium.h"
/*
 * Human-readable names for the EMM interrupt sources, in hardware bit
 * order; shared with the platform glue (declared in cavium.h).
 */
const char *cvm_mmc_irq_names[] = {
	"MMC Buffer",
	"MMC Command",
	"MMC DMA",
	"MMC Command Error",
	"MMC DMA Error",
	"MMC Switch",
	"MMC Switch Error",
	"MMC DMA int Fifo",
	"MMC DMA int",
};
/*
* The Cavium MMC host hardware assumes that all commands have fixed
* command and response types. These are correct if MMC devices are
* being used. However, non-MMC devices like SD use command and
* response types that are unexpected by the host hardware.
*
* The command and response types can be overridden by supplying an
* XOR value that is applied to the type. We calculate the XOR value
* from the values in this table and the flags passed from the MMC
* core.
*/
/*
 * Per-command-index {ctype, rtype} pairs the hardware assumes
 * (CMD0..CMD63). From cvm_mmc_get_cr_mods(): ctype 0 = no data,
 * 1 = read, 2 = write; rtype encodes the expected response format.
 * CMD56's ctype is patched at runtime from the argument's direction
 * bit (see cvm_mmc_get_cr_mods()).
 */
static struct cvm_mmc_cr_type cvm_mmc_cr_types[] = {
	{0, 0},		/* CMD0 */
	{0, 3},		/* CMD1 */
	{0, 2},		/* CMD2 */
	{0, 1},		/* CMD3 */
	{0, 0},		/* CMD4 */
	{0, 1},		/* CMD5 */
	{0, 1},		/* CMD6 */
	{0, 1},		/* CMD7 */
	{1, 1},		/* CMD8 */
	{0, 2},		/* CMD9 */
	{0, 2},		/* CMD10 */
	{1, 1},		/* CMD11 */
	{0, 1},		/* CMD12 */
	{0, 1},		/* CMD13 */
	{1, 1},		/* CMD14 */
	{0, 0},		/* CMD15 */
	{0, 1},		/* CMD16 */
	{1, 1},		/* CMD17 */
	{1, 1},		/* CMD18 */
	{3, 1},		/* CMD19 */
	{2, 1},		/* CMD20 */
	{0, 0},		/* CMD21 */
	{0, 0},		/* CMD22 */
	{0, 1},		/* CMD23 */
	{2, 1},		/* CMD24 */
	{2, 1},		/* CMD25 */
	{2, 1},		/* CMD26 */
	{2, 1},		/* CMD27 */
	{0, 1},		/* CMD28 */
	{0, 1},		/* CMD29 */
	{1, 1},		/* CMD30 */
	{1, 1},		/* CMD31 */
	{0, 0},		/* CMD32 */
	{0, 0},		/* CMD33 */
	{0, 0},		/* CMD34 */
	{0, 1},		/* CMD35 */
	{0, 1},		/* CMD36 */
	{0, 0},		/* CMD37 */
	{0, 1},		/* CMD38 */
	{0, 4},		/* CMD39 */
	{0, 5},		/* CMD40 */
	{0, 0},		/* CMD41 */
	{2, 1},		/* CMD42 */
	{0, 0},		/* CMD43 */
	{0, 0},		/* CMD44 */
	{0, 0},		/* CMD45 */
	{0, 0},		/* CMD46 */
	{0, 0},		/* CMD47 */
	{0, 0},		/* CMD48 */
	{0, 0},		/* CMD49 */
	{0, 0},		/* CMD50 */
	{0, 0},		/* CMD51 */
	{0, 0},		/* CMD52 */
	{0, 0},		/* CMD53 */
	{0, 0},		/* CMD54 */
	{0, 1},		/* CMD55 */
	{0xff, 0xff},	/* CMD56 */
	{0, 0},		/* CMD57 */
	{0, 0},		/* CMD58 */
	{0, 0},		/* CMD59 */
	{0, 0},		/* CMD60 */
	{0, 0},		/* CMD61 */
	{0, 0},		/* CMD62 */
	{0, 0}		/* CMD63 */
};
static struct cvm_mmc_cr_mods cvm_mmc_get_cr_mods(struct mmc_command *cmd)
{
struct cvm_mmc_cr_type *cr;
u8 hardware_ctype, hardware_rtype;
u8 desired_ctype = 0, desired_rtype = 0;
struct cvm_mmc_cr_mods r;
cr = cvm_mmc_cr_types + (cmd->opcode & 0x3f);
hardware_ctype = cr->ctype;
hardware_rtype = cr->rtype;
if (cmd->opcode == MMC_GEN_CMD)
hardware_ctype = (cmd->arg & 1) ? 1 : 2;
switch (mmc_cmd_type(cmd)) {
case MMC_CMD_ADTC:
desired_ctype = (cmd->data->flags & MMC_DATA_WRITE) ? 2 : 1;
break;
case MMC_CMD_AC:
case MMC_CMD_BC:
case MMC_CMD_BCR:
desired_ctype = 0;
break;
}
switch (mmc_resp_type(cmd)) {
case MMC_RSP_NONE:
desired_rtype = 0;
break;
case MMC_RSP_R1:/* MMC_RSP_R5, MMC_RSP_R6, MMC_RSP_R7 */
case MMC_RSP_R1B:
desired_rtype = 1;
break;
case MMC_RSP_R2:
desired_rtype = 2;
break;
case MMC_RSP_R3: /* MMC_RSP_R4 */
desired_rtype = 3;
break;
}
r.ctype_xor = desired_ctype ^ hardware_ctype;
r.rtype_xor = desired_rtype ^ hardware_rtype;
return r;
}
/* Report any of the three sticky error bits left by a bus switch. */
static void check_switch_errors(struct cvm_mmc_host *host)
{
	u64 sw_val = readq(host->base + MIO_EMM_SWITCH(host));

	if (sw_val & MIO_EMM_SWITCH_ERR0)
		dev_err(host->dev, "Switch power class error\n");
	if (sw_val & MIO_EMM_SWITCH_ERR1)
		dev_err(host->dev, "Switch hs timing error\n");
	if (sw_val & MIO_EMM_SWITCH_ERR2)
		dev_err(host->dev, "Switch bus width error\n");
}
/* Zero the BUS_ID field (bits 61:60) of a register value. */
static void clear_bus_id(u64 *reg)
{
	*reg &= ~GENMASK_ULL(61, 60);
}
/* Replace the BUS_ID field (bits 61:60) of a register value with @bus_id. */
static void set_bus_id(u64 *reg, int bus_id)
{
	clear_bus_id(reg);
	/*
	 * Use GENMASK_ULL like clear_bus_id()/get_bus_id(): bits 61:60 are
	 * out of range for a 32-bit GENMASK() on 32-bit builds.
	 */
	*reg |= FIELD_PREP(GENMASK_ULL(61, 60), bus_id);
}
/* Extract the BUS_ID field (bits 61:60) from a register value. */
static int get_bus_id(u64 reg)
{
	return FIELD_GET(GENMASK_ULL(61, 60), reg);
}
/*
 * We never set the switch_exe bit since that would interfere
 * with the commands sent by the MMC core.
 */
static void do_switch(struct cvm_mmc_host *host, u64 emm_switch)
{
	int timeout = 100;
	u64 rsp_sts;
	int bus_id;

	/*
	 * Mode settings are only latched from slot 0. Work around that
	 * hardware issue by writing the value with the bus id cleared
	 * first, then again with the real bus id.
	 */
	bus_id = get_bus_id(emm_switch);
	clear_bus_id(&emm_switch);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	set_bus_id(&emm_switch, bus_id);
	writeq(emm_switch, host->base + MIO_EMM_SWITCH(host));

	/* Poll until the controller drops SWITCH_VAL or we give up. */
	while (timeout--) {
		rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
		if (!(rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL))
			break;
		udelay(10);
	}
	check_switch_errors(host);
}
/* True if @new_val differs from the cached switch value in any field we care about. */
static bool switch_val_changed(struct cvm_mmc_slot *slot, u64 new_val)
{
	/* Compare BUS_ID, HS_TIMING, BUS_WIDTH, POWER_CLASS, CLK_HI, CLK_LO */
	const u64 match = 0x3001070fffffffffull;

	return (slot->cached_switch ^ new_val) & match;
}
/*
 * Program the data watchdog from a timeout in nanoseconds; @ns == 0
 * selects a default of 850 ms. No-op while the slot clock is unset.
 */
static void set_wdog(struct cvm_mmc_slot *slot, unsigned int ns)
{
	u64 ticks;

	if (!slot->clock)
		return;

	/* Convert the timeout into clock ticks. */
	ticks = ns ? (slot->clock * ns) / NSEC_PER_SEC
		   : (slot->clock * 850ull) / 1000ull;
	writeq(ticks, slot->host->base + MIO_EMM_WDOG(slot->host));
}
static void cvm_mmc_reset_bus(struct cvm_mmc_slot *slot)
{
struct cvm_mmc_host *host = slot->host;
u64 emm_switch, wdog;
emm_switch = readq(slot->host->base + MIO_EMM_SWITCH(host));
emm_switch &= ~(MIO_EMM_SWITCH_EXE | MIO_EMM_SWITCH_ERR0 |
MIO_EMM_SWITCH_ERR1 | MIO_EMM_SWITCH_ERR2);
set_bus_id(&emm_switch, slot->bus_id);
wdog = readq(slot->host->base + MIO_EMM_WDOG(host));
do_switch(slot->host, emm_switch);
slot->cached_switch = emm_switch;
msleep(20);
writeq(wdog, slot->host->base + MIO_EMM_WDOG(host));
}
/* Switch to another slot if needed */
static void cvm_mmc_switch_to(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	struct cvm_mmc_slot *old_slot;
	u64 emm_sample, emm_switch;

	if (slot->bus_id == host->last_slot)
		return;

	/* Save the outgoing slot's register state so it can be restored later. */
	if (host->last_slot >= 0 && host->slot[host->last_slot]) {
		old_slot = host->slot[host->last_slot];
		old_slot->cached_switch = readq(host->base + MIO_EMM_SWITCH(host));
		old_slot->cached_rca = readq(host->base + MIO_EMM_RCA(host));
	}

	/* Restore this slot's RCA and switch configuration. */
	writeq(slot->cached_rca, host->base + MIO_EMM_RCA(host));
	emm_switch = slot->cached_switch;
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	/* Restore the slot's command/data sampling delay counts. */
	emm_sample = FIELD_PREP(MIO_EMM_SAMPLE_CMD_CNT, slot->cmd_cnt) |
		     FIELD_PREP(MIO_EMM_SAMPLE_DAT_CNT, slot->dat_cnt);
	writeq(emm_sample, host->base + MIO_EMM_SAMPLE(host));

	host->last_slot = slot->bus_id;
}
/*
 * PIO read: unpack the controller's 64-bit buffer words into the
 * request's scatterlist, most significant byte first (bits 63:56
 * are the first byte of each word).
 */
static void do_read(struct cvm_mmc_host *host, struct mmc_request *req,
		    u64 dbuf)
{
	struct sg_mapping_iter *smi = &host->smi;
	int data_len = req->data->blocks * req->data->blksz;
	int bytes_xfered, shift = -1;
	u64 dat = 0;

	/* Auto inc from offset zero */
	writeq((0x10000 | (dbuf << 6)), host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		/* Advance to the next scatterlist segment when this one is drained. */
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		/* Fetch the next 64-bit word once the current one is fully copied. */
		if (shift < 0) {
			dat = readq(host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
		}

		while (smi->consumed < smi->length && shift >= 0) {
			((u8 *)smi->addr)[smi->consumed] = (dat >> shift) & 0xff;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}
	}

	sg_miter_stop(smi);
	req->data->bytes_xfered = bytes_xfered;
	req->data->error = 0;
}
static void do_write(struct mmc_request *req)
{
req->data->bytes_xfered = req->data->blocks * req->data->blksz;
req->data->error = 0;
}
/*
 * Copy the controller's response registers into the request. Long
 * (type 2 / 136-bit) responses span RSP_HI and RSP_LO; short response
 * types take 32 bits from RSP_LO, skipping the low byte.
 */
static void set_cmd_response(struct cvm_mmc_host *host, struct mmc_request *req,
			     u64 rsp_sts)
{
	u64 rsp_hi, rsp_lo;

	/* Nothing to copy if the controller latched no response. */
	if (!(rsp_sts & MIO_EMM_RSP_STS_RSP_VAL))
		return;

	rsp_lo = readq(host->base + MIO_EMM_RSP_LO(host));

	switch (FIELD_GET(MIO_EMM_RSP_STS_RSP_TYPE, rsp_sts)) {
	case 1:
	case 3:
		req->cmd->resp[0] = (rsp_lo >> 8) & 0xffffffff;
		req->cmd->resp[1] = 0;
		req->cmd->resp[2] = 0;
		req->cmd->resp[3] = 0;
		break;
	case 2:
		req->cmd->resp[3] = rsp_lo & 0xffffffff;
		req->cmd->resp[2] = (rsp_lo >> 32) & 0xffffffff;
		rsp_hi = readq(host->base + MIO_EMM_RSP_HI(host));
		req->cmd->resp[1] = rsp_hi & 0xffffffff;
		req->cmd->resp[0] = (rsp_hi >> 32) & 0xffffffff;
		break;
	}
}
/* Map the MMC data direction onto the DMA API direction. */
static int get_dma_dir(struct mmc_data *data)
{
	if (data->flags & MMC_DATA_WRITE)
		return DMA_TO_DEVICE;
	return DMA_FROM_DEVICE;
}
static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
mmc: cavium: Add the missing dma unmap when the dma has finished. This fixes the below calltrace when the CONFIG_DMA_API_DEBUG is enabled. DMA-API: thunderx_mmc 0000:01:01.4: cpu touching an active dma mapped cacheline [cln=0x000000002fdf9800] WARNING: CPU: 21 PID: 1 at kernel/dma/debug.c:596 debug_dma_assert_idle+0x1f8/0x270 Modules linked in: CPU: 21 PID: 1 Comm: init Not tainted 5.3.0-rc1-next-20190725-yocto-standard+ #64 Hardware name: Marvell OcteonTX CN96XX board (DT) pstate: 80400009 (Nzcv daif +PAN -UAO) pc : debug_dma_assert_idle+0x1f8/0x270 lr : debug_dma_assert_idle+0x1f8/0x270 sp : ffff0000113cfc10 x29: ffff0000113cfc10 x28: 0000ffff8c880000 x27: ffff800bc72a0000 x26: ffff000010ff8000 x25: ffff000010ff8940 x24: ffff000010ff8968 x23: 0000000000000000 x22: ffff000010e83700 x21: ffff000010ea2000 x20: ffff000010e835c8 x19: ffff800bc2c73300 x18: ffffffffffffffff x17: 0000000000000000 x16: 0000000000000000 x15: ffff000010e835c8 x14: 6d20616d64206576 x13: 69746361206e6120 x12: 676e696863756f74 x11: 20757063203a342e x10: 31303a31303a3030 x9 : 303020636d6d5f78 x8 : 3230303030303030 x7 : 00000000000002fd x6 : ffff000010fd57d0 x5 : 0000000000000000 x4 : ffff0000106c5210 x3 : 00000000ffffffff x2 : 0000800bee9c0000 x1 : 57d5843f4aa62800 x0 : 0000000000000000 Call trace: debug_dma_assert_idle+0x1f8/0x270 wp_page_copy+0xb0/0x688 do_wp_page+0xa8/0x5b8 __handle_mm_fault+0x600/0xd00 handle_mm_fault+0x118/0x1e8 do_page_fault+0x200/0x500 do_mem_abort+0x50/0xb0 el0_da+0x20/0x24 ---[ end trace a005534bd23e109f ]--- DMA-API: Mapped at: debug_dma_map_sg+0x94/0x350 cvm_mmc_request+0x3c4/0x988 __mmc_start_request+0x9c/0x1f8 mmc_start_request+0x7c/0xb0 mmc_blk_mq_issue_rq+0x5c4/0x7b8 Signed-off-by: Kevin Hao <haokexin@gmail.com> Fixes: ba3869ff32e4 ("mmc: cavium: Add core MMC driver for Cavium SOCs") Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-07-26 05:30:49 +03:00
dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
/*
 * Complete a scatter-gather (FIFO) DMA transfer: verify the FIFO has
 * drained, disable it, and unmap the buffers. Always returns 1.
 */
static int finish_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 fifo_cfg;
	int count;

	/* Check if there are any pending requests left */
	fifo_cfg = readq(host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	count = FIELD_GET(MIO_EMM_DMA_FIFO_CFG_COUNT, fifo_cfg);
	if (count)
		dev_err(host->dev, "%u requests still pending\n", count);

	data->bytes_xfered = data->blocks * data->blksz;
	data->error = 0;

	/* Clear and disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	return 1;
}
/* Finish whichever DMA flavor prepare_dma() selected for this transfer. */
static int finish_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return finish_dma_sg(host, data);
	return finish_dma_single(host, data);
}
/* Translate RSP_STS error bits into a Linux error code (0 on success). */
static int check_status(u64 rsp_sts)
{
	if (rsp_sts & (MIO_EMM_RSP_STS_RSP_BAD_STS |
		       MIO_EMM_RSP_STS_RSP_CRC_ERR |
		       MIO_EMM_RSP_STS_BLK_CRC_ERR))
		return -EILSEQ;
	if (rsp_sts & (MIO_EMM_RSP_STS_RSP_TIMEOUT |
		       MIO_EMM_RSP_STS_BLK_TIMEOUT))
		return -ETIMEDOUT;
	if (rsp_sts & MIO_EMM_RSP_STS_DBUF_ERR)
		return -EIO;
	return 0;
}
/* Try to clean up failed DMA. */
static void cleanup_dma(struct cvm_mmc_host *host, u64 rsp_sts)
{
	u64 emm_dma = readq(host->base + MIO_EMM_DMA(host));

	/* Re-arm the engine with DAT_NULL set to flush the pending transfer. */
	emm_dma |= FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		   FIELD_PREP(MIO_EMM_DMA_DAT_NULL, 1);
	set_bus_id(&emm_dma, get_bus_id(rsp_sts));
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
}
/*
 * Central interrupt handler: acknowledge all raised EMM interrupt
 * bits, move PIO data on buffer-done events, and complete the current
 * request once the command or DMA has finished (or failed).
 */
irqreturn_t cvm_mmc_interrupt(int irq, void *dev_id)
{
	struct cvm_mmc_host *host = dev_id;
	struct mmc_request *req;
	unsigned long flags = 0;
	u64 emm_int, rsp_sts;
	bool host_done;

	/* Some hosts need real locking; the rest get sparse annotations only. */
	if (host->need_irq_handler_lock)
		spin_lock_irqsave(&host->irq_handler_lock, flags);
	else
		__acquire(&host->irq_handler_lock);

	/* Clear interrupt bits (write 1 clears ). */
	emm_int = readq(host->base + MIO_EMM_INT(host));
	writeq(emm_int, host->base + MIO_EMM_INT(host));

	if (emm_int & MIO_EMM_INT_SWITCH_ERR)
		check_switch_errors(host);

	req = host->current_req;
	if (!req)
		goto out;

	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	/*
	 * dma_val set means DMA is still in progress. Don't touch
	 * the request and wait for the interrupt indicating that
	 * the DMA is finished.
	 */
	if ((rsp_sts & MIO_EMM_RSP_STS_DMA_VAL) && host->dma_active)
		goto out;

	/* PIO path: move data for a completed buffer (type 1 = read, 2 = write). */
	if (!host->dma_active && req->data &&
	    (emm_int & MIO_EMM_INT_BUF_DONE)) {
		unsigned int type = (rsp_sts >> 7) & 3;

		if (type == 1)
			do_read(host, req, rsp_sts & MIO_EMM_RSP_STS_DBUF);
		else if (type == 2)
			do_write(req);
	}

	host_done = emm_int & MIO_EMM_INT_CMD_DONE ||
		    emm_int & MIO_EMM_INT_DMA_DONE ||
		    emm_int & MIO_EMM_INT_CMD_ERR ||
		    emm_int & MIO_EMM_INT_DMA_ERR;

	if (!(host_done && req->done))
		goto no_req_done;

	req->cmd->error = check_status(rsp_sts);

	if (host->dma_active && req->data)
		if (!finish_dma(host, req->data))
			goto no_req_done;

	set_cmd_response(host, req, rsp_sts);

	/* A failed DMA may still be pending in the engine; try to abort it. */
	if ((emm_int & MIO_EMM_INT_DMA_ERR) &&
	    (rsp_sts & MIO_EMM_RSP_STS_DMA_PEND))
		cleanup_dma(host, rsp_sts);

	host->current_req = NULL;
	req->done(req);

no_req_done:
	if (host->dmar_fixup_done)
		host->dmar_fixup_done(host);
	/* The bus lock taken in cvm_mmc_request() is released here. */
	if (host_done)
		host->release_bus(host);
out:
	if (host->need_irq_handler_lock)
		spin_unlock_irqrestore(&host->irq_handler_lock, flags);
	else
		__release(&host->irq_handler_lock);
	return IRQ_RETVAL(emm_int != 0);
}
/*
 * Program DMA_CFG and if needed DMA_ADR.
 * Returns 0 on error, DMA address otherwise.
 */
static u64 prepare_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
	u64 dma_cfg, addr;
	int count, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;

	rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
	dma_cfg = FIELD_PREP(MIO_EMM_DMA_CFG_EN, 1) |
		  FIELD_PREP(MIO_EMM_DMA_CFG_RW, rw);
#ifdef __LITTLE_ENDIAN
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ENDIAN, 1);
#endif
	/* SIZE is expressed in 64-bit words, minus one. */
	dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_SIZE,
			      (sg_dma_len(&data->sg[0]) / 8) - 1);

	addr = sg_dma_address(&data->sg[0]);
	/* Without a separate ADR register the address lives inside DMA_CFG. */
	if (!host->big_dma_addr)
		dma_cfg |= FIELD_PREP(MIO_EMM_DMA_CFG_ADR, addr);
	writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host));

	pr_debug("[%s] sg_dma_len: %u total sg_elem: %d\n",
		 (rw) ? "W" : "R", sg_dma_len(&data->sg[0]), count);

	if (host->big_dma_addr)
		writeq(addr, host->dma_base + MIO_EMM_DMA_ADR(host));
	return addr;
}
/*
 * Queue complete sg list into the FIFO.
 * Returns 0 on error, 1 otherwise.
 */
static u64 prepare_dma_sg(struct cvm_mmc_host *host, struct mmc_data *data)
{
	struct scatterlist *sg;
	u64 fifo_cmd, addr;
	int count, i, rw;

	count = dma_map_sg(host->dev, data->sg, data->sg_len,
			   get_dma_dir(data));
	if (!count)
		return 0;
	/* The FIFO holds at most 16 entries. */
	if (count > 16)
		goto error;

	/* Enable FIFO by removing CLR bit */
	writeq(0, host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));

	for_each_sg(data->sg, sg, count, i) {
		/* Program DMA address */
		addr = sg_dma_address(sg);
		/* Addresses must be 8-byte aligned for this engine. */
		if (addr & 7)
			goto error;
		writeq(addr, host->dma_base + MIO_EMM_DMA_FIFO_ADR(host));

		/*
		 * If we have scatter-gather support we also have an extra
		 * register for the DMA addr, so no need to check
		 * host->big_dma_addr here.
		 */
		rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0;
		fifo_cmd = FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_RW, rw);

		/* enable interrupts on the last element */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_INTDIS,
				       (i + 1 == count) ? 0 : 1);

#ifdef __LITTLE_ENDIAN
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_ENDIAN, 1);
#endif
		/* SIZE is expressed in 64-bit words, minus one. */
		fifo_cmd |= FIELD_PREP(MIO_EMM_DMA_FIFO_CMD_SIZE,
				       sg_dma_len(sg) / 8 - 1);
		/*
		 * The write copies the address and the command to the FIFO
		 * and increments the FIFO's COUNT field.
		 */
		writeq(fifo_cmd, host->dma_base + MIO_EMM_DMA_FIFO_CMD(host));
		pr_debug("[%s] sg_dma_len: %u sg_elem: %d/%d\n",
			 (rw) ? "W" : "R", sg_dma_len(sg), i, count);
	}

	/*
	 * In difference to prepare_dma_single we don't return the
	 * address here, as it would not make sense for scatter-gather.
	 * The dma fixup is only required on models that don't support
	 * scatter-gather, so that is not a problem.
	 */
	return 1;

error:
	WARN_ON_ONCE(1);
	dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
	/* Disable FIFO */
	writeq(BIT_ULL(16), host->dma_base + MIO_EMM_DMA_FIFO_CFG(host));
	return 0;
}
/*
 * Pick the DMA setup flavor: the FIFO engine for multi-element
 * scatterlists (when supported), a single mapping otherwise.
 * Returns 0 on error.
 */
static u64 prepare_dma(struct cvm_mmc_host *host, struct mmc_data *data)
{
	if (host->use_sg && data->sg_len > 1)
		return prepare_dma_sg(host, data);
	return prepare_dma_single(host, data);
}
/*
 * Build the MIO_EMM_DMA register value describing this block
 * transfer; no hardware register is written here.
 */
static u64 prepare_ext_dma(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	u64 emm_dma;

	emm_dma = FIELD_PREP(MIO_EMM_DMA_VAL, 1) |
		  FIELD_PREP(MIO_EMM_DMA_SECTOR,
			     mmc_card_is_blockaddr(mmc->card) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_RW,
			     (mrq->data->flags & MMC_DATA_WRITE) ? 1 : 0) |
		  FIELD_PREP(MIO_EMM_DMA_BLOCK_CNT, mrq->data->blocks) |
		  FIELD_PREP(MIO_EMM_DMA_CARD_ADDR, mrq->cmd->arg);
	set_bus_id(&emm_dma, slot->bus_id);

	/* MULTI mode for eMMC, or for SD cards that advertise CMD23. */
	if (mmc_card_mmc(mmc->card) || (mmc_card_sd(mmc->card) &&
	    (mmc->card->scr.cmds & SD_SCR_CMD23_SUPPORT)))
		emm_dma |= FIELD_PREP(MIO_EMM_DMA_MULTI, 1);

	pr_debug("[%s] blocks: %u multi: %d\n",
		 (emm_dma & MIO_EMM_DMA_RW) ? "W" : "R",
		 mrq->data->blocks, (emm_dma & MIO_EMM_DMA_MULTI) ? 1 : 0);
	return emm_dma;
}
/*
 * Issue a multi-block read/write via the DMA engine. Completion is
 * signalled through cvm_mmc_interrupt(); on setup failure the request
 * is finished here with -EINVAL.
 */
static void cvm_mmc_dma_request(struct mmc_host *mmc,
				struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_data *data;
	u64 emm_dma, addr;

	/* DMA requests must carry data and a stop (CMD12) command. */
	if (!mrq->data || !mrq->data->sg || !mrq->data->sg_len ||
	    !mrq->stop || mrq->stop->opcode != MMC_STOP_TRANSMISSION) {
		dev_err(&mmc->card->dev,
			"Error: cmv_mmc_dma_request no data\n");
		goto error;
	}

	cvm_mmc_switch_to(slot);

	data = mrq->data;
	pr_debug("DMA request blocks: %d block_size: %d total_size: %d\n",
		 data->blocks, data->blksz, data->blocks * data->blksz);
	if (data->timeout_ns)
		set_wdog(slot, data->timeout_ns);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	emm_dma = prepare_ext_dma(mmc, mrq);
	addr = prepare_dma(host, data);
	if (!addr) {
		dev_err(host->dev, "prepare_dma failed\n");
		goto error;
	}

	host->dma_active = true;
	host->int_enable(host, MIO_EMM_INT_CMD_ERR | MIO_EMM_INT_DMA_DONE |
			 MIO_EMM_INT_DMA_ERR);

	if (host->dmar_fixup)
		host->dmar_fixup(host, mrq->cmd, data, addr);

	/*
	 * If we have a valid SD card in the slot, we set the response
	 * bit mask to check for CRC errors and timeouts only.
	 * Otherwise, use the default power reset value.
	 */
	if (mmc_card_sd(mmc->card))
		writeq(0x00b00000ull, host->base + MIO_EMM_STS_MASK(host));
	else
		writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));

	/* Writing MIO_EMM_DMA starts the transfer. */
	writeq(emm_dma, host->base + MIO_EMM_DMA(host));
	return;

error:
	mrq->cmd->error = -EINVAL;
	if (mrq->done)
		mrq->done(mrq);
	host->release_bus(host);
}
/* Set up the SG iterator so do_read() can unpack the receive buffer. */
static void do_read_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	sg_miter_start(&host->smi, mrq->data->sg, mrq->data->sg_len,
		       SG_MITER_ATOMIC | SG_MITER_TO_SG);
}
/*
 * PIO write: pack the request's scatterlist into the controller's
 * transmit buffer as 64-bit words, most significant byte first.
 */
static void do_write_request(struct cvm_mmc_host *host, struct mmc_request *mrq)
{
	unsigned int data_len = mrq->data->blocks * mrq->data->blksz;
	struct sg_mapping_iter *smi = &host->smi;
	unsigned int bytes_xfered;
	int shift = 56;
	u64 dat = 0;

	/* Copy data to the xmit buffer before issuing the command. */
	sg_miter_start(smi, mrq->data->sg, mrq->data->sg_len, SG_MITER_FROM_SG);

	/* Auto inc from offset zero, dbuf zero */
	writeq(0x10000ull, host->base + MIO_EMM_BUF_IDX(host));

	for (bytes_xfered = 0; bytes_xfered < data_len;) {
		/* Advance to the next scatterlist segment when this one is drained. */
		if (smi->consumed >= smi->length) {
			if (!sg_miter_next(smi))
				break;
			smi->consumed = 0;
		}

		/* Fill the 64-bit word, MSB first. */
		while (smi->consumed < smi->length && shift >= 0) {
			dat |= (u64)((u8 *)smi->addr)[smi->consumed] << shift;
			bytes_xfered++;
			smi->consumed++;
			shift -= 8;
		}

		/* Flush a completed word into the buffer and start the next. */
		if (shift < 0) {
			writeq(dat, host->base + MIO_EMM_BUF_DAT(host));
			shift = 56;
			dat = 0;
		}
	}
	sg_miter_stop(smi);
}
/*
 * Entry point for requests from the MMC core. Multi-block transfers
 * are routed to the DMA path; everything else is issued as a direct
 * command with optional PIO data.
 */
static void cvm_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	struct mmc_command *cmd = mrq->cmd;
	struct cvm_mmc_cr_mods mods;
	u64 emm_cmd, rsp_sts;
	int retries = 100;

	/*
	 * Note about locking:
	 * All MMC devices share the same bus and controller. Allow only a
	 * single user of the bootbus/MMC bus at a time. The lock is acquired
	 * on all entry points from the MMC layer.
	 *
	 * For requests the lock is only released after the completion
	 * interrupt!
	 */
	host->acquire_bus(host);

	if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK ||
	    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
		return cvm_mmc_dma_request(mmc, mrq);

	cvm_mmc_switch_to(slot);

	mods = cvm_mmc_get_cr_mods(cmd);

	WARN_ON(host->current_req);
	host->current_req = mrq;

	if (cmd->data) {
		/* Stage PIO data and program the data timeout. */
		if (cmd->data->flags & MMC_DATA_READ)
			do_read_request(host, mrq);
		else
			do_write_request(host, mrq);

		if (cmd->data->timeout_ns)
			set_wdog(slot, cmd->data->timeout_ns);
	} else
		set_wdog(slot, 0);

	host->dma_active = false;
	host->int_enable(host, MIO_EMM_INT_CMD_DONE | MIO_EMM_INT_CMD_ERR);

	emm_cmd = FIELD_PREP(MIO_EMM_CMD_VAL, 1) |
		  FIELD_PREP(MIO_EMM_CMD_CTYPE_XOR, mods.ctype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_RTYPE_XOR, mods.rtype_xor) |
		  FIELD_PREP(MIO_EMM_CMD_IDX, cmd->opcode) |
		  FIELD_PREP(MIO_EMM_CMD_ARG, cmd->arg);
	set_bus_id(&emm_cmd, slot->bus_id);
	/* OFFSET is 64 minus the transfer length in 64-bit words. */
	if (cmd->data && mmc_cmd_type(cmd) == MMC_CMD_ADTC)
		emm_cmd |= FIELD_PREP(MIO_EMM_CMD_OFFSET,
				64 - ((cmd->data->blocks * cmd->data->blksz) / 8));

	writeq(0, host->base + MIO_EMM_STS_MASK(host));

retry:
	/* Wait for any prior command/DMA/switch activity to drain. */
	rsp_sts = readq(host->base + MIO_EMM_RSP_STS(host));
	if (rsp_sts & MIO_EMM_RSP_STS_DMA_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_CMD_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_SWITCH_VAL ||
	    rsp_sts & MIO_EMM_RSP_STS_DMA_PEND) {
		udelay(10);
		if (--retries)
			goto retry;
	}
	if (!retries)
		dev_err(host->dev, "Bad status: %llx before command write\n", rsp_sts);
	/* Writing MIO_EMM_CMD issues the command. */
	writeq(emm_cmd, host->base + MIO_EMM_CMD(host));
}
/* Apply bus state changes (power, bus width, timing, clock) from the MMC core. */
static void cvm_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct cvm_mmc_slot *slot = mmc_priv(mmc);
	struct cvm_mmc_host *host = slot->host;
	int clk_period = 0, power_class = 10, bus_width = 0;
	u64 clock, emm_switch;

	host->acquire_bus(host);
	cvm_mmc_switch_to(slot);

	/* Set the power state */
	switch (ios->power_mode) {
	case MMC_POWER_ON:
		break;

	case MMC_POWER_OFF:
		cvm_mmc_reset_bus(slot);
		/* Power is either a shared GPIO or a per-slot regulator. */
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 0);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
		break;

	case MMC_POWER_UP:
		if (host->global_pwr_gpiod)
			host->set_shared_power(host, 1);
		else if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
		break;
	}

	/* Convert bus width to HW definition */
	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		bus_width = 2;
		break;
	case MMC_BUS_WIDTH_4:
		bus_width = 1;
		break;
	case MMC_BUS_WIDTH_1:
		bus_width = 0;
		break;
	}

	/* DDR is available for 4/8 bit bus width */
	if (ios->bus_width && ios->timing == MMC_TIMING_MMC_DDR52)
		bus_width |= 4;

	/* Change the clock frequency. */
	clock = ios->clock;
	if (clock > 52000000)
		clock = 52000000;
	slot->clock = clock;

	/* CLK_HI/CLK_LO each hold half the (rounded-up) divided period. */
	if (clock)
		clk_period = (host->sys_freq + clock - 1) / (2 * clock);

	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_HS_TIMING,
				(ios->timing == MMC_TIMING_MMC_HS)) |
		     FIELD_PREP(MIO_EMM_SWITCH_BUS_WIDTH, bus_width) |
		     FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, power_class) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_HI, clk_period) |
		     FIELD_PREP(MIO_EMM_SWITCH_CLK_LO, clk_period);
	set_bus_id(&emm_switch, slot->bus_id);

	/* Skip the costly switch sequence when nothing relevant changed. */
	if (!switch_val_changed(slot, emm_switch))
		goto out;

	set_wdog(slot, 0);
	do_switch(host, emm_switch);
	slot->cached_switch = emm_switch;
out:
	host->release_bus(host);
}
/* Host operations; card-detect and write-protect use the generic GPIO helpers. */
static const struct mmc_host_ops cvm_mmc_ops = {
	.request = cvm_mmc_request,
	.set_ios = cvm_mmc_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
};
/* Clamp the requested clock into [f_min, f_max] and record it on the slot. */
static void cvm_mmc_set_clock(struct cvm_mmc_slot *slot, unsigned int clock)
{
	struct mmc_host *mmc = slot->mmc;

	if (clock > mmc->f_max)
		clock = mmc->f_max;
	if (clock < mmc->f_min)
		clock = mmc->f_min;
	slot->clock = clock;
}
/*
 * One-time hardware setup for a slot: enable it in MIO_EMM_CFG,
 * switch to a slow initial clock and program watchdog, status mask
 * and RCA defaults. Always returns 0.
 */
static int cvm_mmc_init_lowlevel(struct cvm_mmc_slot *slot)
{
	struct cvm_mmc_host *host = slot->host;
	u64 emm_switch;

	/* Enable this bus slot. */
	host->emm_cfg |= (1ull << slot->bus_id);
	writeq(host->emm_cfg, slot->host->base + MIO_EMM_CFG(host));
	udelay(10);

	/* Program initial clock speed and power. */
	cvm_mmc_set_clock(slot, slot->mmc->f_min);
	emm_switch = FIELD_PREP(MIO_EMM_SWITCH_POWER_CLASS, 10);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_HI,
				 (host->sys_freq / slot->clock) / 2);
	emm_switch |= FIELD_PREP(MIO_EMM_SWITCH_CLK_LO,
				 (host->sys_freq / slot->clock) / 2);

	/* Make the changes take effect on this bus slot. */
	set_bus_id(&emm_switch, slot->bus_id);
	do_switch(host, emm_switch);

	slot->cached_switch = emm_switch;

	/*
	 * Set watchdog timeout value and default reset value
	 * for the mask register. Finally, set the CARD_RCA
	 * bit so that we can get the card address relative
	 * to the CMD register for CMD7 transactions.
	 */
	set_wdog(slot, 0);
	writeq(0xe4390080ull, host->base + MIO_EMM_STS_MASK(host));
	writeq(1, host->base + MIO_EMM_RCA(host));
	return 0;
}
/*
 * Parse the slot's device-tree node and fill in host parameters.
 * Returns the slot's bus id (the "reg" property) on success, or a
 * negative error code.
 */
static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot)
{
	u32 id, cmd_skew = 0, dat_skew = 0, bus_width = 0;
	struct device_node *node = dev->of_node;
	struct mmc_host *mmc = slot->mmc;
	u64 clock_period;
	int ret;

	ret = of_property_read_u32(node, "reg", &id);
	if (ret) {
		dev_err(dev, "Missing or invalid reg property on %pOF\n", node);
		return ret;
	}

	/* Reject out-of-range ids and ids already claimed by another slot. */
	if (id >= CAVIUM_MAX_MMC || slot->host->slot[id]) {
		dev_err(dev, "Invalid reg property on %pOF\n", node);
		return -EINVAL;
	}

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;
	/*
	 * Legacy Octeon firmware has no regulator entry, fall-back to
	 * a hard-coded voltage to get a sane OCR.
	 */
	if (IS_ERR(mmc->supply.vmmc))
		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;

	/* Common MMC bindings */
	ret = mmc_of_parse(mmc);
	if (ret)
		return ret;

	/* Set bus width from the Cavium-specific property if still unset. */
	if (!(mmc->caps & (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA))) {
		of_property_read_u32(node, "cavium,bus-max-width", &bus_width);
		if (bus_width == 8)
			mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
		else if (bus_width == 4)
			mmc->caps |= MMC_CAP_4_BIT_DATA;
	}

	/* Set maximum and minimum frequency */
	if (!mmc->f_max)
		of_property_read_u32(node, "spi-max-frequency", &mmc->f_max);
	if (!mmc->f_max || mmc->f_max > 52000000)
		mmc->f_max = 52000000;
	mmc->f_min = 400000;

	/* Sampling register settings, period in picoseconds */
	clock_period = 1000000000000ull / slot->host->sys_freq;
	of_property_read_u32(node, "cavium,cmd-clk-skew", &cmd_skew);
	of_property_read_u32(node, "cavium,dat-clk-skew", &dat_skew);
	/* Convert the skews (ps) into sample counts, rounding to nearest. */
	slot->cmd_cnt = (cmd_skew + clock_period / 2) / clock_period;
	slot->dat_cnt = (dat_skew + clock_period / 2) / clock_period;

	return id;
}
int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
{
struct cvm_mmc_slot *slot;
struct mmc_host *mmc;
int ret, id;
mmc = mmc_alloc_host(sizeof(struct cvm_mmc_slot), dev);
if (!mmc)
return -ENOMEM;
slot = mmc_priv(mmc);
slot->mmc = mmc;
slot->host = host;
ret = cvm_mmc_of_parse(dev, slot);
if (ret < 0)
goto error;
id = ret;
/* Set up host parameters */
mmc->ops = &cvm_mmc_ops;
/*
* We only have a 3.3v supply, we cannot support any
* of the UHS modes. We do support the high speed DDR
* modes up to 52MHz.
mmc: core: Delete bounce buffer Kconfig option This option is activated by all multiplatform configs and what not so we almost always have it turned on, and the memory it saves is negligible, even more so moving forward. The actual bounce buffer only gets allocated only when used, the only thing the ifdefs are saving is a little bit of code. It is highly improper to have this as a Kconfig option that get turned on by Kconfig, make this a pure runtime-thing and let the host decide whether we use bounce buffers. We add a new property "disable_bounce" to the host struct. Notice that mmc_queue_calc_bouncesz() already disables the bounce buffers if host->max_segs != 1, so any arch that has a maximum number of segments higher than 1 will have bounce buffers disabled. The option CONFIG_MMC_BLOCK_BOUNCE is default y so the majority of platforms in the kernel already have it on, and it then gets turned off at runtime since most of these have a host->max_segs > 1. The few exceptions that have host->max_segs == 1 and still turn off the bounce buffering are those that disable it in their defconfig. 
Those are the following: arch/arm/configs/colibri_pxa300_defconfig arch/arm/configs/zeus_defconfig - Uses MMC_PXA, drivers/mmc/host/pxamci.c - Sets host->max_segs = NR_SG, which is 1 - This needs its bounce buffer deactivated so we set host->disable_bounce to true in the host driver arch/arm/configs/davinci_all_defconfig - Uses MMC_DAVINCI, drivers/mmc/host/davinci_mmc.c - This driver sets host->max_segs to MAX_NR_SG, which is 16 - That means this driver anyways disabled bounce buffers - No special action needed for this platform arch/arm/configs/lpc32xx_defconfig arch/arm/configs/nhk8815_defconfig arch/arm/configs/u300_defconfig - Uses MMC_ARMMMCI, drivers/mmc/host/mmci.[c|h] - This driver by default sets host->max_segs to NR_SG, which is 128, unless a DMA engine is used, and in that case the number of segments are also > 1 - That means this driver already disables bounce buffers - No special action needed for these platforms arch/arm/configs/sama5_defconfig - Uses MMC_SDHCI, MMC_SDHCI_PLTFM, MMC_SDHCI_OF_AT91, MMC_ATMELMCI - Uses drivers/mmc/host/sdhci.c - Normally sets host->max_segs to SDHCI_MAX_SEGS which is 128 and thus disables bounce buffers - Sets host->max_segs to 1 if SDHCI_USE_SDMA is set - SDHCI_USE_SDMA is only set by SDHCI on PCI adapers - That means that for this platform bounce buffers are already disabled at runtime - No special action needed for this platform arch/blackfin/configs/CM-BF533_defconfig arch/blackfin/configs/CM-BF537E_defconfig - Uses MMC_SPI (a simple MMC card connected on SPI pins) - Uses drivers/mmc/host/mmc_spi.c - Sets host->max_segs to MMC_SPI_BLOCKSATONCE which is 128 - That means this platform already disables bounce buffers at runtime - No special action needed for these platforms arch/mips/configs/cavium_octeon_defconfig - Uses MMC_CAVIUM_OCTEON, drivers/mmc/host/cavium.c - Sets host->max_segs to 16 or 1 - Setting host->disable_bounce to be sure for the 1 case arch/mips/configs/qi_lb60_defconfig - Uses MMC_JZ4740, 
drivers/mmc/host/jz4740_mmc.c - This sets host->max_segs to 128 so bounce buffers are already runtime disabled - No action needed for this platform It would be interesting to come up with a list of the platforms that actually end up using bounce buffers. I have not been able to infer such a list, but it occurs when host->max_segs == 1 and the bounce buffering is not explicitly disabled. Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2017-05-18 12:29:31 +03:00
*
* Disable bounce buffers for max_segs = 1
*/
mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | MMC_CAP_3_3V_DDR;
if (host->use_sg)
mmc->max_segs = 16;
else
mmc->max_segs = 1;
/* DMA size field can address up to 8 MB */
mmc: cavium: Set the correct dma max segment size for mmc_host We have set the mmc_host.max_seg_size to 8M, but the dma max segment size of PCI device is set to 64K by default in function pci_device_add(). The mmc_host.max_seg_size is used to set the max segment size of the blk queue. Then this mismatch will trigger a calltrace like below when a bigger than 64K segment request arrives at mmc dev. So we should consider the limitation of the cvm_mmc_host when setting the mmc_host.max_seg_size. DMA-API: thunderx_mmc 0000:01:01.4: mapping sg segment longer than device claims to support [len=131072] [max=65536] WARNING: CPU: 6 PID: 238 at kernel/dma/debug.c:1221 debug_dma_map_sg+0x2b8/0x350 Modules linked in: CPU: 6 PID: 238 Comm: kworker/6:1H Not tainted 5.3.0-rc1-next-20190724-yocto-standard+ #62 Hardware name: Marvell OcteonTX CN96XX board (DT) Workqueue: kblockd blk_mq_run_work_fn pstate: 80c00009 (Nzcv daif +PAN +UAO) pc : debug_dma_map_sg+0x2b8/0x350 lr : debug_dma_map_sg+0x2b8/0x350 sp : ffff00001770f9e0 x29: ffff00001770f9e0 x28: ffffffff00000000 x27: 00000000ffffffff x26: ffff800bc2c73180 x25: ffff000010e83700 x24: 0000000000000002 x23: 0000000000000001 x22: 0000000000000001 x21: 0000000000000000 x20: ffff800bc48ba0b0 x19: ffff800bc97e8c00 x18: ffffffffffffffff x17: 0000000000000000 x16: 0000000000000000 x15: ffff000010e835c8 x14: 6874207265676e6f x13: 6c20746e656d6765 x12: 7320677320676e69 x11: 7070616d203a342e x10: 31303a31303a3030 x9 : 303020636d6d5f78 x8 : 35363d78616d5b20 x7 : 00000000000002fd x6 : ffff000010fd57dc x5 : 0000000000000000 x4 : ffff0000106c61f0 x3 : 00000000ffffffff x2 : 0000800bee060000 x1 : 7010678df3041a00 x0 : 0000000000000000 Call trace: debug_dma_map_sg+0x2b8/0x350 cvm_mmc_request+0x3c4/0x988 __mmc_start_request+0x9c/0x1f8 mmc_start_request+0x7c/0xb0 mmc_blk_mq_issue_rq+0x5c4/0x7b8 mmc_mq_queue_rq+0x11c/0x278 blk_mq_dispatch_rq_list+0xb0/0x568 blk_mq_do_dispatch_sched+0x6c/0x108 blk_mq_sched_dispatch_requests+0x110/0x1b8 
__blk_mq_run_hw_queue+0xb0/0x118 blk_mq_run_work_fn+0x28/0x38 process_one_work+0x210/0x490 worker_thread+0x48/0x458 kthread+0x130/0x138 ret_from_fork+0x10/0x1c Signed-off-by: Kevin Hao <haokexin@gmail.com> Fixes: ba3869ff32e4 ("mmc: cavium: Add core MMC driver for Cavium SOCs") Cc: stable@vger.kernel.org Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
2019-07-26 05:30:48 +03:00
mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
dma_get_max_seg_size(host->dev));
mmc->max_req_size = mmc->max_seg_size;
/* External DMA is in 512 byte blocks */
mmc->max_blk_size = 512;
/* DMA block count field is 15 bits */
mmc->max_blk_count = 32767;
slot->clock = mmc->f_min;
slot->bus_id = id;
slot->cached_rca = 1;
host->acquire_bus(host);
host->slot[id] = slot;
cvm_mmc_switch_to(slot);
cvm_mmc_init_lowlevel(slot);
host->release_bus(host);
ret = mmc_add_host(mmc);
if (ret) {
dev_err(dev, "mmc_add_host() returned %d\n", ret);
slot->host->slot[id] = NULL;
goto error;
}
return 0;
error:
mmc_free_host(slot->mmc);
return ret;
}
/* Unregister and free a slot's mmc_host; clears the host's slot pointer. */
int cvm_mmc_of_slot_remove(struct cvm_mmc_slot *slot)
{
	struct mmc_host *mmc = slot->mmc;

	mmc_remove_host(mmc);
	slot->host->slot[slot->bus_id] = NULL;
	mmc_free_host(mmc);
	return 0;
}