/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>

#include <linux/of_gpio.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpio_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
			readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
			readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
			readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
			readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
			readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
			readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
			readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
			readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
			readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
			readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
			readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
			readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
			readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
			readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
			readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
			readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
			readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
			readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int __init ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif
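
/*
 * Reserve a free GDD logical channel for @msg. Returns the channel
 * number on success or -EBUSY if all channels are in use.
 */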
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}
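
/*
 * Program GDD logical channel @lch for a DMA transfer of @msg and
 * enable the corresponding GDD interrupt. Clocks are held until the
 * transfer completes.
 */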
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	/* Hold clocks during the transfer */
	pm_runtime_get_sync(omap_port->pdev);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
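
/*
 * Set up a PIO transfer for @msg by arming the DATAACCEPT (write) or
 * DATAAVAILABLE (read) interrupt for its channel. For writes an extra
 * clock reference is held until the last frame has been sent.
 */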
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
					msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}
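
/*
 * Start the first queued message on @queue, using DMA when the message
 * has a scatterlist entry longer than one word and a GDD channel is
 * free, and falling back to PIO otherwise.
 */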
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
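
/*
 * Queue @msg on the proper TX/RX channel queue and kick the transfer
 * if the queue was idle. Break frames are routed to ssi_async_break().
 */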
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}
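
/*
 * Compute the SST divisor for the requested TX speed. The TX base
 * clock is half the SSI functional clock, and the divisor is chosen
 * so the resulting bit rate never exceeds max_speed.
 */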
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
			msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}
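
/*
 * Apply the client TX/RX configuration to the port: program the frame
 * size, divisor, channel count, arbitration and mode registers, and
 * mirror the values in the shadow context used across OFF mode.
 */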
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
					cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}
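
/*
 * Abort every in-flight DMA and PIO transfer on the port, drain the
 * TX/RX/break queues, and clear all pending interrupt state. Dropped
 * messages are destroyed without calling their completion callback.
 */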
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}
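
/*
 * Raise the wake line for TX. The line is reference counted, so only
 * the first caller grabs the clocks and asserts the wake signal.
 */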
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}
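
/*
 * Drop one wake line reference; the last caller lowers the wake signal
 * and releases the clocks taken in ssi_start_tx().
 */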
static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}

static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
								link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
								link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
				SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}
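
/*
 * Handle an SSI error interrupt: cancel all outstanding GDD reads,
 * mask the data-available interrupts, acknowledge the error, and
 * complete every pending read request with HSI_STATUS_ERROR.
 */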
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
								link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}
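
/*
 * Move one word of the current PIO message for @queue, completing the
 * message once the whole scatterlist entry has been transferred and
 * restarting whatever is queued behind it.
 */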
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}

static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}
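
/*
 * Track CAWAKE transitions. A fast high-low-high pulse combined with
 * interrupt latency could unbalance the clock reference count, so the
 * wkin_cken flag ensures only one reference is held per assertion.
 */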
static void ssi_wake_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		spin_lock(&omap_port->lock);
		if (!omap_port->wkin_cken) {
			omap_port->wkin_cken = 1;
			pm_runtime_get_sync(omap_port->pdev);
		}
		spin_unlock(&omap_port->lock);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		spin_lock(&omap_port->lock);
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		spin_unlock(&omap_port->lock);
	}
}

static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

	tasklet_hi_schedule(&omap_port->wake_tasklet);

	return IRQ_HANDLED;
}

static int __init ssi_port_irq(struct hsi_port *port,
			       struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
						(unsigned long)port);
	err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
						0, "mpu_irq0", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
						omap_port->irq, err);
	return err;
}

static int __init ssi_wake_irq(struct hsi_port *port,
			       struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (omap_port->wake_gpio == -1) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpio_to_irq(omap_port->wake_gpio);

	omap_port->wake_irq = cawake_irq;
	tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
						(unsigned long)port);
	err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
						"cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void __init ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int __init ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
							mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}
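
/*
 * Probe one SSI port: pick the first free slot in the controller,
 * claim the CAWAKE gpio and the TX/RX register regions, wire up the
 * HSI port callbacks and IRQs, and register clients from the DT.
 */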
static int __init ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int cawake_gpio = 0;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!try_module_get(ssi->owner)) {
		dev_err(&pd->dev, "could not increment parent module refcount\n");
		return -ENODEV;
	}

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = of_get_named_gpio(np, "ti,ssi-cawake-gpio", 0);
	if (cawake_gpio < 0) {
		dev_err(&pd->dev, "DT data is missing cawake gpio (err=%d)\n",
			cawake_gpio);
		err = -ENODEV;
		goto error;
	}

	err = devm_gpio_request_one(&port->device, cawake_gpio, GPIOF_DIR_IN,
		"cawake");
	if (err) {
		dev_err(&pd->dev, "could not request cawake gpio (err=%d)!\n",
			err);
		err = -ENXIO;
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_irq_safe(omap_port->pdev);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized (cawake=%d)\n",
		port_id, cawake_gpio);

	return 0;

error:
	/* drop the reference taken by try_module_get() above */
	module_put(ssi->owner);
	return err;
}

static int __exit ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	hsi_port_unregister_clients(port);

	tasklet_kill(&omap_port->wake_tasklet);
	tasklet_kill(&omap_port->pio_tasklet);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);
	module_put(ssi->owner);
	pm_runtime_disable(&pd->dev);

	return 0;
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS	(&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

static struct platform_driver ssi_port_pdriver = {
	.remove = __exit_p(ssi_port_remove),
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};

module_platform_driver_probe(ssi_port_pdriver, ssi_port_probe);

MODULE_ALIAS("platform:omap_ssi_port");
MODULE_AUTHOR("Carlos Chinea <carlos.chinea@nokia.com>");
MODULE_AUTHOR("Sebastian Reichel <sre@kernel.org>");
MODULE_DESCRIPTION("Synchronous Serial Interface Port Driver");
MODULE_LICENSE("GPL v2");