// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

/*
 * Cadence SoundWire Master module
 * Used by Master driver
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/workqueue.h>
#include "bus.h"
#include "cadence_master.h"

static int interrupt_mask;
module_param_named(cdns_mcp_int_mask, interrupt_mask, int, 0444);
MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");

#define CDNS_MCP_CONFIG				0x0
#define CDNS_MCP_CONFIG_MCMD_RETRY		GENMASK(27, 24)
#define CDNS_MCP_CONFIG_MPREQ_DELAY		GENMASK(20, 16)
#define CDNS_MCP_CONFIG_MMASTER			BIT(7)
#define CDNS_MCP_CONFIG_BUS_REL			BIT(6)
#define CDNS_MCP_CONFIG_SNIFFER			BIT(5)
#define CDNS_MCP_CONFIG_SSPMOD			BIT(4)
#define CDNS_MCP_CONFIG_CMD			BIT(3)
#define CDNS_MCP_CONFIG_OP			GENMASK(2, 0)
#define CDNS_MCP_CONFIG_OP_NORMAL		0

#define CDNS_MCP_CONTROL			0x4
#define CDNS_MCP_CONTROL_RST_DELAY		GENMASK(10, 8)
#define CDNS_MCP_CONTROL_CMD_RST		BIT(7)
#define CDNS_MCP_CONTROL_SOFT_RST		BIT(6)
#define CDNS_MCP_CONTROL_SW_RST			BIT(5)
#define CDNS_MCP_CONTROL_HW_RST			BIT(4)
#define CDNS_MCP_CONTROL_CLK_PAUSE		BIT(3)
#define CDNS_MCP_CONTROL_CLK_STOP_CLR		BIT(2)
#define CDNS_MCP_CONTROL_CMD_ACCEPT		BIT(1)
#define CDNS_MCP_CONTROL_BLOCK_WAKEUP		BIT(0)

#define CDNS_MCP_CMDCTRL			0x8
#define CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR	BIT(2)

#define CDNS_MCP_SSPSTAT			0xC
#define CDNS_MCP_FRAME_SHAPE			0x10
#define CDNS_MCP_FRAME_SHAPE_INIT		0x14
#define CDNS_MCP_FRAME_SHAPE_COL_MASK		GENMASK(2, 0)
#define CDNS_MCP_FRAME_SHAPE_ROW_MASK		GENMASK(7, 3)

#define CDNS_MCP_CONFIG_UPDATE			0x18
#define CDNS_MCP_CONFIG_UPDATE_BIT		BIT(0)

#define CDNS_MCP_PHYCTRL			0x1C
#define CDNS_MCP_SSP_CTRL0			0x20
#define CDNS_MCP_SSP_CTRL1			0x28
#define CDNS_MCP_CLK_CTRL0			0x30
#define CDNS_MCP_CLK_CTRL1			0x38
#define CDNS_MCP_CLK_MCLKD_MASK			GENMASK(7, 0)

#define CDNS_MCP_STAT				0x40
#define CDNS_MCP_STAT_ACTIVE_BANK		BIT(20)
#define CDNS_MCP_STAT_CLK_STOP			BIT(16)

#define CDNS_MCP_INTSTAT			0x44
#define CDNS_MCP_INTMASK			0x48

#define CDNS_MCP_INT_IRQ			BIT(31)
#define CDNS_MCP_INT_RESERVED1			GENMASK(30, 17)
#define CDNS_MCP_INT_WAKEUP			BIT(16)
#define CDNS_MCP_INT_SLAVE_RSVD			BIT(15)
#define CDNS_MCP_INT_SLAVE_ALERT		BIT(14)
#define CDNS_MCP_INT_SLAVE_ATTACH		BIT(13)
#define CDNS_MCP_INT_SLAVE_NATTACH		BIT(12)
#define CDNS_MCP_INT_SLAVE_MASK			GENMASK(15, 12)
#define CDNS_MCP_INT_DPINT			BIT(11)
#define CDNS_MCP_INT_CTRL_CLASH			BIT(10)
#define CDNS_MCP_INT_DATA_CLASH			BIT(9)
#define CDNS_MCP_INT_PARITY			BIT(8)
#define CDNS_MCP_INT_CMD_ERR			BIT(7)
#define CDNS_MCP_INT_RESERVED2			GENMASK(6, 4)
#define CDNS_MCP_INT_RX_NE			BIT(3)
#define CDNS_MCP_INT_RX_WL			BIT(2)
#define CDNS_MCP_INT_TXE			BIT(1)
#define CDNS_MCP_INT_TXF			BIT(0)
#define CDNS_MCP_INT_RESERVED			(CDNS_MCP_INT_RESERVED1 | CDNS_MCP_INT_RESERVED2)

#define CDNS_MCP_INTSET				0x4C

#define CDNS_MCP_SLAVE_STAT			0x50
#define CDNS_MCP_SLAVE_STAT_MASK		GENMASK(1, 0)

#define CDNS_MCP_SLAVE_INTSTAT0			0x54
#define CDNS_MCP_SLAVE_INTSTAT1			0x58
#define CDNS_MCP_SLAVE_INTSTAT_NPRESENT		BIT(0)
#define CDNS_MCP_SLAVE_INTSTAT_ATTACHED		BIT(1)
#define CDNS_MCP_SLAVE_INTSTAT_ALERT		BIT(2)
#define CDNS_MCP_SLAVE_INTSTAT_RESERVED		BIT(3)
#define CDNS_MCP_SLAVE_STATUS_BITS		GENMASK(3, 0)
#define CDNS_MCP_SLAVE_STATUS_NUM		4

#define CDNS_MCP_SLAVE_INTMASK0			0x5C
#define CDNS_MCP_SLAVE_INTMASK1			0x60

#define CDNS_MCP_SLAVE_INTMASK0_MASK		GENMASK(31, 0)
#define CDNS_MCP_SLAVE_INTMASK1_MASK		GENMASK(15, 0)

#define CDNS_MCP_PORT_INTSTAT			0x64
#define CDNS_MCP_PDI_STAT			0x6C

#define CDNS_MCP_FIFOLEVEL			0x78
#define CDNS_MCP_FIFOSTAT			0x7C
#define CDNS_MCP_RX_FIFO_AVAIL			GENMASK(5, 0)

#define CDNS_MCP_CMD_BASE			0x80
#define CDNS_MCP_RESP_BASE			0x80
#define CDNS_MCP_CMD_LEN			0x20
#define CDNS_MCP_CMD_WORD_LEN			0x4

#define CDNS_MCP_CMD_SSP_TAG			BIT(31)
#define CDNS_MCP_CMD_COMMAND			GENMASK(30, 28)
#define CDNS_MCP_CMD_DEV_ADDR			GENMASK(27, 24)
#define CDNS_MCP_CMD_REG_ADDR			GENMASK(23, 8)
#define CDNS_MCP_CMD_REG_DATA			GENMASK(7, 0)

#define CDNS_MCP_CMD_READ			2
#define CDNS_MCP_CMD_WRITE			3

#define CDNS_MCP_RESP_RDATA			GENMASK(15, 8)
#define CDNS_MCP_RESP_ACK			BIT(0)
#define CDNS_MCP_RESP_NACK			BIT(1)

#define CDNS_DP_SIZE				128

#define CDNS_DPN_B0_CONFIG(n)			(0x100 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B0_CH_EN(n)			(0x104 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B0_SAMPLE_CTRL(n)		(0x108 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B0_OFFSET_CTRL(n)		(0x10C + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B0_HCTRL(n)			(0x110 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B0_ASYNC_CTRL(n)		(0x114 + CDNS_DP_SIZE * (n))

#define CDNS_DPN_B1_CONFIG(n)			(0x118 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B1_CH_EN(n)			(0x11C + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B1_SAMPLE_CTRL(n)		(0x120 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B1_OFFSET_CTRL(n)		(0x124 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B1_HCTRL(n)			(0x128 + CDNS_DP_SIZE * (n))
#define CDNS_DPN_B1_ASYNC_CTRL(n)		(0x12C + CDNS_DP_SIZE * (n))

#define CDNS_DPN_CONFIG_BPM			BIT(18)
#define CDNS_DPN_CONFIG_BGC			GENMASK(17, 16)
#define CDNS_DPN_CONFIG_WL			GENMASK(12, 8)
#define CDNS_DPN_CONFIG_PORT_DAT		GENMASK(3, 2)
#define CDNS_DPN_CONFIG_PORT_FLOW		GENMASK(1, 0)

#define CDNS_DPN_SAMPLE_CTRL_SI			GENMASK(15, 0)

#define CDNS_DPN_OFFSET_CTRL_1			GENMASK(7, 0)
#define CDNS_DPN_OFFSET_CTRL_2			GENMASK(15, 8)

#define CDNS_DPN_HCTRL_HSTOP			GENMASK(3, 0)
#define CDNS_DPN_HCTRL_HSTART			GENMASK(7, 4)
#define CDNS_DPN_HCTRL_LCTRL			GENMASK(10, 8)

#define CDNS_PORTCTRL				0x130
#define CDNS_PORTCTRL_TEST_FAILED		BIT(1)
#define CDNS_PORTCTRL_DIRN			BIT(7)
#define CDNS_PORTCTRL_BANK_INVERT		BIT(8)

#define CDNS_PORT_OFFSET			0x80

#define CDNS_PDI_CONFIG(n)			(0x1100 + (n) * 16)

#define CDNS_PDI_CONFIG_SOFT_RESET		BIT(24)
#define CDNS_PDI_CONFIG_CHANNEL			GENMASK(15, 8)
#define CDNS_PDI_CONFIG_PORT			GENMASK(4, 0)

/* Driver defaults */
#define CDNS_TX_TIMEOUT				500

#define CDNS_SCP_RX_FIFOLEVEL			0x2

/*
 * register accessor helpers
 */
static inline u32 cdns_readl(struct sdw_cdns *cdns, int offset)
{
	return readl(cdns->registers + offset);
}

static inline void cdns_writel(struct sdw_cdns *cdns, int offset, u32 value)
{
	writel(value, cdns->registers + offset);
}

static inline void cdns_updatel(struct sdw_cdns *cdns,
				int offset, u32 mask, u32 val)
{
	u32 tmp;

	tmp = cdns_readl(cdns, offset);
	tmp = (tmp & ~mask) | val;
	cdns_writel(cdns, offset, tmp);
}

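/*
 * cdns_updatel() is a plain read-modify-write: 'mask' selects the field and
 * 'val' must already be shifted into the field position. For example, the
 * interrupt handler below masks the Slave interrupt sources with
 *
 *	cdns_updatel(cdns, CDNS_MCP_INTMASK, CDNS_MCP_INT_SLAVE_MASK, 0);
 *
 * which clears only the bits covered by CDNS_MCP_INT_SLAVE_MASK and leaves
 * the rest of the interrupt mask untouched.
 */
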
static int cdns_set_wait(struct sdw_cdns *cdns, int offset, u32 mask, u32 value)
{
	int timeout = 10;
	u32 reg_read;

	/* Wait for bit to be set */
	do {
		reg_read = readl(cdns->registers + offset);
		if ((reg_read & mask) == value)
			return 0;

		timeout--;
		usleep_range(50, 100);
	} while (timeout != 0);

	return -ETIMEDOUT;
}

static int cdns_clear_bit(struct sdw_cdns *cdns, int offset, u32 value)
{
	writel(value, cdns->registers + offset);

	/* Wait for bit to be self cleared */
	return cdns_set_wait(cdns, offset, value, 0);
}

/*
 * all changes to the MCP_CONFIG, MCP_CONTROL, MCP_CMDCTRL and MCP_PHYCTRL
 * registers need to be confirmed with a write to MCP_CONFIG_UPDATE
 */
static int cdns_config_update(struct sdw_cdns *cdns)
{
	int ret;

	if (sdw_cdns_is_clock_stop(cdns)) {
		dev_err(cdns->dev, "Cannot program MCP_CONFIG_UPDATE in ClockStopMode\n");
		return -EINVAL;
	}

	ret = cdns_clear_bit(cdns, CDNS_MCP_CONFIG_UPDATE,
			     CDNS_MCP_CONFIG_UPDATE_BIT);
	if (ret < 0)
		dev_err(cdns->dev, "Config update timed out\n");

	return ret;
}

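/*
 * Sketch of the update/commit sequence described above, mirroring what
 * sdw_cdns_clock_stop() and sdw_cdns_clock_restart() do later in this file:
 *
 *	cdns_updatel(cdns, CDNS_MCP_CONTROL,
 *		     CDNS_MCP_CONTROL_CMD_ACCEPT, CDNS_MCP_CONTROL_CMD_ACCEPT);
 *	ret = cdns_config_update(cdns);	// latch the new configuration
 *
 * Until the update bit self-clears, the IP keeps operating with the
 * previously committed values.
 */
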
/*
 * debugfs
 */
#ifdef CONFIG_DEBUG_FS

#define RD_BUF (2 * PAGE_SIZE)

static ssize_t cdns_sprintf(struct sdw_cdns *cdns,
			    char *buf, size_t pos, unsigned int reg)
{
	return scnprintf(buf + pos, RD_BUF - pos,
			 "%4x\t%8x\n", reg, cdns_readl(cdns, reg));
}

static int cdns_reg_show(struct seq_file *s, void *data)
{
	struct sdw_cdns *cdns = s->private;
	char *buf;
	ssize_t ret;
	int num_ports;
	int i, j;

	buf = kzalloc(RD_BUF, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = scnprintf(buf, RD_BUF, "Register Value\n");

	ret += scnprintf(buf + ret, RD_BUF - ret, "\nMCP Registers\n");
	/* 8 MCP registers */
	for (i = CDNS_MCP_CONFIG; i <= CDNS_MCP_PHYCTRL; i += sizeof(u32))
		ret += cdns_sprintf(cdns, buf, ret, i);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nStatus & Intr Registers\n");
	/* 13 Status & Intr registers (offsets 0x70 and 0x74 not defined) */
	for (i = CDNS_MCP_STAT; i <= CDNS_MCP_FIFOSTAT; i += sizeof(u32))
		ret += cdns_sprintf(cdns, buf, ret, i);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nSSP & Clk ctrl Registers\n");
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_SSP_CTRL0);
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_SSP_CTRL1);
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_CLK_CTRL0);
	ret += cdns_sprintf(cdns, buf, ret, CDNS_MCP_CLK_CTRL1);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nDPn B0 Registers\n");

	num_ports = cdns->num_ports;

	for (i = 0; i < num_ports; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret,
				 "\nDP-%d\n", i);
		for (j = CDNS_DPN_B0_CONFIG(i);
		     j < CDNS_DPN_B0_ASYNC_CTRL(i); j += sizeof(u32))
			ret += cdns_sprintf(cdns, buf, ret, j);
	}

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nDPn B1 Registers\n");
	for (i = 0; i < num_ports; i++) {
		ret += scnprintf(buf + ret, RD_BUF - ret,
				 "\nDP-%d\n", i);

		for (j = CDNS_DPN_B1_CONFIG(i);
		     j < CDNS_DPN_B1_ASYNC_CTRL(i); j += sizeof(u32))
			ret += cdns_sprintf(cdns, buf, ret, j);
	}

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nDPn Control Registers\n");
	for (i = 0; i < num_ports; i++)
		ret += cdns_sprintf(cdns, buf, ret,
				    CDNS_PORTCTRL + i * CDNS_PORT_OFFSET);

	ret += scnprintf(buf + ret, RD_BUF - ret,
			 "\nPDIn Config Registers\n");

	/* number of PDI and ports is interchangeable */
	for (i = 0; i < num_ports; i++)
		ret += cdns_sprintf(cdns, buf, ret, CDNS_PDI_CONFIG(i));

	seq_printf(s, "%s", buf);
	kfree(buf);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(cdns_reg);

static int cdns_hw_reset(void *data, u64 value)
{
	struct sdw_cdns *cdns = data;
	int ret;

	if (value != 1)
		return -EINVAL;

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	ret = sdw_cdns_exit_reset(cdns);

	dev_dbg(cdns->dev, "link hw_reset done: %d\n", ret);

	return ret;
}

DEFINE_DEBUGFS_ATTRIBUTE(cdns_hw_reset_fops, NULL, cdns_hw_reset, "%llu\n");

static int cdns_parity_error_injection(void *data, u64 value)
{
	struct sdw_cdns *cdns = data;
	struct sdw_bus *bus;
	int ret;

	if (value != 1)
		return -EINVAL;

	bus = &cdns->bus;

	/*
	 * Resume Master device. If this results in a bus reset, the
	 * Slave devices will re-attach and be re-enumerated.
	 */
	ret = pm_runtime_get_sync(bus->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err_ratelimited(cdns->dev,
				    "pm_runtime_get_sync failed in %s, ret %d\n",
				    __func__, ret);
		pm_runtime_put_noidle(bus->dev);
		return ret;
	}

	/*
	 * wait long enough for Slave(s) to be in steady state. This
	 * does not need to be super precise.
	 */
	msleep(200);

	/*
	 * Take the bus lock here to make sure that any bus transactions
	 * will be queued while we inject a parity error on a dummy read
	 */
	mutex_lock(&bus->bus_lock);

	/* program hardware to inject parity error */
	cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
		     CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
		     CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR);

	/* commit changes */
	cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
		     CDNS_MCP_CONFIG_UPDATE_BIT,
		     CDNS_MCP_CONFIG_UPDATE_BIT);

	/* do a broadcast dummy read to avoid bus clashes */
	ret = sdw_bread_no_pm_unlocked(&cdns->bus, 0xf, SDW_SCP_DEVID_0);
	dev_info(cdns->dev, "parity error injection, read: %d\n", ret);

	/* program hardware to disable parity error */
	cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
		     CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
		     0);

	/* commit changes */
	cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
		     CDNS_MCP_CONFIG_UPDATE_BIT,
		     CDNS_MCP_CONFIG_UPDATE_BIT);

	/* Continue bus operation with parity error injection disabled */
	mutex_unlock(&bus->bus_lock);

	/* Userspace changed the hardware state behind the kernel's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	/*
	 * allow Master device to enter pm_runtime suspend. This may
	 * also result in Slave devices suspending.
	 */
	pm_runtime_mark_last_busy(bus->dev);
	pm_runtime_put_autosuspend(bus->dev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(cdns_parity_error_fops, NULL,
			 cdns_parity_error_injection, "%llu\n");

/**
 * sdw_cdns_debugfs_init() - Cadence debugfs init
 * @cdns: Cadence instance
 * @root: debugfs root
 */
void sdw_cdns_debugfs_init(struct sdw_cdns *cdns, struct dentry *root)
{
	debugfs_create_file("cdns-registers", 0400, root, cdns, &cdns_reg_fops);

	debugfs_create_file("cdns-hw-reset", 0200, root, cdns,
			    &cdns_hw_reset_fops);

	debugfs_create_file("cdns-parity-error-injection", 0200, root, cdns,
			    &cdns_parity_error_fops);
}
EXPORT_SYMBOL_GPL(sdw_cdns_debugfs_init);

#endif /* CONFIG_DEBUG_FS */

/*
 * IO Calls
 */
static enum sdw_command_response
cdns_fill_msg_resp(struct sdw_cdns *cdns,
		   struct sdw_msg *msg, int count, int offset)
{
	int nack = 0, no_ack = 0;
	int i;

	/* check message response */
	for (i = 0; i < count; i++) {
		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
			no_ack = 1;
			dev_vdbg(cdns->dev, "Msg Ack not received, cmd %d\n", i);
		}
		if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
			nack = 1;
			dev_err_ratelimited(cdns->dev, "Msg NACK received, cmd %d\n", i);
		}
	}

	if (nack) {
		dev_err_ratelimited(cdns->dev, "Msg NACKed for Slave %d\n", msg->dev_num);
		return SDW_CMD_FAIL;
	}

	if (no_ack) {
		dev_dbg_ratelimited(cdns->dev, "Msg ignored for Slave %d\n", msg->dev_num);
		return SDW_CMD_IGNORED;
	}

	/* fill response */
	for (i = 0; i < count; i++)
		msg->buf[i + offset] = FIELD_GET(CDNS_MCP_RESP_RDATA, cdns->response_buf[i]);

	return SDW_CMD_OK;
}

static enum sdw_command_response
_cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
	       int offset, int count, bool defer)
{
	unsigned long time;
	u32 base, i, data;
	u16 addr;

	/* Program the watermark level for RX FIFO */
	if (cdns->msg_count != count) {
		cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, count);
		cdns->msg_count = count;
	}

	base = CDNS_MCP_CMD_BASE;
	addr = msg->addr;

	for (i = 0; i < count; i++) {
		data = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
		data |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, cmd);
		data |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, addr);
		addr++;

		if (msg->flags == SDW_MSG_FLAG_WRITE)
			data |= msg->buf[i + offset];

		data |= FIELD_PREP(CDNS_MCP_CMD_SSP_TAG, msg->ssp_sync);
		cdns_writel(cdns, base, data);
		base += CDNS_MCP_CMD_WORD_LEN;
	}

	if (defer)
		return SDW_CMD_OK;

	/* wait for timeout or response */
	time = wait_for_completion_timeout(&cdns->tx_complete,
					   msecs_to_jiffies(CDNS_TX_TIMEOUT));
	if (!time) {
		dev_err(cdns->dev, "IO transfer timed out, cmd %d device %d addr %x len %d\n",
			cmd, msg->dev_num, msg->addr, msg->len);
		msg->len = 0;
		return SDW_CMD_TIMEOUT;
	}

	return cdns_fill_msg_resp(cdns, msg, count, offset);
}

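/*
 * Layout of the 32-bit command word built above, per the CDNS_MCP_CMD_*
 * masks at the top of this file: SSP tag in bit 31, command in bits 30:28,
 * device address in bits 27:24, register address in bits 23:8 and write
 * data in bits 7:0.
 *
 * Worked example (hypothetical transfer, for illustration only): writing
 * the value 0x02 to register 0x0060 of Slave 1 yields
 *
 *	FIELD_PREP(CDNS_MCP_CMD_COMMAND, CDNS_MCP_CMD_WRITE)	-> 0x30000000
 *	FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, 1)			-> 0x01000000
 *	FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, 0x0060)		-> 0x00006000
 *	data							-> 0x00000002
 *
 * i.e. a command word of 0x31006002 (with the SSP tag clear).
 */
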
static enum sdw_command_response
cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
{
	int nack = 0, no_ack = 0;
	unsigned long time;
	u32 data[2], base;
	int i;

	/* Program the watermark level for RX FIFO */
	if (cdns->msg_count != CDNS_SCP_RX_FIFOLEVEL) {
		cdns_writel(cdns, CDNS_MCP_FIFOLEVEL, CDNS_SCP_RX_FIFOLEVEL);
		cdns->msg_count = CDNS_SCP_RX_FIFOLEVEL;
	}

	data[0] = FIELD_PREP(CDNS_MCP_CMD_DEV_ADDR, msg->dev_num);
	data[0] |= FIELD_PREP(CDNS_MCP_CMD_COMMAND, 0x3);
	data[1] = data[0];

	data[0] |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, SDW_SCP_ADDRPAGE1);
	data[1] |= FIELD_PREP(CDNS_MCP_CMD_REG_ADDR, SDW_SCP_ADDRPAGE2);

	data[0] |= msg->addr_page1;
	data[1] |= msg->addr_page2;

	base = CDNS_MCP_CMD_BASE;
	cdns_writel(cdns, base, data[0]);
	base += CDNS_MCP_CMD_WORD_LEN;
	cdns_writel(cdns, base, data[1]);

	time = wait_for_completion_timeout(&cdns->tx_complete,
					   msecs_to_jiffies(CDNS_TX_TIMEOUT));
	if (!time) {
		dev_err(cdns->dev, "SCP Msg trf timed out\n");
		msg->len = 0;
		return SDW_CMD_TIMEOUT;
	}

	/* check responses to the writes */
	for (i = 0; i < 2; i++) {
		if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
			no_ack = 1;
			dev_err(cdns->dev, "Program SCP Ack not received\n");
			if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
				nack = 1;
				dev_err(cdns->dev, "Program SCP NACK received\n");
			}
		}
	}

	/* For NACK, NO ack, don't return err if we are in Broadcast mode */
	if (nack) {
		dev_err_ratelimited(cdns->dev,
				    "SCP_addrpage NACKed for Slave %d\n", msg->dev_num);
		return SDW_CMD_FAIL;
	}

	if (no_ack) {
		dev_dbg_ratelimited(cdns->dev,
				    "SCP_addrpage ignored for Slave %d\n", msg->dev_num);
		return SDW_CMD_IGNORED;
	}

	return SDW_CMD_OK;
}

static int cdns_prep_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int *cmd)
{
	int ret;

	if (msg->page) {
		ret = cdns_program_scp_addr(cdns, msg);
		if (ret) {
			msg->len = 0;
			return ret;
		}
	}

	switch (msg->flags) {
	case SDW_MSG_FLAG_READ:
		*cmd = CDNS_MCP_CMD_READ;
		break;

	case SDW_MSG_FLAG_WRITE:
		*cmd = CDNS_MCP_CMD_WRITE;
		break;

	default:
		dev_err(cdns->dev, "Invalid msg cmd: %d\n", msg->flags);
		return -EINVAL;
	}

	return 0;
}

enum sdw_command_response
cdns_xfer_msg(struct sdw_bus *bus, struct sdw_msg *msg)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int cmd = 0, ret, i;

	ret = cdns_prep_msg(cdns, msg, &cmd);
	if (ret)
		return SDW_CMD_FAIL_OTHER;

	for (i = 0; i < msg->len / CDNS_MCP_CMD_LEN; i++) {
		ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
				     CDNS_MCP_CMD_LEN, false);
		if (ret < 0)
			goto exit;
	}

	if (!(msg->len % CDNS_MCP_CMD_LEN))
		goto exit;

	ret = _cdns_xfer_msg(cdns, msg, cmd, i * CDNS_MCP_CMD_LEN,
			     msg->len % CDNS_MCP_CMD_LEN, false);

exit:
	return ret;
}
EXPORT_SYMBOL(cdns_xfer_msg);

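/*
 * Chunking illustration for cdns_xfer_msg() (hypothetical length, for
 * illustration only): CDNS_MCP_CMD_LEN is 0x20, so a 70-byte message is
 * sent as two full chunks of 32 commands followed by a final chunk of
 * 70 % 32 = 6 commands, each chunk waiting for its own RX-FIFO watermark
 * interrupt before the next one is programmed.
 */
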
enum sdw_command_response
cdns_xfer_msg_defer(struct sdw_bus *bus,
		    struct sdw_msg *msg, struct sdw_defer *defer)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int cmd = 0, ret;

	/* for defer only 1 message is supported */
	if (msg->len > 1)
		return -ENOTSUPP;

	ret = cdns_prep_msg(cdns, msg, &cmd);
	if (ret)
		return SDW_CMD_FAIL_OTHER;

	cdns->defer = defer;
	cdns->defer->length = msg->len;

	return _cdns_xfer_msg(cdns, msg, cmd, 0, msg->len, true);
}
EXPORT_SYMBOL(cdns_xfer_msg_defer);

enum sdw_command_response
cdns_reset_page_addr(struct sdw_bus *bus, unsigned int dev_num)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	struct sdw_msg msg;

	/* Create dummy message with valid device number */
	memset(&msg, 0, sizeof(msg));
	msg.dev_num = dev_num;

	return cdns_program_scp_addr(cdns, &msg);
}
EXPORT_SYMBOL(cdns_reset_page_addr);

/*
 * IRQ handling
 */
static void cdns_read_response(struct sdw_cdns *cdns)
{
	u32 num_resp, cmd_base;
	int i;

	num_resp = cdns_readl(cdns, CDNS_MCP_FIFOSTAT);
	num_resp &= CDNS_MCP_RX_FIFO_AVAIL;

	cmd_base = CDNS_MCP_CMD_BASE;

	for (i = 0; i < num_resp; i++) {
		cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
		cmd_base += CDNS_MCP_CMD_WORD_LEN;
	}
}

static int cdns_update_slave_status(struct sdw_cdns *cdns,
				    u64 slave_intstat)
{
	enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
	bool is_slave = false;
	u32 mask;
	int i, set_status;

	memset(status, 0, sizeof(status));

	for (i = 0; i <= SDW_MAX_DEVICES; i++) {
		mask = (slave_intstat >> (i * CDNS_MCP_SLAVE_STATUS_NUM)) &
			CDNS_MCP_SLAVE_STATUS_BITS;
		if (!mask)
			continue;

		is_slave = true;
		set_status = 0;

		if (mask & CDNS_MCP_SLAVE_INTSTAT_RESERVED) {
			status[i] = SDW_SLAVE_RESERVED;
			set_status++;
		}

		if (mask & CDNS_MCP_SLAVE_INTSTAT_ATTACHED) {
			status[i] = SDW_SLAVE_ATTACHED;
			set_status++;
		}

		if (mask & CDNS_MCP_SLAVE_INTSTAT_ALERT) {
			status[i] = SDW_SLAVE_ALERT;
			set_status++;
		}

		if (mask & CDNS_MCP_SLAVE_INTSTAT_NPRESENT) {
			status[i] = SDW_SLAVE_UNATTACHED;
			set_status++;
		}

		/* first check if Slave reported multiple status */
		if (set_status > 1) {
			u32 val;

			dev_warn_ratelimited(cdns->dev,
					     "Slave %d reported multiple Status: %d\n",
					     i, mask);

			/* check latest status extracted from PING commands */
			val = cdns_readl(cdns, CDNS_MCP_SLAVE_STAT);
			val >>= (i * 2);

			switch (val & 0x3) {
			case 0:
				status[i] = SDW_SLAVE_UNATTACHED;
				break;
			case 1:
				status[i] = SDW_SLAVE_ATTACHED;
				break;
			case 2:
				status[i] = SDW_SLAVE_ALERT;
				break;
			case 3:
			default:
				status[i] = SDW_SLAVE_RESERVED;
				break;
			}

			dev_warn_ratelimited(cdns->dev,
					     "Slave %d status updated to %d\n",
					     i, status[i]);
		}
	}

	if (is_slave)
		return sdw_handle_slave_status(&cdns->bus, status);

	return 0;
}

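/*
 * Decoding illustration for cdns_update_slave_status() (hypothetical value,
 * for illustration only): each device owns a 4-bit field in slave_intstat,
 * bits [4 * i + 3 : 4 * i] for device i. A combined value of 0x20 therefore
 * means device 1 has bit 1 (CDNS_MCP_SLAVE_INTSTAT_ATTACHED) set and is
 * reported as SDW_SLAVE_ATTACHED, while all other devices are left untouched.
 */
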
/**
 * sdw_cdns_irq() - Cadence interrupt handler
 * @irq: irq number
 * @dev_id: irq context
 */
irqreturn_t sdw_cdns_irq(int irq, void *dev_id)
{
	struct sdw_cdns *cdns = dev_id;
	u32 int_status;
	int ret = IRQ_HANDLED;

	/* Check if the link is up */
	if (!cdns->link_up)
		return IRQ_NONE;

	int_status = cdns_readl(cdns, CDNS_MCP_INTSTAT);

	/* check for reserved values read as zero */
	if (int_status & CDNS_MCP_INT_RESERVED)
		return IRQ_NONE;

	if (!(int_status & CDNS_MCP_INT_IRQ))
		return IRQ_NONE;

	if (int_status & CDNS_MCP_INT_RX_WL) {
		cdns_read_response(cdns);

		if (cdns->defer) {
			cdns_fill_msg_resp(cdns, cdns->defer->msg,
					   cdns->defer->length, 0);
			complete(&cdns->defer->complete);
			cdns->defer = NULL;
		} else {
			complete(&cdns->tx_complete);
		}
	}

	if (int_status & CDNS_MCP_INT_PARITY) {
		/* Parity error detected by Master */
		dev_err_ratelimited(cdns->dev, "Parity error\n");
	}

	if (int_status & CDNS_MCP_INT_CTRL_CLASH) {
		/* Slave is driving bit slot during control word */
		dev_err_ratelimited(cdns->dev, "Bus clash for control word\n");
	}

	if (int_status & CDNS_MCP_INT_DATA_CLASH) {
		/*
		 * Multiple slaves trying to drive bit slot, or issue with
		 * ownership of data bits or Slave gone bonkers
		 */
		dev_err_ratelimited(cdns->dev, "Bus clash for data word\n");
	}

	if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL &&
	    int_status & CDNS_MCP_INT_DPINT) {
		u32 port_intstat;

		/* just log which ports report an error */
		port_intstat = cdns_readl(cdns, CDNS_MCP_PORT_INTSTAT);
		dev_err_ratelimited(cdns->dev, "DP interrupt: PortIntStat %8x\n",
				    port_intstat);

		/* clear status w/ write1 */
		cdns_writel(cdns, CDNS_MCP_PORT_INTSTAT, port_intstat);
	}

	if (int_status & CDNS_MCP_INT_SLAVE_MASK) {
		/* Mask the Slave interrupt and wake thread */
		cdns_updatel(cdns, CDNS_MCP_INTMASK,
			     CDNS_MCP_INT_SLAVE_MASK, 0);

		int_status &= ~CDNS_MCP_INT_SLAVE_MASK;

		/*
		 * Deal with possible race condition between interrupt
		 * handling and disabling interrupts on suspend.
		 *
		 * If the master is in the process of disabling
		 * interrupts, don't schedule a workqueue
		 */
		if (cdns->interrupt_enabled)
			schedule_work(&cdns->work);
	}

	cdns_writel(cdns, CDNS_MCP_INTSTAT, int_status);
	return ret;
}
EXPORT_SYMBOL(sdw_cdns_irq);

/**
 * cdns_update_slave_status_work() - update slave status in a work
 * @work: cdns worker thread
 *
 * The update is handled in a workqueue since other interrupts
 * (e.g. CDNS_MCP_INT_RX_WL) may need to be serviced while the
 * slave status update is in progress.
 */
static void cdns_update_slave_status_work(struct work_struct *work)
{
	struct sdw_cdns *cdns =
		container_of(work, struct sdw_cdns, work);
	u32 slave0, slave1;
	u64 slave_intstat;

	slave0 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
	slave1 = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);

	/* combine the two status */
	slave_intstat = ((u64)slave1 << 32) | slave0;

	dev_dbg_ratelimited(cdns->dev, "Slave status change: 0x%llx\n", slave_intstat);

	cdns_update_slave_status(cdns, slave_intstat);
	cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave0);
	cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave1);

	/* clear and unmask Slave interrupt now */
	cdns_writel(cdns, CDNS_MCP_INTSTAT, CDNS_MCP_INT_SLAVE_MASK);
	cdns_updatel(cdns, CDNS_MCP_INTMASK,
		     CDNS_MCP_INT_SLAVE_MASK, CDNS_MCP_INT_SLAVE_MASK);
}

/*
 * init routines
 */

/**
 * sdw_cdns_exit_reset() - Program reset parameters and start bus operations
 * @cdns: Cadence instance
 */
int sdw_cdns_exit_reset(struct sdw_cdns *cdns)
{
	/* program maximum length reset to be safe */
	cdns_updatel(cdns, CDNS_MCP_CONTROL,
		     CDNS_MCP_CONTROL_RST_DELAY,
		     CDNS_MCP_CONTROL_RST_DELAY);

	/* use hardware generated reset */
	cdns_updatel(cdns, CDNS_MCP_CONTROL,
		     CDNS_MCP_CONTROL_HW_RST,
		     CDNS_MCP_CONTROL_HW_RST);

	/* commit changes */
	cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
		     CDNS_MCP_CONFIG_UPDATE_BIT,
		     CDNS_MCP_CONFIG_UPDATE_BIT);

	/* don't wait here */
	return 0;
}
EXPORT_SYMBOL(sdw_cdns_exit_reset);

/**
 * cdns_enable_slave_interrupts() - Enable SDW slave interrupts
 * @cdns: Cadence instance
 * @state: boolean for true/false
 */
static void cdns_enable_slave_interrupts(struct sdw_cdns *cdns, bool state)
{
	u32 mask;

	mask = cdns_readl(cdns, CDNS_MCP_INTMASK);
	if (state)
		mask |= CDNS_MCP_INT_SLAVE_MASK;
	else
		mask &= ~CDNS_MCP_INT_SLAVE_MASK;

	cdns_writel(cdns, CDNS_MCP_INTMASK, mask);
}

/**
 * sdw_cdns_enable_interrupt() - Enable SDW interrupts
 * @cdns: Cadence instance
 * @state: True if we are trying to enable interrupt.
 */
int sdw_cdns_enable_interrupt(struct sdw_cdns *cdns, bool state)
{
	u32 slave_intmask0 = 0;
	u32 slave_intmask1 = 0;
	u32 mask = 0;

	if (!state)
		goto update_masks;

	slave_intmask0 = CDNS_MCP_SLAVE_INTMASK0_MASK;
	slave_intmask1 = CDNS_MCP_SLAVE_INTMASK1_MASK;

	/* enable detection of all slave state changes */
	mask = CDNS_MCP_INT_SLAVE_MASK;

	/* enable detection of bus issues */
	mask |= CDNS_MCP_INT_CTRL_CLASH | CDNS_MCP_INT_DATA_CLASH |
		CDNS_MCP_INT_PARITY;

	/* port interrupt limited to test modes for now */
	if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
		mask |= CDNS_MCP_INT_DPINT;

	/* enable detection of RX fifo level */
	mask |= CDNS_MCP_INT_RX_WL;

	/*
	 * CDNS_MCP_INT_IRQ needs to be set otherwise all previous
	 * settings are irrelevant
	 */
	mask |= CDNS_MCP_INT_IRQ;

	if (interrupt_mask) /* parameter override */
		mask = interrupt_mask;

update_masks:
	/* clear slave interrupt status before enabling interrupt */
	if (state) {
		u32 slave_state;

		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT0);
		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT0, slave_state);
		slave_state = cdns_readl(cdns, CDNS_MCP_SLAVE_INTSTAT1);
		cdns_writel(cdns, CDNS_MCP_SLAVE_INTSTAT1, slave_state);
	}
	cdns->interrupt_enabled = state;

	/*
	 * Complete any on-going status updates before updating masks,
	 * and cancel queued status updates.
	 *
	 * There could be a race with a new interrupt thrown before
	 * the 3 mask updates below are complete, so in the interrupt
	 * we use the 'interrupt_enabled' status to prevent new work
	 * from being queued.
	 */
	if (!state)
		cancel_work_sync(&cdns->work);

	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK0, slave_intmask0);
	cdns_writel(cdns, CDNS_MCP_SLAVE_INTMASK1, slave_intmask1);
	cdns_writel(cdns, CDNS_MCP_INTMASK, mask);

	return 0;
}
EXPORT_SYMBOL(sdw_cdns_enable_interrupt);

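/*
 * Note on the cdns_mcp_int_mask module parameter used above: a non-zero
 * value replaces the composed interrupt mask entirely. For example
 * (hypothetical debug setting), loading the module with
 * cdns_mcp_int_mask=0x80000000 keeps only CDNS_MCP_INT_IRQ set and masks
 * every individual interrupt source; the default of 0 leaves the mask
 * computed above unchanged.
 */
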
static int cdns_allocate_pdi(struct sdw_cdns *cdns,
			     struct sdw_cdns_pdi **stream,
			     u32 num, u32 pdi_offset)
{
	struct sdw_cdns_pdi *pdi;
	int i;

	if (!num)
		return 0;

	pdi = devm_kcalloc(cdns->dev, num, sizeof(*pdi), GFP_KERNEL);
	if (!pdi)
		return -ENOMEM;

	for (i = 0; i < num; i++)
		pdi[i].num = i + pdi_offset;

	*stream = pdi;

	return 0;
}

/**
 * sdw_cdns_pdi_init() - PDI initialization routine
 *
 * @cdns: Cadence instance
 * @config: Stream configurations
 */
int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
		      struct sdw_cdns_stream_config config)
{
	struct sdw_cdns_streams *stream;
	int offset;
	int ret;

	cdns->pcm.num_bd = config.pcm_bd;
	cdns->pcm.num_in = config.pcm_in;
	cdns->pcm.num_out = config.pcm_out;
	cdns->pdm.num_bd = config.pdm_bd;
	cdns->pdm.num_in = config.pdm_in;
	cdns->pdm.num_out = config.pdm_out;

	/* Allocate PDIs for PCMs */
	stream = &cdns->pcm;

	/* we allocate PDI0 and PDI1 which are used for Bulk */
	offset = 0;

	ret = cdns_allocate_pdi(cdns, &stream->bd,
				stream->num_bd, offset);
	if (ret)
		return ret;

	offset += stream->num_bd;

	ret = cdns_allocate_pdi(cdns, &stream->in,
				stream->num_in, offset);
	if (ret)
		return ret;

	offset += stream->num_in;

	ret = cdns_allocate_pdi(cdns, &stream->out,
				stream->num_out, offset);
	if (ret)
		return ret;

	/* Update total number of PCM PDIs */
	stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
	cdns->num_ports = stream->num_pdi;

	/* Allocate PDIs for PDMs */
	stream = &cdns->pdm;
	ret = cdns_allocate_pdi(cdns, &stream->bd,
				stream->num_bd, offset);
	if (ret)
		return ret;

	offset += stream->num_bd;

	ret = cdns_allocate_pdi(cdns, &stream->in,
				stream->num_in, offset);
	if (ret)
		return ret;

	offset += stream->num_in;

	ret = cdns_allocate_pdi(cdns, &stream->out,
				stream->num_out, offset);
	if (ret)
		return ret;

	/* Update total number of PDM PDIs */
	stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
	cdns->num_ports += stream->num_pdi;

	return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);

static u32 cdns_set_initial_frame_shape(int n_rows, int n_cols)
{
	u32 val;
	int c;
	int r;

	r = sdw_find_row_index(n_rows);
	c = sdw_find_col_index(n_cols);

	val = FIELD_PREP(CDNS_MCP_FRAME_SHAPE_ROW_MASK, r);
	val |= FIELD_PREP(CDNS_MCP_FRAME_SHAPE_COL_MASK, c);

	return val;
}

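/*
 * Note that the register encodes the row/column *table indices* returned by
 * sdw_find_row_index() and sdw_find_col_index(), not the raw counts.
 * Worked example (hypothetical frame shape, assuming 50 rows and 4 columns
 * both map to index 1 in their respective tables): the value programmed into
 * CDNS_MCP_FRAME_SHAPE_INIT would be (1 << 3) | 1 = 0x9.
 */
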
static void cdns_init_clock_ctrl(struct sdw_cdns *cdns)
{
	struct sdw_bus *bus = &cdns->bus;
	struct sdw_master_prop *prop = &bus->prop;
	u32 val;
	u32 ssp_interval;
	int divider;

	/* Set clock divider */
	divider = (prop->mclk_freq / prop->max_clk_freq) - 1;

	cdns_updatel(cdns, CDNS_MCP_CLK_CTRL0,
		     CDNS_MCP_CLK_MCLKD_MASK, divider);
	cdns_updatel(cdns, CDNS_MCP_CLK_CTRL1,
		     CDNS_MCP_CLK_MCLKD_MASK, divider);

	/*
	 * Frame shape changes after initialization have to be done
	 * with the bank switch mechanism
	 */
	val = cdns_set_initial_frame_shape(prop->default_row,
					   prop->default_col);
	cdns_writel(cdns, CDNS_MCP_FRAME_SHAPE_INIT, val);

	/* Set SSP interval to default value */
	ssp_interval = prop->default_frame_rate / SDW_CADENCE_GSYNC_HZ;
	cdns_writel(cdns, CDNS_MCP_SSP_CTRL0, ssp_interval);
	cdns_writel(cdns, CDNS_MCP_SSP_CTRL1, ssp_interval);
}

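/*
 * Arithmetic illustration for cdns_init_clock_ctrl() (hypothetical platform
 * properties, for illustration only): with mclk_freq = 9.6 MHz and
 * max_clk_freq = 4.8 MHz the divider field is (9.6 / 4.8) - 1 = 1, i.e. the
 * bus clock is mclk / (divider + 1). With a default frame rate of 48 kHz,
 * and assuming SDW_CADENCE_GSYNC_HZ is 4 kHz, the SSP interval is
 * 48000 / 4000 = 12 frames.
 */
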
/**
 * sdw_cdns_init() - Cadence initialization
 * @cdns: Cadence instance
 */
int sdw_cdns_init(struct sdw_cdns *cdns)
{
	u32 val;

	cdns_init_clock_ctrl(cdns);

	/* reset msg_count to default value of FIFOLEVEL */
	cdns->msg_count = cdns_readl(cdns, CDNS_MCP_FIFOLEVEL);

	/* flush command FIFOs */
	cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_RST,
		     CDNS_MCP_CONTROL_CMD_RST);

	/* Set cmd accept mode */
	cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
		     CDNS_MCP_CONTROL_CMD_ACCEPT);

	/* Configure mcp config */
	val = cdns_readl(cdns, CDNS_MCP_CONFIG);

	/* enable bus operations with clock and data */
	val &= ~CDNS_MCP_CONFIG_OP;
	val |= CDNS_MCP_CONFIG_OP_NORMAL;

	/* Set cmd mode for Tx and Rx cmds */
	val &= ~CDNS_MCP_CONFIG_CMD;

	/* Disable sniffer mode */
	val &= ~CDNS_MCP_CONFIG_SNIFFER;

	/* Disable auto bus release */
	val &= ~CDNS_MCP_CONFIG_BUS_REL;

	if (cdns->bus.multi_link)
		/* Set Multi-master mode to take gsync into account */
		val |= CDNS_MCP_CONFIG_MMASTER;

	/* leave frame delay to hardware default of 0x1F */

	/* leave command retry to hardware default of 0 */

	cdns_writel(cdns, CDNS_MCP_CONFIG, val);

	/* changes will be committed later */
	return 0;
}
EXPORT_SYMBOL(sdw_cdns_init);

int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params)
{
	struct sdw_master_prop *prop = &bus->prop;
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int mcp_clkctrl_off;
	int divider;

	if (!params->curr_dr_freq) {
		dev_err(cdns->dev, "NULL curr_dr_freq\n");
		return -EINVAL;
	}

	divider = prop->mclk_freq * SDW_DOUBLE_RATE_FACTOR /
		params->curr_dr_freq;
	divider--; /* divider is 1/(N+1) */

	if (params->next_bank)
		mcp_clkctrl_off = CDNS_MCP_CLK_CTRL1;
	else
		mcp_clkctrl_off = CDNS_MCP_CLK_CTRL0;

	cdns_updatel(cdns, mcp_clkctrl_off, CDNS_MCP_CLK_MCLKD_MASK, divider);

	return 0;
}
EXPORT_SYMBOL(cdns_bus_conf);

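/*
 * Divider illustration for cdns_bus_conf() (hypothetical frequencies, for
 * illustration only): with mclk_freq = 9.6 MHz and a current double-rate
 * frequency of 9.6 MHz (i.e. a 4.8 MHz bus clock), the computation is
 * 9.6 MHz * 2 / 9.6 MHz - 1 = 1, so the selected bank's clock control
 * register divides mclk by divider + 1 = 2.
 */
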
static int cdns_port_params(struct sdw_bus *bus,
			    struct sdw_port_params *p_params, unsigned int bank)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int dpn_config = 0, dpn_config_off;

	if (bank)
		dpn_config_off = CDNS_DPN_B1_CONFIG(p_params->num);
	else
		dpn_config_off = CDNS_DPN_B0_CONFIG(p_params->num);

	dpn_config = cdns_readl(cdns, dpn_config_off);

	u32p_replace_bits(&dpn_config, (p_params->bps - 1), CDNS_DPN_CONFIG_WL);
	u32p_replace_bits(&dpn_config, p_params->flow_mode, CDNS_DPN_CONFIG_PORT_FLOW);
	u32p_replace_bits(&dpn_config, p_params->data_mode, CDNS_DPN_CONFIG_PORT_DAT);

	cdns_writel(cdns, dpn_config_off, dpn_config);

	return 0;
}

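/*
 * Field illustration for cdns_port_params() (hypothetical parameters, for
 * illustration only): for a 24-bit sample (bps = 24) the word-length field
 * CDNS_DPN_CONFIG_WL (bits 12:8) is programmed with bps - 1 = 23, while the
 * flow and data modes land in the low bits of the same DPn config register;
 * u32p_replace_bits() only touches the bits covered by each mask.
 */
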
static int cdns_transport_params(struct sdw_bus *bus,
				 struct sdw_transport_params *t_params,
				 enum sdw_reg_bank bank)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int dpn_offsetctrl = 0, dpn_offsetctrl_off;
	int dpn_config = 0, dpn_config_off;
	int dpn_hctrl = 0, dpn_hctrl_off;
	int num = t_params->port_num;
	int dpn_samplectrl_off;

	/*
	 * Note: Only full data port is supported on the Master side for
	 * both PCM and PDM ports.
	 */

	if (bank) {
		dpn_config_off = CDNS_DPN_B1_CONFIG(num);
		dpn_samplectrl_off = CDNS_DPN_B1_SAMPLE_CTRL(num);
		dpn_hctrl_off = CDNS_DPN_B1_HCTRL(num);
		dpn_offsetctrl_off = CDNS_DPN_B1_OFFSET_CTRL(num);
	} else {
		dpn_config_off = CDNS_DPN_B0_CONFIG(num);
		dpn_samplectrl_off = CDNS_DPN_B0_SAMPLE_CTRL(num);
		dpn_hctrl_off = CDNS_DPN_B0_HCTRL(num);
		dpn_offsetctrl_off = CDNS_DPN_B0_OFFSET_CTRL(num);
	}

	dpn_config = cdns_readl(cdns, dpn_config_off);

	u32p_replace_bits(&dpn_config, t_params->blk_grp_ctrl, CDNS_DPN_CONFIG_BGC);
	u32p_replace_bits(&dpn_config, t_params->blk_pkg_mode, CDNS_DPN_CONFIG_BPM);

	cdns_writel(cdns, dpn_config_off, dpn_config);

	u32p_replace_bits(&dpn_offsetctrl, t_params->offset1, CDNS_DPN_OFFSET_CTRL_1);
	u32p_replace_bits(&dpn_offsetctrl, t_params->offset2, CDNS_DPN_OFFSET_CTRL_2);

	cdns_writel(cdns, dpn_offsetctrl_off, dpn_offsetctrl);

	u32p_replace_bits(&dpn_hctrl, t_params->hstart, CDNS_DPN_HCTRL_HSTART);
	u32p_replace_bits(&dpn_hctrl, t_params->hstop, CDNS_DPN_HCTRL_HSTOP);
	u32p_replace_bits(&dpn_hctrl, t_params->lane_ctrl, CDNS_DPN_HCTRL_LCTRL);

	cdns_writel(cdns, dpn_hctrl_off, dpn_hctrl);
	cdns_writel(cdns, dpn_samplectrl_off, (t_params->sample_interval - 1));

	return 0;
}

static int cdns_port_enable(struct sdw_bus *bus,
			    struct sdw_enable_ch *enable_ch, unsigned int bank)
{
	struct sdw_cdns *cdns = bus_to_cdns(bus);
	int dpn_chnen_off, ch_mask;

	if (bank)
		dpn_chnen_off = CDNS_DPN_B1_CH_EN(enable_ch->port_num);
	else
		dpn_chnen_off = CDNS_DPN_B0_CH_EN(enable_ch->port_num);

	ch_mask = enable_ch->ch_mask * enable_ch->enable;
	cdns_writel(cdns, dpn_chnen_off, ch_mask);

	return 0;
}

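/*
 * Channel-enable illustration for cdns_port_enable() (hypothetical values,
 * for illustration only): multiplying ch_mask by the enable flag is a
 * compact way of writing either the mask or zero. With ch_mask = 0x3
 * (two channels) and enable = 1 the bank's CH_EN register receives 0x3;
 * with enable = 0 it receives 0, disabling all channels on that port.
 */
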
static const struct sdw_master_port_ops cdns_port_ops = {
	.dpn_set_port_params = cdns_port_params,
	.dpn_set_port_transport_params = cdns_transport_params,
	.dpn_port_enable_ch = cdns_port_enable,
};

/**
 * sdw_cdns_is_clock_stop: Check clock status
 *
 * @cdns: Cadence instance
 */
bool sdw_cdns_is_clock_stop(struct sdw_cdns *cdns)
{
	return !!(cdns_readl(cdns, CDNS_MCP_STAT) & CDNS_MCP_STAT_CLK_STOP);
}
EXPORT_SYMBOL(sdw_cdns_is_clock_stop);

/**
 * sdw_cdns_clock_stop: Cadence clock stop configuration routine
 *
 * @cdns: Cadence instance
 * @block_wake: prevent wakes if required by the platform
 */
int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
{
	bool slave_present = false;
	struct sdw_slave *slave;
	int ret;

	/* Check suspend status */
	if (sdw_cdns_is_clock_stop(cdns)) {
		dev_dbg(cdns->dev, "Clock is already stopped\n");
		return 0;
	}

	/*
	 * Before entering clock stop we mask the Slave
	 * interrupts. This helps avoid having to deal with e.g. a
	 * Slave becoming UNATTACHED while the clock is being stopped
	 */
	cdns_enable_slave_interrupts(cdns, false);

	/*
	 * For specific platforms, it is required to be able to put
	 * master into a state in which it ignores wake-up trials
	 * in clock stop state
	 */
	if (block_wake)
		cdns_updatel(cdns, CDNS_MCP_CONTROL,
			     CDNS_MCP_CONTROL_BLOCK_WAKEUP,
			     CDNS_MCP_CONTROL_BLOCK_WAKEUP);

	list_for_each_entry(slave, &cdns->bus.slaves, node) {
		if (slave->status == SDW_SLAVE_ATTACHED ||
		    slave->status == SDW_SLAVE_ALERT) {
			slave_present = true;
			break;
		}
	}

	/*
	 * This CMD_ACCEPT should be used when there are no devices
	 * attached on the link when entering clock stop mode. If this is
	 * not set and there is a broadcast write then the command ignored
	 * will be treated as a failure
	 */
	if (!slave_present)
		cdns_updatel(cdns, CDNS_MCP_CONTROL,
			     CDNS_MCP_CONTROL_CMD_ACCEPT,
			     CDNS_MCP_CONTROL_CMD_ACCEPT);
	else
		cdns_updatel(cdns, CDNS_MCP_CONTROL,
			     CDNS_MCP_CONTROL_CMD_ACCEPT, 0);

	/* commit changes */
	ret = cdns_config_update(cdns);
	if (ret < 0) {
		dev_err(cdns->dev, "%s: config_update failed\n", __func__);
		return ret;
	}

	/* Prepare slaves for clock stop */
	ret = sdw_bus_prep_clk_stop(&cdns->bus);
	if (ret < 0) {
		dev_err(cdns->dev, "prepare clock stop failed %d\n", ret);
		return ret;
	}

	/*
	 * Enter clock stop mode and only report errors if there are
	 * Slave devices present (ALERT or ATTACHED)
	 */
	ret = sdw_bus_clk_stop(&cdns->bus);
	if (ret < 0 && slave_present && ret != -ENODATA) {
		dev_err(cdns->dev, "bus clock stop failed %d\n", ret);
		return ret;
	}

	ret = cdns_set_wait(cdns, CDNS_MCP_STAT,
			    CDNS_MCP_STAT_CLK_STOP,
			    CDNS_MCP_STAT_CLK_STOP);
	if (ret < 0)
		dev_err(cdns->dev, "Clock stop failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL(sdw_cdns_clock_stop);

/**
 * sdw_cdns_clock_restart: Cadence PM clock restart configuration routine
 *
 * @cdns: Cadence instance
 * @bus_reset: context may be lost while in low power modes and the bus
 * may require a Severe Reset and re-enumeration after a wake.
 */
int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
{
	int ret;

	/* unmask Slave interrupts that were masked when stopping the clock */
	cdns_enable_slave_interrupts(cdns, true);

	ret = cdns_clear_bit(cdns, CDNS_MCP_CONTROL,
			     CDNS_MCP_CONTROL_CLK_STOP_CLR);
	if (ret < 0) {
		dev_err(cdns->dev, "Couldn't exit from clock stop\n");
		return ret;
	}

	ret = cdns_set_wait(cdns, CDNS_MCP_STAT, CDNS_MCP_STAT_CLK_STOP, 0);
	if (ret < 0) {
		dev_err(cdns->dev, "clock stop exit failed %d\n", ret);
		return ret;
	}

	cdns_updatel(cdns, CDNS_MCP_CONTROL,
		     CDNS_MCP_CONTROL_BLOCK_WAKEUP, 0);

	/*
	 * clear CMD_ACCEPT so that the command ignored
	 * will be treated as a failure during a broadcast write
	 */
	cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT, 0);

	if (!bus_reset) {
		/* enable bus operations with clock and data */
		cdns_updatel(cdns, CDNS_MCP_CONFIG,
			     CDNS_MCP_CONFIG_OP,
			     CDNS_MCP_CONFIG_OP_NORMAL);

		ret = cdns_config_update(cdns);
		if (ret < 0) {
			dev_err(cdns->dev, "%s: config_update failed\n", __func__);
			return ret;
		}

		ret = sdw_bus_exit_clk_stop(&cdns->bus);
		if (ret < 0)
			dev_err(cdns->dev, "bus failed to exit clock stop %d\n", ret);
	}

	return ret;
}
EXPORT_SYMBOL(sdw_cdns_clock_restart);

/**
 * sdw_cdns_probe() - Cadence probe routine
 * @cdns: Cadence instance
 */
int sdw_cdns_probe(struct sdw_cdns *cdns)
{
	init_completion(&cdns->tx_complete);
	cdns->bus.port_ops = &cdns_port_ops;

	INIT_WORK(&cdns->work, cdns_update_slave_status_work);

	return 0;
}
EXPORT_SYMBOL(sdw_cdns_probe);

int cdns_set_sdw_stream(struct snd_soc_dai *dai,
			void *stream, bool pcm, int direction)
{
	struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
	struct sdw_cdns_dma_data *dma;

	if (stream) {
		/* first paranoia check */
		if (direction == SNDRV_PCM_STREAM_PLAYBACK)
			dma = dai->playback_dma_data;
		else
			dma = dai->capture_dma_data;

		if (dma) {
			dev_err(dai->dev,
				"dma_data already allocated for dai %s\n",
				dai->name);
			return -EINVAL;
		}

		/* allocate and set dma info */
		dma = kzalloc(sizeof(*dma), GFP_KERNEL);
		if (!dma)
			return -ENOMEM;

		if (pcm)
			dma->stream_type = SDW_STREAM_PCM;
		else
			dma->stream_type = SDW_STREAM_PDM;

		dma->bus = &cdns->bus;
		dma->link_id = cdns->instance;

		dma->stream = stream;

		if (direction == SNDRV_PCM_STREAM_PLAYBACK)
			dai->playback_dma_data = dma;
		else
			dai->capture_dma_data = dma;
	} else {
		/* for NULL stream we release allocated dma_data */
		if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
			kfree(dai->playback_dma_data);
			dai->playback_dma_data = NULL;
		} else {
			kfree(dai->capture_dma_data);
			dai->capture_dma_data = NULL;
		}
	}

	return 0;
}
EXPORT_SYMBOL(cdns_set_sdw_stream);

/**
 * cdns_find_pdi() - Find a free PDI
 *
 * @cdns: Cadence instance
 * @offset: Starting offset
 * @num: Number of PDIs
 * @pdi: PDI instances
 * @dai_id: DAI id
 *
 * Find a PDI for a given PDI array. The PDI num and dai_id are
 * expected to match, return NULL otherwise.
 */
static struct sdw_cdns_pdi *cdns_find_pdi(struct sdw_cdns *cdns,
					  unsigned int offset,
					  unsigned int num,
					  struct sdw_cdns_pdi *pdi,
					  int dai_id)
{
	int i;

	for (i = offset; i < offset + num; i++)
		if (pdi[i].num == dai_id)
			return &pdi[i];

	return NULL;
}

/**
 * sdw_cdns_config_stream: Configure a stream
 *
 * @cdns: Cadence instance
 * @ch: Channel count
 * @dir: Data direction
 * @pdi: PDI to be used
 */
void sdw_cdns_config_stream(struct sdw_cdns *cdns,
			    u32 ch, u32 dir, struct sdw_cdns_pdi *pdi)
{
	u32 offset, val = 0;

	if (dir == SDW_DATA_DIR_RX) {
		val = CDNS_PORTCTRL_DIRN;

		if (cdns->bus.params.m_data_mode != SDW_PORT_DATA_MODE_NORMAL)
			val |= CDNS_PORTCTRL_TEST_FAILED;
	}
	offset = CDNS_PORTCTRL + pdi->num * CDNS_PORT_OFFSET;
	cdns_updatel(cdns, offset,
		     CDNS_PORTCTRL_DIRN | CDNS_PORTCTRL_TEST_FAILED,
		     val);

	val = pdi->num;
	val |= CDNS_PDI_CONFIG_SOFT_RESET;
	val |= FIELD_PREP(CDNS_PDI_CONFIG_CHANNEL, (1 << ch) - 1);
	cdns_writel(cdns, CDNS_PDI_CONFIG(pdi->num), val);
}
EXPORT_SYMBOL(sdw_cdns_config_stream);

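/*
 * Channel-mask illustration for sdw_cdns_config_stream() (hypothetical
 * channel count, for illustration only): the PDI config register takes a
 * bitmask of active channels, so a stream with ch = 2 programs
 * (1 << 2) - 1 = 0x3 into CDNS_PDI_CONFIG_CHANNEL, i.e. channels 0 and 1.
 */
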
/**
 * sdw_cdns_alloc_pdi() - Allocate a PDI
 *
 * @cdns: Cadence instance
 * @stream: Stream to be allocated
 * @ch: Channel count
 * @dir: Data direction
 * @dai_id: DAI id
 */
struct sdw_cdns_pdi *sdw_cdns_alloc_pdi(struct sdw_cdns *cdns,
					struct sdw_cdns_streams *stream,
					u32 ch, u32 dir, int dai_id)
{
	struct sdw_cdns_pdi *pdi = NULL;

	if (dir == SDW_DATA_DIR_RX)
		pdi = cdns_find_pdi(cdns, 0, stream->num_in, stream->in,
				    dai_id);
	else
		pdi = cdns_find_pdi(cdns, 0, stream->num_out, stream->out,
				    dai_id);

	/* check if we found a PDI, else find in bi-directional */
	if (!pdi)
		pdi = cdns_find_pdi(cdns, 2, stream->num_bd, stream->bd,
				    dai_id);

	if (pdi) {
		pdi->l_ch_num = 0;
		pdi->h_ch_num = ch - 1;
		pdi->dir = dir;
		pdi->ch_count = ch;
	}

	return pdi;
}
EXPORT_SYMBOL(sdw_cdns_alloc_pdi);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Cadence SoundWire Library");