2018-09-26 18:27:40 +03:00
// SPDX-License-Identifier: GPL-2.0
2010-04-26 21:13:05 +04:00
/*
2018-09-26 18:27:40 +03:00
* Driver for the Intel SCU IPC mechanism
2010-04-26 21:13:05 +04:00
*
2015-01-21 22:38:09 +03:00
* ( C ) Copyright 2008 - 2010 , 2015 Intel Corporation
2010-04-26 21:13:05 +04:00
* Author : Sreedhara DS ( sreedhara . ds @ intel . com )
*
2011-03-17 23:18:22 +03:00
* SCU running in ARC processor communicates with other entity running in IA
2010-04-26 21:13:05 +04:00
* core through IPC mechanism which in turn messaging between IA core ad SCU .
* SCU has two IPC mechanism IPC - 1 and IPC - 2. IPC - 1 is used between IA32 and
* SCU where IPC - 2 is used between P - Unit and SCU . This driver delas with
* IPC - 1 Driver provides an API for power control unit registers ( e . g . MSIC )
* along with other APIs .
*/
2018-09-26 18:27:14 +03:00
2010-04-26 21:13:05 +04:00
# include <linux/delay.h>
2018-09-26 18:27:14 +03:00
# include <linux/device.h>
2010-04-26 21:13:05 +04:00
# include <linux/errno.h>
# include <linux/init.h>
# include <linux/interrupt.h>
2020-04-16 11:15:33 +03:00
# include <linux/io.h>
# include <linux/module.h>
# include <linux/slab.h>
2018-09-26 18:27:14 +03:00
2010-04-26 21:13:05 +04:00
# include <asm/intel_scu_ipc.h>
/* IPC defines the following message types */
2020-01-22 19:28:10 +03:00
# define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */
2010-04-26 21:13:05 +04:00
/* Command id associated with message IPCMSG_PCNTRL */
# define IPC_CMD_PCNTRL_W 0 /* Register write */
# define IPC_CMD_PCNTRL_R 1 /* Register read */
# define IPC_CMD_PCNTRL_M 2 /* Register read-modify-write */
/*
* IPC register summary
*
2015-01-21 22:38:11 +03:00
* IPC register blocks are memory mapped at fixed address of PCI BAR 0.
2010-04-26 21:13:05 +04:00
* To read or write information to the SCU , driver writes to IPC - 1 memory
2015-01-21 22:38:11 +03:00
* mapped registers . The following is the IPC mechanism
2010-04-26 21:13:05 +04:00
*
* 1. IA core cDMI interface claims this transaction and converts it to a
* Transaction Layer Packet ( TLP ) message which is sent across the cDMI .
*
* 2. South Complex cDMI block receives this message and writes it to
* the IPC - 1 register block , causing an interrupt to the SCU
*
* 3. SCU firmware decodes this interrupt and IPC message and the appropriate
* message handler is called within firmware .
*/
2010-07-26 13:04:24 +04:00
# define IPC_WWBUF_SIZE 20 /* IPC Write buffer Size */
# define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */
2013-11-16 04:21:54 +04:00
# define IPC_IOC 0x100 /* IPC command register IOC bit */
2013-11-15 02:15:04 +04:00
2010-04-26 21:13:05 +04:00
/*
 * struct intel_scu_ipc_dev - SCU IPC device instance
 * @dev: Driver model device embedded in this instance
 * @mem: Memory region occupied by the IPC register block
 * @owner: Module providing this instance; pinned while users hold a reference
 * @irq: IPC interrupt number, or <= 0 when status polling is used instead
 * @ipc_base: Mapped base of the IPC registers
 * @cmd_complete: Completed by the IRQ handler when a command finishes
 */
struct intel_scu_ipc_dev {
	struct device dev;
	struct resource mem;
	struct module *owner;
	int irq;
	void __iomem *ipc_base;
	struct completion cmd_complete;
};
2020-01-22 19:28:04 +03:00
/* IPC status register offset and its bits */
#define IPC_STATUS		0x04
#define IPC_STATUS_IRQ		BIT(2)	/* IOC interrupt pending */
#define IPC_STATUS_ERR		BIT(1)	/* Command failed in the SCU */
#define IPC_STATUS_BUSY		BIT(0)	/* SCU still processing a command */

/*
 * IPC Write/Read Buffers:
 * 16 byte buffer for sending and receiving data to and from SCU.
 */
#define IPC_WRITE_BUFFER	0x80
#define IPC_READ_BUFFER		0x90

/* Timeout in jiffies */
#define IPC_TIMEOUT		(10 * HZ)

static struct intel_scu_ipc_dev *ipcdev; /* Only one for now */
static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */

static struct class intel_scu_ipc_class = {
	.name = "intel_scu_ipc",
	.owner = THIS_MODULE,
};
2020-04-16 11:15:36 +03:00
/**
 * intel_scu_ipc_dev_get() - Get SCU IPC instance
 *
 * The recommended new API takes SCU IPC instance as parameter and this
 * function can be called by driver to get the instance. This also makes
 * sure the driver providing the IPC functionality cannot be unloaded
 * while the caller has the instance.
 *
 * Call intel_scu_ipc_dev_put() to release the instance.
 *
 * Returns %NULL if SCU IPC is not currently available.
 */
struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void)
{
	struct intel_scu_ipc_dev *instance = NULL;

	mutex_lock(&ipclock);
	if (ipcdev) {
		get_device(&ipcdev->dev);
		/*
		 * Pin the module providing the IPC so it cannot be
		 * unloaded while the caller uses the instance. Drop the
		 * device reference again if pinning fails.
		 */
		if (try_module_get(ipcdev->owner))
			instance = ipcdev;
		else
			put_device(&ipcdev->dev);
	}
	mutex_unlock(&ipclock);

	return instance;
}
EXPORT_SYMBOL_GPL(intel_scu_ipc_dev_get);
/**
* intel_scu_ipc_dev_put ( ) - Put SCU IPC instance
* @ scu : SCU IPC instance
*
* This function releases the SCU IPC instance retrieved from
* intel_scu_ipc_dev_get ( ) and allows the driver providing IPC to be
* unloaded .
*/
void intel_scu_ipc_dev_put ( struct intel_scu_ipc_dev * scu )
{
if ( scu ) {
module_put ( scu - > owner ) ;
put_device ( & scu - > dev ) ;
}
}
EXPORT_SYMBOL_GPL ( intel_scu_ipc_dev_put ) ;
struct intel_scu_ipc_devres {
struct intel_scu_ipc_dev * scu ;
} ;
static void devm_intel_scu_ipc_dev_release ( struct device * dev , void * res )
{
struct intel_scu_ipc_devres * dr = res ;
struct intel_scu_ipc_dev * scu = dr - > scu ;
intel_scu_ipc_dev_put ( scu ) ;
}
/**
* devm_intel_scu_ipc_dev_get ( ) - Allocate managed SCU IPC device
* @ dev : Device requesting the SCU IPC device
*
* The recommended new API takes SCU IPC instance as parameter and this
* function can be called by driver to get the instance . This also makes
* sure the driver providing the IPC functionality cannot be unloaded
* while the caller has the instance .
*
* Returns % NULL if SCU IPC is not currently available .
*/
struct intel_scu_ipc_dev * devm_intel_scu_ipc_dev_get ( struct device * dev )
{
struct intel_scu_ipc_devres * dr ;
struct intel_scu_ipc_dev * scu ;
dr = devres_alloc ( devm_intel_scu_ipc_dev_release , sizeof ( * dr ) , GFP_KERNEL ) ;
if ( ! dr )
return NULL ;
scu = intel_scu_ipc_dev_get ( ) ;
if ( ! scu ) {
devres_free ( dr ) ;
return NULL ;
}
dr - > scu = scu ;
devres_add ( dev , dr ) ;
return scu ;
}
EXPORT_SYMBOL_GPL ( devm_intel_scu_ipc_dev_get ) ;
2010-04-26 21:13:05 +04:00
/*
 * Send ipc command
 * Command Register (Write Only):
 * A write to this register results in an interrupt to the SCU core processor
 * Format:
 * |rfu2(8) | size(8) | command id(4) | rfu1(3) | ioc(1) | command(8)|
 *
 * The completion must be re-armed *before* the MMIO write: the write
 * kicks the SCU, and the IOC interrupt may fire immediately.
 */
static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd)
{
	reinit_completion(&scu->cmd_complete);
	/* IOC bit requests an interrupt on command completion */
	writel(cmd | IPC_IOC, scu->ipc_base);
}
/*
 * Write ipc data
 * IPC Write Buffer (Write Only):
 * 16-byte buffer for sending data associated with IPC command to
 * SCU. Size of the data is specified in the IPC_COMMAND_REG register
 *
 * @offset is a byte offset into the write buffer; callers write one
 * dword at a time.
 */
static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset)
{
	writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset);
}
/*
* Status Register ( Read Only ) :
* Driver will read this register to get the ready / busy status of the IPC
* block and error status of the IPC command that was just processed by SCU
* Format :
* | rfu3 ( 8 ) | error code ( 8 ) | initiator id ( 8 ) | cmd id ( 4 ) | rfu1 ( 2 ) | error ( 1 ) | busy ( 1 ) |
*/
2015-10-12 14:19:45 +03:00
static inline u8 ipc_read_status ( struct intel_scu_ipc_dev * scu )
2010-04-26 21:13:05 +04:00
{
2020-01-22 19:28:05 +03:00
return __raw_readl ( scu - > ipc_base + IPC_STATUS ) ;
2010-04-26 21:13:05 +04:00
}
2015-10-12 14:19:45 +03:00
/* Read one byte from the IPC read buffer at byte @offset */
static inline u8 ipc_data_readb(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readb(scu->ipc_base + IPC_READ_BUFFER + offset);
}
2015-10-12 14:19:45 +03:00
/* Read one dword from the IPC read buffer at byte @offset */
static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset)
{
	return readl(scu->ipc_base + IPC_READ_BUFFER + offset);
}
2015-01-21 22:38:09 +03:00
/* Wait till scu status is busy */
2015-10-12 14:19:45 +03:00
static inline int busy_loop ( struct intel_scu_ipc_dev * scu )
2010-04-26 21:13:05 +04:00
{
2021-09-28 13:19:30 +03:00
unsigned long end = jiffies + IPC_TIMEOUT ;
2010-04-26 21:13:05 +04:00
2020-01-22 19:28:08 +03:00
do {
u32 status ;
2015-01-21 22:38:10 +03:00
2020-01-22 19:28:08 +03:00
status = ipc_read_status ( scu ) ;
if ( ! ( status & IPC_STATUS_BUSY ) )
return ( status & IPC_STATUS_ERR ) ? - EIO : 0 ;
2015-01-21 22:38:10 +03:00
2020-01-22 19:28:08 +03:00
usleep_range ( 50 , 100 ) ;
} while ( time_before ( jiffies , end ) ) ;
2010-07-26 13:06:12 +04:00
2020-01-22 19:28:08 +03:00
return - ETIMEDOUT ;
2010-04-26 21:13:05 +04:00
}
2021-09-28 13:19:34 +03:00
/* Wait till ipc ioc interrupt is received or timeout in 10 HZ */
2015-10-12 14:19:45 +03:00
static inline int ipc_wait_for_interrupt ( struct intel_scu_ipc_dev * scu )
2013-11-16 04:21:54 +04:00
{
int status ;
2020-04-16 11:15:34 +03:00
if ( ! wait_for_completion_timeout ( & scu - > cmd_complete , IPC_TIMEOUT ) )
2013-11-16 04:21:54 +04:00
return - ETIMEDOUT ;
2015-10-12 14:19:45 +03:00
status = ipc_read_status ( scu ) ;
2020-01-22 19:28:05 +03:00
if ( status & IPC_STATUS_ERR )
2013-11-16 04:21:54 +04:00
return - EIO ;
return 0 ;
}
2015-10-12 14:19:45 +03:00
static int intel_scu_ipc_check_status ( struct intel_scu_ipc_dev * scu )
2013-11-16 04:21:54 +04:00
{
2020-04-16 11:15:33 +03:00
return scu - > irq > 0 ? ipc_wait_for_interrupt ( scu ) : busy_loop ( scu ) ;
2013-11-16 04:21:54 +04:00
}
2010-04-26 21:13:05 +04:00
/* Read/Write power control(PMIC in Langwell, MSIC in PenWell) registers */
2020-04-16 11:15:36 +03:00
static int pwr_reg_rdwr ( struct intel_scu_ipc_dev * scu , u16 * addr , u8 * data ,
u32 count , u32 op , u32 id )
2010-04-26 21:13:05 +04:00
{
2012-03-06 03:01:02 +04:00
int nc ;
2010-04-26 21:13:05 +04:00
u32 offset = 0 ;
2011-01-25 17:12:12 +03:00
int err ;
2015-07-13 17:44:54 +03:00
u8 cbuf [ IPC_WWBUF_SIZE ] ;
2010-04-26 21:13:05 +04:00
u32 * wbuf = ( u32 * ) & cbuf ;
2010-07-26 13:04:37 +04:00
memset ( cbuf , 0 , sizeof ( cbuf ) ) ;
2015-07-13 17:44:54 +03:00
mutex_lock ( & ipclock ) ;
2020-04-16 11:15:36 +03:00
if ( ! scu )
scu = ipcdev ;
if ( ! scu ) {
2010-04-26 21:13:05 +04:00
mutex_unlock ( & ipclock ) ;
return - ENODEV ;
}
2012-03-06 03:01:02 +04:00
for ( nc = 0 ; nc < count ; nc + + , offset + = 2 ) {
cbuf [ offset ] = addr [ nc ] ;
cbuf [ offset + 1 ] = addr [ nc ] > > 8 ;
}
2010-04-26 21:13:05 +04:00
2012-03-06 03:01:02 +04:00
if ( id = = IPC_CMD_PCNTRL_R ) {
for ( nc = 0 , offset = 0 ; nc < count ; nc + + , offset + = 4 )
2015-10-12 14:19:45 +03:00
ipc_data_writel ( scu , wbuf [ nc ] , offset ) ;
ipc_command ( scu , ( count * 2 ) < < 16 | id < < 12 | 0 < < 8 | op ) ;
2012-03-06 03:01:02 +04:00
} else if ( id = = IPC_CMD_PCNTRL_W ) {
for ( nc = 0 ; nc < count ; nc + + , offset + = 1 )
cbuf [ offset ] = data [ nc ] ;
for ( nc = 0 , offset = 0 ; nc < count ; nc + + , offset + = 4 )
2015-10-12 14:19:45 +03:00
ipc_data_writel ( scu , wbuf [ nc ] , offset ) ;
ipc_command ( scu , ( count * 3 ) < < 16 | id < < 12 | 0 < < 8 | op ) ;
2012-03-06 03:01:02 +04:00
} else if ( id = = IPC_CMD_PCNTRL_M ) {
cbuf [ offset ] = data [ 0 ] ;
cbuf [ offset + 1 ] = data [ 1 ] ;
2015-10-12 14:19:45 +03:00
ipc_data_writel ( scu , wbuf [ 0 ] , 0 ) ; /* Write wbuff */
ipc_command ( scu , 4 < < 16 | id < < 12 | 0 < < 8 | op ) ;
2010-07-26 13:02:46 +04:00
}
2010-04-26 21:13:05 +04:00
2015-10-12 14:19:45 +03:00
err = intel_scu_ipc_check_status ( scu ) ;
2013-11-15 02:15:06 +04:00
if ( ! err & & id = = IPC_CMD_PCNTRL_R ) { /* Read rbuf */
2010-04-26 21:13:05 +04:00
/* Workaround: values are read as 0 without memcpy_fromio */
2015-10-12 14:19:45 +03:00
memcpy_fromio ( cbuf , scu - > ipc_base + 0x90 , 16 ) ;
2012-03-06 03:01:02 +04:00
for ( nc = 0 ; nc < count ; nc + + )
2015-10-12 14:19:45 +03:00
data [ nc ] = ipc_data_readb ( scu , nc ) ;
2010-04-26 21:13:05 +04:00
}
mutex_unlock ( & ipclock ) ;
return err ;
}
/**
 * intel_scu_ipc_dev_ioread8() - Read a byte via the SCU
 * @scu: Optional SCU IPC instance (%NULL uses the default registered one)
 * @addr: Register on SCU
 * @data: Return pointer for read byte
 *
 * Read a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, u8 *data)
{
	return pwr_reg_rdwr(scu, &addr, data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_ioread8);
2010-04-26 21:13:05 +04:00
/**
 * intel_scu_ipc_dev_iowrite8() - Write a byte via the SCU
 * @scu: Optional SCU IPC instance (%NULL uses the default registered one)
 * @addr: Register on SCU
 * @data: Byte to write
 *
 * Write a single register. Returns %0 on success or an error code. All
 * locking between SCU accesses is handled for the caller.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, u8 data)
{
	return pwr_reg_rdwr(scu, &addr, &data, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_iowrite8);
2010-04-26 21:13:05 +04:00
/**
 * intel_scu_ipc_dev_readv() - Read a set of registers
 * @scu: Optional SCU IPC instance (%NULL uses the default registered one)
 * @addr: Register list
 * @data: Bytes to return
 * @len: Length of array
 *
 * Read registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			    size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_readv);
2010-04-26 21:13:05 +04:00
/**
 * intel_scu_ipc_dev_writev() - Write a set of registers
 * @scu: Optional SCU IPC instance (%NULL uses the default registered one)
 * @addr: Register list
 * @data: Bytes to write
 * @len: Length of array
 *
 * Write registers. Returns %0 on success or an error code. All locking
 * between SCU accesses is handled for the caller.
 *
 * The largest array length permitted by the hardware is 5 items.
 *
 * This function may sleep.
 */
int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, u8 *data,
			     size_t len)
{
	return pwr_reg_rdwr(scu, addr, data, len, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_writev);
2010-04-26 21:13:05 +04:00
/**
 * intel_scu_ipc_dev_update() - Update a register
 * @scu: Optional SCU IPC instance (%NULL uses the default registered one)
 * @addr: Register address
 * @data: Bits to update
 * @mask: Mask of bits to update
 *
 * Read-modify-write power control unit register. @mask is a bitmap that
 * indicates which bits to update: %0 = leave this bit untouched, %1 =
 * set this bit from @data. Returns %0 on success or an error code.
 *
 * This function may sleep. Locking between SCU accesses is handled
 * for the caller.
 */
int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, u8 data,
			     u8 mask)
{
	/* The SCU expects the value followed by the mask */
	u8 tmp[2] = { data, mask };
	return pwr_reg_rdwr(scu, &addr, tmp, 1, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_M);
}
EXPORT_SYMBOL(intel_scu_ipc_dev_update);
2010-04-26 21:13:05 +04:00
/**
2020-04-16 11:15:36 +03:00
* intel_scu_ipc_dev_simple_command ( ) - Send a simple command
* @ scu : Optional SCU IPC instance
2020-01-22 19:28:13 +03:00
* @ cmd : Command
* @ sub : Sub type
2010-04-26 21:13:05 +04:00
*
2020-01-22 19:28:13 +03:00
* Issue a simple command to the SCU . Do not use this interface if you must
* then access data as any data values may be overwritten by another SCU
* access by the time this function returns .
2010-04-26 21:13:05 +04:00
*
2020-01-22 19:28:13 +03:00
* This function may sleep . Locking for SCU accesses is handled for the
* caller .
2010-04-26 21:13:05 +04:00
*/
2020-04-16 11:15:36 +03:00
int intel_scu_ipc_dev_simple_command ( struct intel_scu_ipc_dev * scu , int cmd ,
int sub )
2010-04-26 21:13:05 +04:00
{
2020-04-16 11:15:34 +03:00
u32 cmdval ;
2011-01-25 17:12:12 +03:00
int err ;
2010-04-26 21:13:05 +04:00
mutex_lock ( & ipclock ) ;
2020-04-16 11:15:36 +03:00
if ( ! scu )
scu = ipcdev ;
if ( ! scu ) {
2010-04-26 21:13:05 +04:00
mutex_unlock ( & ipclock ) ;
return - ENODEV ;
}
2020-04-16 11:15:33 +03:00
scu = ipcdev ;
2020-04-16 11:15:34 +03:00
cmdval = sub < < 12 | cmd ;
ipc_command ( scu , cmdval ) ;
2015-10-12 14:19:45 +03:00
err = intel_scu_ipc_check_status ( scu ) ;
2010-04-26 21:13:05 +04:00
mutex_unlock ( & ipclock ) ;
2020-04-16 11:15:34 +03:00
if ( err )
dev_err ( & scu - > dev , " IPC command %#x failed with %d \n " , cmdval , err ) ;
2010-04-26 21:13:05 +04:00
return err ;
}
2020-04-16 11:15:36 +03:00
EXPORT_SYMBOL ( intel_scu_ipc_dev_simple_command ) ;
2010-04-26 21:13:05 +04:00
/**
 * intel_scu_ipc_dev_command_with_size() - Command with data
 * @scu: Optional SCU IPC instance (%NULL uses the default registered one)
 * @cmd: Command
 * @sub: Sub type
 * @in: Input data
 * @inlen: Input length in bytes (at most 16)
 * @size: Input size written to the IPC command register in whatever
 *	  units (dword, byte) the particular firmware requires. Normally
 *	  should be the same as @inlen.
 * @out: Output data
 * @outlen: Output length in bytes (at most 16)
 *
 * Issue a command to the SCU which involves data transfers. Do the
 * data copies under the lock but leave it for the caller to interpret.
 * Returns %0 on success or a negative error code.
 */
int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd,
					int sub, const void *in, size_t inlen,
					size_t size, void *out, size_t outlen)
{
	size_t outbuflen = DIV_ROUND_UP(outlen, sizeof(u32));
	size_t inbuflen = DIV_ROUND_UP(inlen, sizeof(u32));
	u32 cmdval, inbuf[4] = {};
	int i, err;

	/* The hardware buffers hold at most four dwords each way */
	if (inbuflen > 4 || outbuflen > 4)
		return -EINVAL;

	mutex_lock(&ipclock);
	if (!scu)
		scu = ipcdev;
	if (!scu) {
		mutex_unlock(&ipclock);
		return -ENODEV;
	}

	/* Stage input through a zero-padded dword buffer, then ship it */
	memcpy(inbuf, in, inlen);
	for (i = 0; i < inbuflen; i++)
		ipc_data_writel(scu, inbuf[i], 4 * i);

	cmdval = (size << 16) | (sub << 12) | cmd;
	ipc_command(scu, cmdval);
	err = intel_scu_ipc_check_status(scu);

	if (!err) {
		/* Read back full dwords, then copy only @outlen bytes out */
		u32 outbuf[4] = {};

		for (i = 0; i < outbuflen; i++)
			outbuf[i] = ipc_data_readl(scu, 4 * i);

		memcpy(out, outbuf, outlen);
	}
	mutex_unlock(&ipclock);
	if (err)
		dev_err(&scu->dev, "IPC command %#x failed with %d\n", cmdval, err);
	return err;
}
EXPORT_SYMBOL(intel_scu_ipc_dev_command_with_size);
2010-04-26 21:13:05 +04:00
/*
 * Interrupt handler, called when the ioc bit of IPC_COMMAND_REG was set
 * and the SCU has finished processing the command: acknowledges the
 * interrupt by writing IPC_STATUS_IRQ back to the status register and
 * wakes the waiter blocked in ipc_wait_for_interrupt().
 *
 * This is edge triggered so we need take no further action to clear
 * anything.
 */
static irqreturn_t ioc(int irq, void *dev_id)
{
	struct intel_scu_ipc_dev *scu = dev_id;
	int status = ipc_read_status(scu);

	writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS);
	complete(&scu->cmd_complete);

	return IRQ_HANDLED;
}
2020-04-16 11:15:33 +03:00
/*
 * Device release callback: undoes everything __intel_scu_ipc_register()
 * acquired (IRQ, mapping, memory region, the structure itself). Runs
 * when the last device reference is dropped.
 */
static void intel_scu_ipc_release(struct device *dev)
{
	struct intel_scu_ipc_dev *scu;

	scu = container_of(dev, struct intel_scu_ipc_dev, dev);
	if (scu->irq > 0)
		free_irq(scu->irq, scu);
	iounmap(scu->ipc_base);
	release_mem_region(scu->mem.start, resource_size(&scu->mem));
	kfree(scu);
}
2010-04-26 21:13:05 +04:00
/**
2020-04-16 11:15:36 +03:00
* __intel_scu_ipc_register ( ) - Register SCU IPC device
2020-04-16 11:15:33 +03:00
* @ parent : Parent device
* @ scu_data : Data used to configure SCU IPC
2020-04-16 11:15:36 +03:00
* @ owner : Module registering the SCU IPC device
2010-04-26 21:13:05 +04:00
*
2020-04-16 11:15:33 +03:00
* Call this function to register SCU IPC mechanism under @ parent .
* Returns pointer to the new SCU IPC device or ERR_PTR ( ) in case of
2020-04-16 11:15:36 +03:00
* failure . The caller may use the returned instance if it needs to do
* SCU IPC calls itself .
2010-04-26 21:13:05 +04:00
*/
2020-04-16 11:15:33 +03:00
struct intel_scu_ipc_dev *
2020-04-16 11:15:36 +03:00
__intel_scu_ipc_register ( struct device * parent ,
const struct intel_scu_ipc_data * scu_data ,
struct module * owner )
2010-04-26 21:13:05 +04:00
{
2013-12-03 04:20:00 +04:00
int err ;
2020-04-16 11:15:33 +03:00
struct intel_scu_ipc_dev * scu ;
void __iomem * ipc_base ;
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:33 +03:00
mutex_lock ( & ipclock ) ;
/* We support only one IPC */
if ( ipcdev ) {
err = - EBUSY ;
goto err_unlock ;
}
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:33 +03:00
scu = kzalloc ( sizeof ( * scu ) , GFP_KERNEL ) ;
if ( ! scu ) {
err = - ENOMEM ;
goto err_unlock ;
}
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:36 +03:00
scu - > owner = owner ;
2020-04-16 11:15:33 +03:00
scu - > dev . parent = parent ;
scu - > dev . class = & intel_scu_ipc_class ;
scu - > dev . release = intel_scu_ipc_release ;
dev_set_name ( & scu - > dev , " intel_scu_ipc " ) ;
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:33 +03:00
if ( ! request_mem_region ( scu_data - > mem . start , resource_size ( & scu_data - > mem ) ,
" intel_scu_ipc " ) ) {
err = - EBUSY ;
goto err_free ;
}
ipc_base = ioremap ( scu_data - > mem . start , resource_size ( & scu_data - > mem ) ) ;
if ( ! ipc_base ) {
err = - ENOMEM ;
goto err_release ;
}
scu - > ipc_base = ipc_base ;
scu - > mem = scu_data - > mem ;
scu - > irq = scu_data - > irq ;
2015-10-12 14:19:45 +03:00
init_completion ( & scu - > cmd_complete ) ;
2013-11-16 04:21:54 +04:00
2020-04-16 11:15:33 +03:00
if ( scu - > irq > 0 ) {
err = request_irq ( scu - > irq , ioc , 0 , " intel_scu_ipc " , scu ) ;
if ( err )
goto err_unmap ;
}
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:33 +03:00
/*
* After this point intel_scu_ipc_release ( ) takes care of
* releasing the SCU IPC resources once refcount drops to zero .
*/
err = device_register ( & scu - > dev ) ;
if ( err ) {
put_device ( & scu - > dev ) ;
goto err_unlock ;
}
2017-04-05 19:05:25 +03:00
/* Assign device at last */
2020-04-16 11:15:33 +03:00
ipcdev = scu ;
mutex_unlock ( & ipclock ) ;
2017-04-05 19:05:25 +03:00
2020-04-16 11:15:33 +03:00
return scu ;
2010-11-09 14:22:58 +03:00
2020-04-16 11:15:33 +03:00
err_unmap :
iounmap ( ipc_base ) ;
err_release :
release_mem_region ( scu_data - > mem . start , resource_size ( & scu_data - > mem ) ) ;
err_free :
kfree ( scu ) ;
err_unlock :
mutex_unlock ( & ipclock ) ;
return ERR_PTR ( err ) ;
2010-04-26 21:13:05 +04:00
}
2020-04-16 11:15:36 +03:00
EXPORT_SYMBOL_GPL ( __intel_scu_ipc_register ) ;
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:40 +03:00
/**
* intel_scu_ipc_unregister ( ) - Unregister SCU IPC
* @ scu : SCU IPC handle
*
* This unregisters the SCU IPC device and releases the acquired
* resources once the refcount goes to zero .
*/
void intel_scu_ipc_unregister ( struct intel_scu_ipc_dev * scu )
{
mutex_lock ( & ipclock ) ;
if ( ! WARN_ON ( ! ipcdev ) ) {
ipcdev = NULL ;
device_unregister ( & scu - > dev ) ;
}
mutex_unlock ( & ipclock ) ;
}
EXPORT_SYMBOL_GPL ( intel_scu_ipc_unregister ) ;
static void devm_intel_scu_ipc_unregister ( struct device * dev , void * res )
{
struct intel_scu_ipc_devres * dr = res ;
struct intel_scu_ipc_dev * scu = dr - > scu ;
intel_scu_ipc_unregister ( scu ) ;
}
/**
* __devm_intel_scu_ipc_register ( ) - Register managed SCU IPC device
* @ parent : Parent device
* @ scu_data : Data used to configure SCU IPC
* @ owner : Module registering the SCU IPC device
*
* Call this function to register managed SCU IPC mechanism under
* @ parent . Returns pointer to the new SCU IPC device or ERR_PTR ( ) in
* case of failure . The caller may use the returned instance if it needs
* to do SCU IPC calls itself .
*/
struct intel_scu_ipc_dev *
__devm_intel_scu_ipc_register ( struct device * parent ,
const struct intel_scu_ipc_data * scu_data ,
struct module * owner )
{
struct intel_scu_ipc_devres * dr ;
struct intel_scu_ipc_dev * scu ;
dr = devres_alloc ( devm_intel_scu_ipc_unregister , sizeof ( * dr ) , GFP_KERNEL ) ;
if ( ! dr )
return NULL ;
scu = __intel_scu_ipc_register ( parent , scu_data , owner ) ;
if ( IS_ERR ( scu ) ) {
devres_free ( dr ) ;
return scu ;
}
dr - > scu = scu ;
devres_add ( parent , dr ) ;
return scu ;
}
EXPORT_SYMBOL_GPL ( __devm_intel_scu_ipc_register ) ;
2020-04-16 11:15:33 +03:00
/* Register the class early so providers can attach during subsystem init */
static int __init intel_scu_ipc_init(void)
{
	return class_register(&intel_scu_ipc_class);
}
subsys_initcall(intel_scu_ipc_init);
2010-04-26 21:13:05 +04:00
2020-04-16 11:15:33 +03:00
/* Tear down the class on module unload */
static void __exit intel_scu_ipc_exit(void)
{
	class_unregister(&intel_scu_ipc_class);
}
module_exit(intel_scu_ipc_exit);