// SPDX-License-Identifier: GPL-2.0-only
/*
 * skl-sst-dsp.c - SKL SST library generic function
 *
 * Copyright (C) 2014-15, Intel Corporation.
 * Author: Rafal Redzimski <rafal.f.redzimski@intel.com>
 *         Jeeja KP <jeeja.kp@intel.com>
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <sound/pcm.h>

#include "../common/sst-dsp.h"
#include "../common/sst-ipc.h"
#include "../common/sst-dsp-priv.h"
#include "skl.h"

/* various timeout values */
#define SKL_DSP_PU_TO           50
#define SKL_DSP_PD_TO           50
#define SKL_DSP_RESET_TO        50
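
/* Update the DSP driver state (ctx->sst_state) under the context mutex. */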
void skl_dsp_set_state_locked(struct sst_dsp *ctx, int state)
{
        mutex_lock(&ctx->mutex);
        ctx->sst_state = state;
        mutex_unlock(&ctx->mutex);
}

/*
 * Initialize core power state and usage count. To be called after a
 * successful first boot, so core 0 will be running and all other cores
 * will be in reset.
 */
void skl_dsp_init_core_state(struct sst_dsp *ctx)
{
        struct skl_dev *skl = ctx->thread_context;
        int i;

        skl->cores.state[SKL_DSP_CORE0_ID] = SKL_DSP_RUNNING;
        skl->cores.usage_count[SKL_DSP_CORE0_ID] = 1;

        for (i = SKL_DSP_CORE0_ID + 1; i < skl->cores.count; i++) {
                skl->cores.state[i] = SKL_DSP_RESET;
                skl->cores.usage_count[i] = 0;
        }
}

/* Get the mask for all enabled cores */
unsigned int skl_dsp_get_enabled_cores(struct sst_dsp *ctx)
{
        struct skl_dev *skl = ctx->thread_context;
        unsigned int core_mask, en_cores_mask;
        u32 val;

        core_mask = SKL_DSP_CORES_MASK(skl->cores.count);

        val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

        /* Cores having CPA bit set */
        en_cores_mask = (val & SKL_ADSPCS_CPA_MASK(core_mask)) >>
                        SKL_ADSPCS_CPA_SHIFT;

        /* And cores having CRST bit cleared */
        en_cores_mask &= (~val & SKL_ADSPCS_CRST_MASK(core_mask)) >>
                        SKL_ADSPCS_CRST_SHIFT;

        /* And cores having CSTALL bit cleared */
        en_cores_mask &= (~val & SKL_ADSPCS_CSTALL_MASK(core_mask)) >>
                        SKL_ADSPCS_CSTALL_SHIFT;
        en_cores_mask &= core_mask;

        dev_dbg(ctx->dev, "DSP enabled cores mask = %x\n", en_cores_mask);

        return en_cores_mask;
}
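
/*
 * Put the cores in core_mask into reset: set their CRST bits and poll
 * until the hardware reports the bits as set.
 */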
static int
skl_dsp_core_set_reset_state(struct sst_dsp *ctx, unsigned int core_mask)
{
        int ret;

        /* update bits */
        sst_dsp_shim_update_bits_unlocked(ctx,
                        SKL_ADSP_REG_ADSPCS, SKL_ADSPCS_CRST_MASK(core_mask),
                        SKL_ADSPCS_CRST_MASK(core_mask));

        /* poll with timeout to check if operation successful */
        ret = sst_dsp_register_poll(ctx,
                        SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CRST_MASK(core_mask),
                        SKL_ADSPCS_CRST_MASK(core_mask),
                        SKL_DSP_RESET_TO,
                        "Set reset");
        if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
                        SKL_ADSPCS_CRST_MASK(core_mask)) !=
                        SKL_ADSPCS_CRST_MASK(core_mask)) {
                dev_err(ctx->dev, "Set reset state failed: core_mask %x\n",
                                core_mask);
                ret = -EIO;
        }

        return ret;
}
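
/*
 * Take the cores in core_mask out of reset: clear their CRST bits and
 * poll until they read back as cleared.
 */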
int skl_dsp_core_unset_reset_state(
                struct sst_dsp *ctx, unsigned int core_mask)
{
        int ret;

        dev_dbg(ctx->dev, "In %s\n", __func__);

        /* update bits */
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CRST_MASK(core_mask), 0);

        /* poll with timeout to check if operation successful */
        ret = sst_dsp_register_poll(ctx,
                        SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CRST_MASK(core_mask),
                        0,
                        SKL_DSP_RESET_TO,
                        "Unset reset");

        if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
                        SKL_ADSPCS_CRST_MASK(core_mask)) != 0) {
                dev_err(ctx->dev, "Unset reset state failed: core_mask %x\n",
                                core_mask);
                ret = -EIO;
        }

        return ret;
}
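
/*
 * A core counts as enabled when its power request (SPA) and power ack (CPA)
 * bits are set and it is neither held in reset (CRST) nor stalled (CSTALL).
 */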
static bool
is_skl_dsp_core_enable(struct sst_dsp *ctx, unsigned int core_mask)
{
        int val;
        bool is_enable;

        val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS);

        is_enable = ((val & SKL_ADSPCS_CPA_MASK(core_mask)) &&
                        (val & SKL_ADSPCS_SPA_MASK(core_mask)) &&
                        !(val & SKL_ADSPCS_CRST_MASK(core_mask)) &&
                        !(val & SKL_ADSPCS_CSTALL_MASK(core_mask)));

        dev_dbg(ctx->dev, "DSP core(s) enabled? %d: core_mask %x\n",
                        is_enable, core_mask);

        return is_enable;
}
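
/* Stall the cores in core_mask and then place them in reset. */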
static int skl_dsp_reset_core(struct sst_dsp *ctx, unsigned int core_mask)
{
        /* stall core */
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CSTALL_MASK(core_mask),
                        SKL_ADSPCS_CSTALL_MASK(core_mask));

        /* set reset state */
        return skl_dsp_core_set_reset_state(ctx, core_mask);
}
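
/*
 * Bring the cores in core_mask out of reset and unstall them; if they do
 * not come up enabled, put them back in reset and return an error.
 */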
int skl_dsp_start_core(struct sst_dsp *ctx, unsigned int core_mask)
{
        int ret;

        /* unset reset state */
        ret = skl_dsp_core_unset_reset_state(ctx, core_mask);
        if (ret < 0)
                return ret;

        /* run core */
        dev_dbg(ctx->dev, "unstall/run core: core_mask = %x\n", core_mask);
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CSTALL_MASK(core_mask), 0);

        if (!is_skl_dsp_core_enable(ctx, core_mask)) {
                skl_dsp_reset_core(ctx, core_mask);
                dev_err(ctx->dev, "DSP start core failed: core_mask %x\n",
                                core_mask);
                ret = -EIO;
        }

        return ret;
}
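
/*
 * Request power for the cores in core_mask (SPA) and wait for the hardware
 * to acknowledge (CPA).
 */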
int skl_dsp_core_power_up(struct sst_dsp *ctx, unsigned int core_mask)
{
        int ret;

        /* update bits */
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_SPA_MASK(core_mask),
                        SKL_ADSPCS_SPA_MASK(core_mask));

        /* poll with timeout to check if operation successful */
        ret = sst_dsp_register_poll(ctx,
                        SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CPA_MASK(core_mask),
                        SKL_ADSPCS_CPA_MASK(core_mask),
                        SKL_DSP_PU_TO,
                        "Power up");

        if ((sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPCS) &
                        SKL_ADSPCS_CPA_MASK(core_mask)) !=
                        SKL_ADSPCS_CPA_MASK(core_mask)) {
                dev_err(ctx->dev, "DSP core power up failed: core_mask %x\n",
                                core_mask);
                ret = -EIO;
        }

        return ret;
}
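
/* Drop the power request (SPA) for the cores and wait for CPA to clear. */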
int skl_dsp_core_power_down(struct sst_dsp *ctx, unsigned int core_mask)
{
        /* update bits */
        sst_dsp_shim_update_bits_unlocked(ctx, SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_SPA_MASK(core_mask), 0);

        /* poll with timeout to check if operation successful */
        return sst_dsp_register_poll(ctx,
                        SKL_ADSP_REG_ADSPCS,
                        SKL_ADSPCS_CPA_MASK(core_mask),
                        0,
                        SKL_DSP_PD_TO,
                        "Power down");
}
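
/* Enable cores: power them up, then take them out of reset and run them. */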
int skl_dsp_enable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
        int ret;

        /* power up */
        ret = skl_dsp_core_power_up(ctx, core_mask);
        if (ret < 0) {
                dev_err(ctx->dev, "dsp core power up failed: core_mask %x\n",
                                core_mask);
                return ret;
        }

        return skl_dsp_start_core(ctx, core_mask);
}
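
/*
 * Disable cores: stall and reset them, power them down, then verify they
 * are no longer reported as enabled.
 */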
int skl_dsp_disable_core(struct sst_dsp *ctx, unsigned int core_mask)
{
        int ret;

        ret = skl_dsp_reset_core(ctx, core_mask);
        if (ret < 0) {
                dev_err(ctx->dev, "dsp core reset failed: core_mask %x\n",
                                core_mask);
                return ret;
        }

        /* power down core */
        ret = skl_dsp_core_power_down(ctx, core_mask);
        if (ret < 0) {
                dev_err(ctx->dev, "dsp core power down fail mask %x: %d\n",
                                core_mask, ret);
                return ret;
        }

        if (is_skl_dsp_core_enable(ctx, core_mask)) {
                dev_err(ctx->dev, "dsp core disable fail mask %x: %d\n",
                                core_mask, ret);
                ret = -EIO;
        }

        return ret;
}
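
/*
 * Bring core 0 to a known running state: if it is already enabled, cycle it
 * through reset and start; otherwise disable it fully and re-enable it.
 */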
int skl_dsp_boot(struct sst_dsp *ctx)
{
        int ret;

        if (is_skl_dsp_core_enable(ctx, SKL_DSP_CORE0_MASK)) {
                ret = skl_dsp_reset_core(ctx, SKL_DSP_CORE0_MASK);
                if (ret < 0) {
                        dev_err(ctx->dev, "dsp core0 reset fail: %d\n", ret);
                        return ret;
                }

                ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
                if (ret < 0) {
                        dev_err(ctx->dev, "dsp core0 start fail: %d\n", ret);
                        return ret;
                }
        } else {
                ret = skl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
                if (ret < 0) {
                        dev_err(ctx->dev, "dsp core0 disable fail: %d\n", ret);
                        return ret;
                }
                ret = skl_dsp_enable_core(ctx, SKL_DSP_CORE0_MASK);
        }

        return ret;
}
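
/*
 * Top-half interrupt handler: read and latch the ADSP interrupt status.
 * An all-ones read is treated as an invalid status and ignored. For IPC and
 * code-loader DMA interrupts, mask the source and defer to the threaded
 * handler.
 */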
irqreturn_t skl_dsp_sst_interrupt(int irq, void *dev_id)
{
        struct sst_dsp *ctx = dev_id;
        u32 val;
        irqreturn_t result = IRQ_NONE;

        spin_lock(&ctx->spinlock);

        val = sst_dsp_shim_read_unlocked(ctx, SKL_ADSP_REG_ADSPIS);
        ctx->intr_status = val;

        if (val == 0xffffffff) {
                spin_unlock(&ctx->spinlock);
                return IRQ_NONE;
        }

        if (val & SKL_ADSPIS_IPC) {
                skl_ipc_int_disable(ctx);
                result = IRQ_WAKE_THREAD;
        }

        if (val & SKL_ADSPIS_CL_DMA) {
                skl_cldma_int_disable(ctx);
                result = IRQ_WAKE_THREAD;
        }

        spin_unlock(&ctx->spinlock);

        return result;
}

/*
 * skl_dsp_get_core/skl_dsp_put_core will be called inside DAPM context
 * within the dapm mutex. Hence no separate lock is used.
 */
int skl_dsp_get_core(struct sst_dsp *ctx, unsigned int core_id)
{
        struct skl_dev *skl = ctx->thread_context;
        int ret = 0;

        if (core_id >= skl->cores.count) {
                dev_err(ctx->dev, "invalid core id: %d\n", core_id);
                return -EINVAL;
        }

        skl->cores.usage_count[core_id]++;

        if (skl->cores.state[core_id] == SKL_DSP_RESET) {
                ret = ctx->fw_ops.set_state_D0(ctx, core_id);
                if (ret < 0) {
                        dev_err(ctx->dev, "unable to get core%d\n", core_id);
                        goto out;
                }
        }

out:
        dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
                        core_id, skl->cores.state[core_id],
                        skl->cores.usage_count[core_id]);

        return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_get_core);

int skl_dsp_put_core(struct sst_dsp *ctx, unsigned int core_id)
{
        struct skl_dev *skl = ctx->thread_context;
        int ret = 0;

        if (core_id >= skl->cores.count) {
                dev_err(ctx->dev, "invalid core id: %d\n", core_id);
                return -EINVAL;
        }

        if ((--skl->cores.usage_count[core_id] == 0) &&
                (skl->cores.state[core_id] != SKL_DSP_RESET)) {
                ret = ctx->fw_ops.set_state_D3(ctx, core_id);
                if (ret < 0) {
                        dev_err(ctx->dev, "unable to put core %d: %d\n",
                                        core_id, ret);
                        skl->cores.usage_count[core_id]++;
                }
        }

        dev_dbg(ctx->dev, "core id %d state %d usage_count %d\n",
                        core_id, skl->cores.state[core_id],
                        skl->cores.usage_count[core_id]);

        return ret;
}
EXPORT_SYMBOL_GPL(skl_dsp_put_core);
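
/* Convenience wrappers that get/put DSP core 0. */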
int skl_dsp_wake(struct sst_dsp *ctx)
{
        return skl_dsp_get_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_wake);

int skl_dsp_sleep(struct sst_dsp *ctx)
{
        return skl_dsp_put_core(ctx, SKL_DSP_CORE0_ID);
}
EXPORT_SYMBOL_GPL(skl_dsp_sleep);
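
/*
 * Allocate and initialize an sst_dsp context: set up the locks, wire in the
 * device, ops and thread context, and invoke the ops->init() callback when
 * one is provided. Returns NULL on failure.
 */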
struct sst_dsp *skl_dsp_ctx_init(struct device *dev,
                struct sst_dsp_device *sst_dev, int irq)
{
        int ret;
        struct sst_dsp *sst;

        sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
        if (sst == NULL)
                return NULL;

        spin_lock_init(&sst->spinlock);
        mutex_init(&sst->mutex);
        sst->dev = dev;
        sst->sst_dev = sst_dev;
        sst->irq = irq;
        sst->ops = sst_dev->ops;
        sst->thread_context = sst_dev->thread_context;

        /* Initialise SST Audio DSP */
        if (sst->ops->init) {
                ret = sst->ops->init(sst);
                if (ret < 0)
                        return NULL;
        }

        return sst;
}
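
/* Register the shared, threaded "AudioDSP" interrupt handler. */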
int skl_dsp_acquire_irq(struct sst_dsp *sst)
{
        struct sst_dsp_device *sst_dev = sst->sst_dev;
        int ret;

        /* Register the ISR */
        ret = request_threaded_irq(sst->irq, sst->ops->irq_handler,
                sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
        if (ret)
                dev_err(sst->dev, "unable to grab threaded IRQ %d, disabling device\n",
                        sst->irq);

        return ret;
}
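
/*
 * Tear down the DSP: disable IPC interrupts, release the IRQ and disable
 * (reset and power down) core 0.
 */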
void skl_dsp_free(struct sst_dsp *dsp)
{
        skl_ipc_int_disable(dsp);

        free_irq(dsp->irq, dsp);
        skl_ipc_op_int_disable(dsp);
        skl_dsp_disable_core(dsp, SKL_DSP_CORE0_MASK);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);

bool is_skl_dsp_running(struct sst_dsp *ctx)
{
        return (ctx->sst_state == SKL_DSP_RUNNING);
}
EXPORT_SYMBOL_GPL(is_skl_dsp_running);