2020-05-01 17:58:50 +03:00
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
2019-04-12 19:08:51 +03:00
//
// This file is provided under a dual BSD/GPLv2 license. When using or
// redistributing this file, you may do so under either license.
//
2024-05-03 17:03:51 +03:00
// Copyright(c) 2018 Intel Corporation
2019-04-12 19:08:51 +03:00
//
// Authors: Liam Girdwood <liam.r.girdwood@linux.intel.com>
// Ranjani Sridharan <ranjani.sridharan@linux.intel.com>
// Rander Wang <rander.wang@intel.com>
// Keyon Jie <yang.jie@linux.intel.com>
//
/*
* Hardware interface for HDA DSP code loader
*/
# include <linux/firmware.h>
# include <sound/hdaudio_ext.h>
2020-08-26 21:45:30 +03:00
# include <sound/hda_register.h>
2019-04-12 19:08:51 +03:00
# include <sound/sof.h>
2022-10-20 15:12:34 +03:00
# include <sound/sof/ipc4/header.h>
2020-11-27 19:40:19 +03:00
# include "ext_manifest.h"
2022-10-20 15:12:34 +03:00
# include "../ipc4-priv.h"
2019-04-12 19:08:51 +03:00
# include "../ops.h"
2022-01-21 02:15:32 +03:00
# include "../sof-priv.h"
2019-04-12 19:08:51 +03:00
# include "hda.h"
2022-01-21 02:15:30 +03:00
static void hda_ssp_set_cbp_cfp ( struct snd_sof_dev * sdev )
{
struct sof_intel_hda_dev * hda = sdev - > pdata - > hw_pdata ;
const struct sof_intel_dsp_desc * chip = hda - > desc ;
int i ;
/* DSP is powered up, set all SSPs to clock consumer/codec provider mode */
for ( i = 0 ; i < chip - > ssp_count ; i + + ) {
snd_sof_dsp_update_bits_unlocked ( sdev , HDA_DSP_BAR ,
chip - > ssp_base_offset
+ i * SSP_DEV_MEM_SIZE
+ SSP_SSC1_OFFSET ,
SSP_SET_CBP_CFP ,
SSP_SET_CBP_CFP ) ;
}
}
2024-04-04 21:54:46 +03:00
/*
 * hda_cl_prepare - set up an HDA stream for code-loader DMA
 * @dev: device whose drvdata is the SOF device (struct snd_sof_dev)
 * @format: stream format value programmed into the stream descriptor
 * @size: size in bytes of the DMA buffer to allocate
 * @dmab: caller-provided buffer descriptor, filled in by the allocation;
 *	  ownership stays with the caller (freed via hda_cl_cleanup())
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @is_iccmax: true to program the stream for ICCMAX measurement instead of
 *	       a normal firmware download (no SPIB setup in that case)
 *
 * Returns the prepared stream, or ERR_PTR on failure. On error the stream
 * and DMA buffer acquired along the way are released again.
 */
struct hdac_ext_stream *hda_cl_prepare(struct device *dev, unsigned int format,
				       unsigned int size, struct snd_dma_buffer *dmab,
				       int direction, bool is_iccmax)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_ext_stream *hext_stream;
	struct hdac_stream *hstream;
	int ret;

	hext_stream = hda_dsp_stream_get(sdev, direction, 0);

	if (!hext_stream) {
		dev_err(sdev->dev, "error: no stream available\n");
		return ERR_PTR(-ENODEV);
	}
	hstream = &hext_stream->hstream;
	/* no ALSA substream is associated with the loader stream */
	hstream->substream = NULL;

	/* allocate DMA buffer */
	ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, dev, size, dmab);
	if (ret < 0) {
		dev_err(sdev->dev, "error: memory alloc failed: %d\n", ret);
		goto out_put;
	}

	hstream->period_bytes = 0;/* initialize period_bytes */
	hstream->format_val = format;
	hstream->bufsize = size;

	if (is_iccmax) {
		ret = hda_dsp_iccmax_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: iccmax stream prepare failed: %d\n", ret);
			goto out_free;
		}
	} else {
		ret = hda_dsp_stream_hw_params(sdev, hext_stream, dmab, NULL);
		if (ret < 0) {
			dev_err(sdev->dev, "error: hdac prepare failed: %d\n", ret);
			goto out_free;
		}
		/* firmware download uses SPIB to bound the transfer */
		hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_ENABLE, size);
	}

	return hext_stream;

out_free:
	snd_dma_free_pages(dmab);
out_put:
	hda_dsp_stream_put(sdev, direction, hstream->stream_tag);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS(hda_cl_prepare, SND_SOC_SOF_INTEL_HDA_COMMON);
2019-04-12 19:08:51 +03:00
/*
 * The first boot sequence has some extra steps:
 * power on all host-managed cores, unstall/run only the boot core to boot
 * the DSP, then turn off any non-boot cores that were powered on.
 */
2022-06-15 11:43:47 +03:00
/*
 * cl_dsp_init - power up the DSP and run the ROM init/boot sequence
 * @sdev: SOF device
 * @stream_tag: 1-based HDA stream tag of the code-loader DMA stream;
 *	ignored when @imr_boot is true
 * @imr_boot: boot the firmware image preserved in IMR instead of purging
 *	and preparing for a fresh firmware download
 *
 * Returns 0 on success. On any failure a debug dump is taken and all
 * host-managed cores are reset and powered down again.
 */
int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag, bool imr_boot)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	unsigned int status, target_status;
	u32 flags, ipc_hdr, j;
	unsigned long mask;
	char *dump_msg;
	int ret;

	/* step 1: power up corex */
	ret = hda_dsp_core_power_up(sdev, chip->host_managed_cores_mask);
	if (ret < 0) {
		/* only log on the final attempt to avoid retry noise */
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev, "error: dsp core 0/1 power up failed\n");
		goto err;
	}

	hda_ssp_set_cbp_cfp(sdev);

	/* step 2: Send ROM_CONTROL command (stream_tag is ignored for IMR boot) */
	ipc_hdr = chip->ipc_req_mask | HDA_DSP_ROM_IPC_CONTROL;
	if (!imr_boot)
		/* stream tag is encoded 0-based in bits 9+ of the purge command */
		ipc_hdr |= HDA_DSP_ROM_IPC_PURGE_FW | ((stream_tag - 1) << 9);

	snd_sof_dsp_write(sdev, HDA_DSP_BAR, chip->ipc_req, ipc_hdr);

	/* step 3: unset core 0 reset state & unstall/run core 0 */
	ret = hda_dsp_core_run(sdev, chip->init_core_mask);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core start failed %d\n", ret);
		ret = -EIO;
		goto err;
	}

	/* step 4: wait for IPC DONE bit from ROM */
	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->ipc_ack, status,
					    ((status & chip->ipc_ack_mask)
						    == chip->ipc_ack_mask),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    HDA_DSP_INIT_TIMEOUT_US);
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: %s: timeout for HIPCIE done\n",
				__func__);
		goto err;
	}

	/* set DONE bit to clear the reply IPC message */
	snd_sof_dsp_update_bits_forced(sdev, HDA_DSP_BAR,
				       chip->ipc_ack,
				       chip->ipc_ack_mask,
				       chip->ipc_ack_mask);

	/* step 5: power down cores that are no longer needed */
	ret = hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask &
					    ~(chip->init_core_mask));
	if (ret < 0) {
		if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
			dev_err(sdev->dev,
				"error: dsp core x power down failed\n");
		goto err;
	}

	/* step 6: enable IPC interrupts */
	hda_dsp_ipc_int_enable(sdev);

	/*
	 * step 7:
	 * - Cold/Full boot: wait for ROM init to proceed to download the firmware
	 * - IMR boot: wait for ROM firmware entered (firmware booted up from IMR)
	 */
	if (imr_boot)
		target_status = FSR_STATE_FW_ENTERED;
	else
		target_status = FSR_STATE_INIT_DONE;

	ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					    chip->rom_status_reg, status,
					    (FSR_TO_STATE_CODE(status) == target_status),
					    HDA_DSP_REG_POLL_INTERVAL_US,
					    chip->rom_init_timeout *
					    USEC_PER_MSEC);
	if (!ret) {
		/* set enabled cores mask and increment ref count for cores in init_core_mask */
		sdev->enabled_cores_mask |= chip->init_core_mask;
		mask = sdev->enabled_cores_mask;
		for_each_set_bit(j, &mask, SOF_MAX_DSP_NUM_CORES)
			sdev->dsp_core_ref_count[j]++;

		return 0;
	}

	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);

err:
	flags = SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX | SOF_DBG_DUMP_OPTIONAL;

	/* after max boot attempts make sure that the dump is printed */
	if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS)
		flags &= ~SOF_DBG_DUMP_OPTIONAL;

	dump_msg = kasprintf(GFP_KERNEL, "Boot iteration failed: %d/%d",
			     hda->boot_iteration, HDA_FW_BOOT_ATTEMPTS);
	snd_sof_dsp_dbg_dump(sdev, dump_msg, flags);
	hda_dsp_core_reset_power_down(sdev, chip->host_managed_cores_mask);
	kfree(dump_msg);

	return ret;
}
EXPORT_SYMBOL_NS(cl_dsp_init, SND_SOC_SOF_INTEL_HDA_COMMON);
2019-04-12 19:08:51 +03:00
2024-04-04 21:54:46 +03:00
int hda_cl_trigger ( struct device * dev , struct hdac_ext_stream * hext_stream , int cmd )
2019-04-12 19:08:51 +03:00
{
2024-04-04 21:54:46 +03:00
struct snd_sof_dev * sdev = dev_get_drvdata ( dev ) ;
2022-02-09 09:31:04 +03:00
struct hdac_stream * hstream = & hext_stream - > hstream ;
2019-04-12 19:08:51 +03:00
int sd_offset = SOF_STREAM_SD_OFFSET ( hstream ) ;
2024-04-04 21:54:47 +03:00
struct sof_intel_hda_stream * hda_stream ;
2019-04-12 19:08:51 +03:00
/* code loader is special case that reuses stream ops */
switch ( cmd ) {
case SNDRV_PCM_TRIGGER_START :
2024-04-04 21:54:47 +03:00
hda_stream = container_of ( hext_stream , struct sof_intel_hda_stream ,
hext_stream ) ;
reinit_completion ( & hda_stream - > ioc ) ;
2019-04-12 19:08:51 +03:00
snd_sof_dsp_update_bits ( sdev , HDA_DSP_HDA_BAR , SOF_HDA_INTCTL ,
1 < < hstream - > index ,
1 < < hstream - > index ) ;
snd_sof_dsp_update_bits ( sdev , HDA_DSP_HDA_BAR ,
sd_offset ,
SOF_HDA_SD_CTL_DMA_START |
SOF_HDA_CL_DMA_SD_INT_MASK ,
SOF_HDA_SD_CTL_DMA_START |
SOF_HDA_CL_DMA_SD_INT_MASK ) ;
hstream - > running = true ;
return 0 ;
default :
2022-02-09 09:31:04 +03:00
return hda_dsp_stream_trigger ( sdev , hext_stream , cmd ) ;
2019-04-12 19:08:51 +03:00
}
}
2024-04-04 21:54:46 +03:00
EXPORT_SYMBOL_NS ( hda_cl_trigger , SND_SOC_SOF_INTEL_HDA_COMMON ) ;
2019-04-12 19:08:51 +03:00
2024-04-04 21:54:46 +03:00
/*
 * hda_cl_cleanup - stop the code-loader stream and release its resources
 * @dev: device whose drvdata is the SOF device (struct snd_sof_dev)
 * @dmab: DMA buffer allocated by hda_cl_prepare(); freed here
 * @hext_stream: stream returned by hda_cl_prepare(); released back here
 *
 * Returns 0, or the SPIB-disable error for playback streams. Cleanup of the
 * stream registers and buffer is performed regardless.
 */
int hda_cl_cleanup(struct device *dev, struct snd_dma_buffer *dmab,
		   struct hdac_ext_stream *hext_stream)
{
	struct snd_sof_dev *sdev = dev_get_drvdata(dev);
	struct hdac_stream *hstream = &hext_stream->hstream;
	int sd_offset = SOF_STREAM_SD_OFFSET(hstream);
	int ret = 0;

	/* playback (fw download) used SPIB; capture just stops the DMA */
	if (hstream->direction == SNDRV_PCM_STREAM_PLAYBACK)
		ret = hda_dsp_stream_spib_config(sdev, hext_stream, HDA_DSP_SPIB_DISABLE, 0);
	else
		snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, sd_offset,
					SOF_HDA_SD_CTL_DMA_START, 0);

	hda_dsp_stream_put(sdev, hstream->direction, hstream->stream_tag);
	hstream->running = 0;
	hstream->substream = NULL;

	/* reset BDL address */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPL, 0);
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR,
			  sd_offset + SOF_HDA_ADSP_REG_SD_BDLPU, 0);

	/* clear the stream descriptor control register */
	snd_sof_dsp_write(sdev, HDA_DSP_HDA_BAR, sd_offset, 0);
	snd_dma_free_pages(dmab);
	dmab->area = NULL;
	hstream->bufsize = 0;
	hstream->format_val = 0;

	return ret;
}
EXPORT_SYMBOL_NS(hda_cl_cleanup, SND_SOC_SOF_INTEL_HDA_COMMON);
2019-04-12 19:08:51 +03:00
2024-04-04 21:54:47 +03:00
/* max time to wait for the code-loader DMA IOC completion */
#define HDA_CL_DMA_IOC_TIMEOUT_MS	500

/*
 * hda_cl_copy_fw - DMA the prepared firmware image to the DSP and wait for boot
 * @sdev: SOF device
 * @hext_stream: code-loader stream already prepared with the firmware image
 *
 * Starts the loader DMA, waits for the transfer-complete interrupt, then
 * polls the ROM status register for FW_ENTERED. The DMA is stopped even on
 * status-poll failure; the first error encountered is returned.
 */
int hda_cl_copy_fw(struct snd_sof_dev *sdev, struct hdac_ext_stream *hext_stream)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	const struct sof_intel_dsp_desc *chip = hda->desc;
	struct sof_intel_hda_stream *hda_stream;
	unsigned long time_left;
	unsigned int reg;
	int ret, status;

	hda_stream = container_of(hext_stream, struct sof_intel_hda_stream,
				  hext_stream);

	dev_dbg(sdev->dev, "Code loader DMA starting\n");

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger start failed\n");
		return ret;
	}

	/* Wait for completion of transfer */
	time_left = wait_for_completion_timeout(&hda_stream->ioc,
						msecs_to_jiffies(HDA_CL_DMA_IOC_TIMEOUT_MS));
	if (!time_left) {
		/*
		 * NOTE(review): the DMA started above is not stopped on this
		 * early return — confirm whether a TRIGGER_STOP is needed here
		 * as well before relying on this path.
		 */
		dev_err(sdev->dev, "Code loader DMA did not complete\n");
		return -ETIMEDOUT;
	}
	dev_dbg(sdev->dev, "Code loader DMA done, waiting for FW_ENTERED status\n");

	status = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_BAR,
					chip->rom_status_reg, reg,
					(FSR_TO_STATE_CODE(reg) == FSR_STATE_FW_ENTERED),
					HDA_DSP_REG_POLL_INTERVAL_US,
					HDA_DSP_BASEFW_TIMEOUT_US);

	/*
	 * even in case of errors we still need to stop the DMAs,
	 * but we return the initial error should the DMA stop also fail
	 */
	if (status < 0) {
		dev_err(sdev->dev,
			"%s: timeout with rom_status_reg (%#x) read\n",
			__func__, chip->rom_status_reg);
	} else {
		dev_dbg(sdev->dev, "Code loader FW_ENTERED status\n");
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret < 0) {
		dev_err(sdev->dev, "error: DMA trigger stop failed\n");
		if (!status)
			status = ret;
	} else {
		dev_dbg(sdev->dev, "Code loader DMA stopped\n");
	}

	return status;
}
2020-08-26 21:45:30 +03:00
/*
 * hda_dsp_cl_boot_firmware_iccmax - boot firmware with an ICCMAX capture stream
 * @sdev: SOF device
 *
 * Sets up a throw-away capture stream for ICCMAX measurement around the
 * normal firmware boot, saving and restoring the LTRP guardband value.
 * The stream is cleaned up even when the boot fails; the first error is
 * returned.
 */
int hda_dsp_cl_boot_firmware_iccmax(struct snd_sof_dev *sdev)
{
	struct hdac_ext_stream *iccmax_stream;
	struct snd_dma_buffer dmab_bdl;
	int ret, ret1;
	u8 original_gb;

	/* save the original LTRP guardband value */
	original_gb = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP) &
		HDA_VS_INTEL_LTRP_GB_MASK;

	/*
	 * Prepare capture stream for ICCMAX. We do not need to store
	 * the data, so use a buffer of PAGE_SIZE for receiving.
	 */
	iccmax_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT, PAGE_SIZE,
				       &dmab_bdl, SNDRV_PCM_STREAM_CAPTURE, true);
	if (IS_ERR(iccmax_stream)) {
		dev_err(sdev->dev, "error: dma prepare for ICCMAX stream failed\n");
		return PTR_ERR(iccmax_stream);
	}

	ret = hda_dsp_cl_boot_firmware(sdev);

	/*
	 * Perform iccmax stream cleanup. This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab_bdl, iccmax_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: ICCMAX stream cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/* restore the original guardband value after FW boot */
	snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_LTRP,
			    HDA_VS_INTEL_LTRP_GB_MASK, original_gb);

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware_iccmax, SND_SOC_SOF_INTEL_CNL);
2020-08-26 21:45:30 +03:00
2022-01-21 02:15:31 +03:00
static int hda_dsp_boot_imr ( struct snd_sof_dev * sdev )
{
2022-06-15 11:43:47 +03:00
const struct sof_intel_dsp_desc * chip_info ;
2022-01-21 02:15:31 +03:00
int ret ;
2022-06-15 11:43:47 +03:00
chip_info = get_chip_info ( sdev - > pdata ) ;
if ( chip_info - > cl_init )
ret = chip_info - > cl_init ( sdev , 0 , true ) ;
else
ret = - EINVAL ;
2022-04-27 14:51:59 +03:00
if ( ! ret )
2022-04-21 23:20:31 +03:00
hda_sdw_process_wakeen ( sdev ) ;
2022-01-21 02:15:31 +03:00
return ret ;
}
2019-04-12 19:08:51 +03:00
/*
 * hda_dsp_cl_boot_firmware - load and boot the SOF firmware via the code loader
 * @sdev: SOF device
 *
 * Tries a direct IMR boot first when supported; otherwise (or if IMR boot
 * fails) performs a cold boot: ROM init with up to HDA_FW_BOOT_ATTEMPTS
 * retries, firmware download via DMA, and boot. The loader stream is
 * cleaned up on all paths.
 *
 * Returns the chip's init_core_mask on success, negative error otherwise.
 */
int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct snd_sof_pdata *plat_data = sdev->pdata;
	const struct sof_dev_desc *desc = plat_data->desc;
	const struct sof_intel_dsp_desc *chip_info;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct snd_dma_buffer dmab;
	int ret, ret1, i;

	if (hda->imrboot_supported && !sdev->first_boot && !hda->skip_imr_boot) {
		dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n");
		hda->boot_iteration = 0;
		ret = hda_dsp_boot_imr(sdev);
		if (!ret) {
			hda->booted_from_imr = true;
			return 0;
		}

		dev_warn(sdev->dev, "IMR restore failed, trying to cold boot\n");
	}

	hda->booted_from_imr = false;

	chip_info = desc->chip_info;

	/* sanity check: there must be payload after the manifest/header */
	if (sdev->basefw.fw->size <= sdev->basefw.payload_offset) {
		dev_err(sdev->dev, "error: firmware size must be greater than firmware offset\n");
		return -EINVAL;
	}

	stripped_firmware.data = sdev->basefw.fw->data + sdev->basefw.payload_offset;
	stripped_firmware.size = sdev->basefw.fw->size - sdev->basefw.payload_offset;

	/* init for booting wait */
	init_waitqueue_head(&sdev->boot_wait);

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "error: dma prepare for fw loading failed\n");
		return PTR_ERR(hext_stream);
	}

	/* copy the stripped firmware image into the loader DMA buffer */
	memcpy(dmab.area, stripped_firmware.data,
	       stripped_firmware.size);

	/* try ROM init a few times before giving up */
	for (i = 0; i < HDA_FW_BOOT_ATTEMPTS; i++) {
		dev_dbg(sdev->dev,
			"Attempting iteration %d of Core En/ROM load...\n", i);

		hda->boot_iteration = i + 1;
		if (chip_info->cl_init)
			ret = chip_info->cl_init(sdev, hext_stream->hstream.stream_tag, false);
		else
			ret = -EINVAL;

		/* don't retry anymore if successful */
		if (!ret)
			break;
	}

	if (i == HDA_FW_BOOT_ATTEMPTS) {
		dev_err(sdev->dev, "error: dsp init failed after %d attempts with err: %d\n",
			i, ret);
		goto cleanup;
	}

	/*
	 * When a SoundWire link is in clock stop state, a Slave
	 * device may trigger in-band wakes for events such as jack
	 * insertion or acoustic event detection. This event will lead
	 * to a WAKEEN interrupt, handled by the PCI device and routed
	 * to PME if the PCI device is in D3. The resume function in
	 * audio PCI driver will be invoked by ACPI for PME event and
	 * initialize the device and process WAKEEN interrupt.
	 *
	 * The WAKEEN interrupt should be processed ASAP to prevent an
	 * interrupt flood, otherwise other interrupts, such IPC,
	 * cannot work normally. The WAKEEN is handled after the ROM
	 * is initialized successfully, which ensures power rails are
	 * enabled before accessing the SoundWire SHIM registers
	 */
	if (!sdev->first_boot)
		hda_sdw_process_wakeen(sdev);

	/*
	 * Set the boot_iteration to the last attempt, indicating that the
	 * DSP ROM has been initialized and from this point there will be no
	 * retry done to boot.
	 *
	 * Continue with code loading and firmware boot
	 */
	hda->boot_iteration = HDA_FW_BOOT_ATTEMPTS;
	ret = hda_cl_copy_fw(sdev, hext_stream);
	if (!ret) {
		dev_dbg(sdev->dev, "Firmware download successful, booting...\n");
		hda->skip_imr_boot = false;
	} else {
		snd_sof_dsp_dbg_dump(sdev, "Firmware download failed",
				     SOF_DBG_DUMP_PCI | SOF_DBG_DUMP_MBOX);
		hda->skip_imr_boot = true;
	}

cleanup:
	/*
	 * Perform codeloader stream cleanup.
	 * This should be done even if firmware loading fails.
	 * If the cleanup also fails, we return the initial error
	 */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "error: Code loader DSP cleanup failed\n");

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	/*
	 * return primary core id if both fw copy
	 * and stream clean up are successful
	 */
	if (!ret)
		return chip_info->init_core_mask;

	/* disable DSP */
	hda_dsp_ctrl_ppcap_enable(sdev, false);
	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_cl_boot_firmware, SND_SOC_SOF_INTEL_HDA_COMMON);
2019-04-12 19:08:51 +03:00
2022-10-20 15:12:34 +03:00
/*
 * hda_dsp_ipc4_load_library - download an IPC4 loadable library to the DSP
 * @sdev: SOF device
 * @fw_lib: library image already verified during request/parse
 * @reload: true when re-loading after a power cycle; the load is skipped
 *	entirely if the DSP booted from IMR with fw context saved
 *
 * Uses the optional two-stage LOAD_LIBRARY_PREPARE + LOAD_LIBRARY protocol,
 * falling back to single-stage LOAD_LIBRARY when the firmware replies
 * -EOPNOTSUPP to the prepare message. The loader stream is always cleaned
 * up; the first error encountered is returned.
 */
int hda_dsp_ipc4_load_library(struct snd_sof_dev *sdev,
			      struct sof_ipc4_fw_library *fw_lib, bool reload)
{
	struct sof_intel_hda_dev *hda = sdev->pdata->hw_pdata;
	struct sof_ipc4_fw_data *ipc4_data = sdev->private;
	struct hdac_ext_stream *hext_stream;
	struct firmware stripped_firmware;
	struct sof_ipc4_msg msg = {};
	struct snd_dma_buffer dmab;
	int ret, ret1;

	/* if IMR booting is enabled and fw context is saved for D3 state, skip the loading */
	if (reload && hda->booted_from_imr && ipc4_data->fw_context_save)
		return 0;

	/* the fw_lib has been verified during loading, we can trust the validity here */
	stripped_firmware.data = fw_lib->sof_fw.fw->data + fw_lib->sof_fw.payload_offset;
	stripped_firmware.size = fw_lib->sof_fw.fw->size - fw_lib->sof_fw.payload_offset;

	/* prepare DMA for code loader stream */
	hext_stream = hda_cl_prepare(sdev->dev, HDA_CL_STREAM_FORMAT,
				     stripped_firmware.size,
				     &dmab, SNDRV_PCM_STREAM_PLAYBACK, false);
	if (IS_ERR(hext_stream)) {
		dev_err(sdev->dev, "%s: DMA prepare failed\n", __func__);
		return PTR_ERR(hext_stream);
	}

	memcpy(dmab.area, stripped_firmware.data, stripped_firmware.size);

	/*
	 * 1st stage: SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE
	 * Message includes the dma_id to be prepared for the library loading.
	 * If the firmware does not have support for the message, we will
	 * receive -EOPNOTSUPP. In this case we will use single step library
	 * loading and proceed to send the LOAD_LIBRARY message.
	 */
	msg.primary = hext_stream->hstream.stream_tag - 1;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY_PREPARE);
	msg.primary |= SOF_IPC4_MSG_DIR(SOF_IPC4_MSG_REQUEST);
	msg.primary |= SOF_IPC4_MSG_TARGET(SOF_IPC4_FW_GEN_MSG);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
	if (!ret) {
		int sd_offset = SOF_STREAM_SD_OFFSET(&hext_stream->hstream);
		unsigned int status;

		/*
		 * Make sure that the FIFOS value is not 0 in SDxFIFOS register
		 * which indicates that the firmware set the GEN bit and we can
		 * continue to start the DMA
		 */
		ret = snd_sof_dsp_read_poll_timeout(sdev, HDA_DSP_HDA_BAR,
						    sd_offset + SOF_HDA_ADSP_REG_SD_FIFOSIZE,
						    status,
						    status & SOF_HDA_SD_FIFOSIZE_FIFOS_MASK,
						    HDA_DSP_REG_POLL_INTERVAL_US,
						    HDA_DSP_BASEFW_TIMEOUT_US);
		if (ret < 0)
			dev_warn(sdev->dev,
				 "%s: timeout waiting for FIFOS\n", __func__);
	} else if (ret != -EOPNOTSUPP) {
		/* a real IPC failure (not just missing prepare support) */
		goto cleanup;
	}

	ret = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_START);
	if (ret < 0) {
		dev_err(sdev->dev, "%s: DMA trigger start failed\n", __func__);
		goto cleanup;
	}

	/*
	 * 2nd stage: LOAD_LIBRARY
	 * Message includes the dma_id and the lib_id, the dma_id must be
	 * identical to the one sent via LOAD_LIBRARY_PREPARE
	 */
	msg.primary &= ~SOF_IPC4_MSG_TYPE_MASK;
	msg.primary |= SOF_IPC4_MSG_TYPE_SET(SOF_IPC4_GLB_LOAD_LIBRARY);
	msg.primary |= SOF_IPC4_GLB_LOAD_LIBRARY_LIB_ID(fw_lib->id);
	ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);

	/* Stop the DMA channel */
	ret1 = hda_cl_trigger(sdev->dev, hext_stream, SNDRV_PCM_TRIGGER_STOP);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: DMA trigger stop failed\n", __func__);
		if (!ret)
			ret = ret1;
	}

cleanup:
	/* clean up even in case of error and return the first error */
	ret1 = hda_cl_cleanup(sdev->dev, &dmab, hext_stream);
	if (ret1 < 0) {
		dev_err(sdev->dev, "%s: Code loader DSP cleanup failed\n", __func__);

		/* set return value to indicate cleanup failure */
		if (!ret)
			ret = ret1;
	}

	return ret;
}
EXPORT_SYMBOL_NS(hda_dsp_ipc4_load_library, SND_SOC_SOF_INTEL_HDA_COMMON);
2022-10-20 15:12:34 +03:00
2020-11-27 19:40:19 +03:00
int hda_dsp_ext_man_get_cavs_config_data ( struct snd_sof_dev * sdev ,
const struct sof_ext_man_elem_header * hdr )
{
const struct sof_ext_man_cavs_config_data * config_data =
container_of ( hdr , struct sof_ext_man_cavs_config_data , hdr ) ;
struct sof_intel_hda_dev * hda = sdev - > pdata - > hw_pdata ;
int i , elem_num ;
/* calculate total number of config data elements */
elem_num = ( hdr - > size - sizeof ( struct sof_ext_man_elem_header ) )
/ sizeof ( struct sof_config_elem ) ;
if ( elem_num < = 0 ) {
dev_err ( sdev - > dev , " cavs config data is inconsistent: %d \n " , elem_num ) ;
return - EINVAL ;
}
for ( i = 0 ; i < elem_num ; i + + )
switch ( config_data - > elems [ i ] . token ) {
case SOF_EXT_MAN_CAVS_CONFIG_EMPTY :
/* skip empty token */
break ;
case SOF_EXT_MAN_CAVS_CONFIG_CAVS_LPRO :
hda - > clk_config_lpro = config_data - > elems [ i ] . value ;
dev_dbg ( sdev - > dev , " FW clock config: %s \n " ,
hda - > clk_config_lpro ? " LPRO " : " HPRO " ) ;
break ;
2020-11-27 19:40:20 +03:00
case SOF_EXT_MAN_CAVS_CONFIG_OUTBOX_SIZE :
case SOF_EXT_MAN_CAVS_CONFIG_INBOX_SIZE :
/* These elements are defined but not being used yet. No warn is required */
break ;
2020-11-27 19:40:19 +03:00
default :
2020-11-27 19:40:20 +03:00
dev_info ( sdev - > dev , " unsupported token type: %d \n " ,
2020-11-27 19:40:19 +03:00
config_data - > elems [ i ] . token ) ;
}
return 0 ;
}
2024-05-03 16:52:21 +03:00
EXPORT_SYMBOL_NS ( hda_dsp_ext_man_get_cavs_config_data , SND_SOC_SOF_INTEL_HDA_COMMON ) ;