2013-06-12 20:52:10 +03:00
/*
* Copyright ( c ) 2005 - 2011 Atheros Communications Inc .
* Copyright ( c ) 2011 - 2013 Qualcomm Atheros , Inc .
*
* Permission to use , copy , modify , and / or distribute this software for any
* purpose with or without fee is hereby granted , provided that the above
* copyright notice and this permission notice appear in all copies .
*
* THE SOFTWARE IS PROVIDED " AS IS " AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS . IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL , DIRECT , INDIRECT , OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE , DATA OR PROFITS , WHETHER IN AN
* ACTION OF CONTRACT , NEGLIGENCE OR OTHER TORTIOUS ACTION , ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE .
*/
# include <linux/pci.h>
# include <linux/module.h>
# include <linux/interrupt.h>
# include <linux/spinlock.h>
2013-11-20 10:00:49 +02:00
# include <linux/bitops.h>
2013-06-12 20:52:10 +03:00
# include "core.h"
# include "debug.h"
# include "targaddrs.h"
# include "bmi.h"
# include "hif.h"
# include "htc.h"
# include "ce.h"
# include "pci.h"
2013-11-25 14:06:27 +01:00
/* Interrupt delivery mode requested via the irq_mode module parameter.
 * AUTO lets the driver pick the best available mechanism at probe time.
 */
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

/* Chip reset strategy requested via the reset_mode module parameter. */
enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};

static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");

/* how long wait to wait for target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000
/* number of warm reset retries before falling back (see reset paths) */
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3

/* PCI device IDs handled by this driver */
#define QCA988X_2_0_DEVICE_ID	(0x003c)
#define QCA6174_2_1_DEVICE_ID	(0x003e)
#define QCA99X0_2_0_DEVICE_ID	(0x0040)
2013-06-12 20:52:10 +03:00
2014-08-08 15:56:03 +02:00
/* PCI IDs this driver binds to. Note QCA99X0 is intentionally absent here
 * even though its device ID is defined above.
 */
static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
	{0}
};

/* (device id, chip revision) pairs the driver actually supports; probed
 * hardware not matching an entry is rejected.
 */
static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
	/* QCA988X pre 2.0 chips are not supported because they need some nasty
	 * hacks. ath10k doesn't have them and these devices crash horribly
	 * because of that.
	 */
	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
	{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
};
2014-08-22 14:33:14 +02:00
/* Forward declarations - implementations appear later in this file. */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
2013-06-12 20:52:10 +03:00
/* Host-side copy engine attributes, indexed by CE number.
 * src_nentries/dest_nentries of 0 means the host does not use that
 * direction of the given CE.
 */
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		/* per-CE interrupts disabled; HTT tx completions are polled */
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},

	/* CE8: target->host pktlog */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 128,
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE10: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE11: target autonomous hif memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},
};
/* Target firmware's Copy Engine configuration.
 * Sent to the target at startup; all fields are little-endian on the wire,
 * hence the __cpu_to_le32() on every initializer.
 */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = __cpu_to_le32(0),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = __cpu_to_le32(1),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE2: target->host WMI */
	{
		.pipenum = __cpu_to_le32(2),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE3: host->target WMI */
	{
		.pipenum = __cpu_to_le32(3),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE4: host->target HTT */
	{
		.pipenum = __cpu_to_le32(4),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(256),
		.nbytes_max = __cpu_to_le32(256),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = __cpu_to_le32(5),
		.pipedir = __cpu_to_le32(PIPEDIR_OUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = __cpu_to_le32(6),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(4096),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS),
		.reserved = __cpu_to_le32(0),
	},

	/* CE7 used only by Host */
	{
		.pipenum = __cpu_to_le32(7),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(0),
		.nbytes_max = __cpu_to_le32(0),
		.flags = __cpu_to_le32(0),
		.reserved = __cpu_to_le32(0),
	},

	/* CE8 target->host packtlog */
	{
		.pipenum = __cpu_to_le32(8),
		.pipedir = __cpu_to_le32(PIPEDIR_IN),
		.nentries = __cpu_to_le32(64),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* CE9 target autonomous qcache memcpy */
	{
		.pipenum = __cpu_to_le32(9),
		.pipedir = __cpu_to_le32(PIPEDIR_INOUT),
		.nentries = __cpu_to_le32(32),
		.nbytes_max = __cpu_to_le32(2048),
		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
		.reserved = __cpu_to_le32(0),
	},

	/* It not necessary to send target wlan configuration for CE10 & CE11
	 * as these CEs are not actively used in target.
	 */
};
2014-08-26 19:14:02 +03:00
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 * Each triple is (service id, direction, pipe number), little-endian.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(3),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(2),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(0),
	},
	{ /* not used */
		__cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_OUT),	/* out = UL = host -> target */
		__cpu_to_le32(4),
	},
	{
		__cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
		__cpu_to_le32(PIPEDIR_IN),	/* in = DL = target -> host */
		__cpu_to_le32(1),
	},

	/* (Additions here) */

	{ /* must be last */
		__cpu_to_le32(0),
		__cpu_to_le32(0),
		__cpu_to_le32(0),
	},
};
2015-05-18 09:38:18 +00:00
static bool ath10k_pci_is_awake ( struct ath10k * ar )
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
u32 val = ioread32 ( ar_pci - > mem + PCIE_LOCAL_BASE_ADDRESS +
RTC_STATE_ADDRESS ) ;
return RTC_STATE_V_GET ( val ) = = RTC_STATE_V_ON ;
}
/* Assert the SOC wake request bit. Does not wait for the device to actually
 * become awake (see ath10k_pci_wake_wait()). Caller must hold ps_lock.
 */
static void __ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
}
/* Drop the SOC wake request and mark the cached power state as asleep.
 * Caller must hold ps_lock.
 */
static void __ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	lockdep_assert_held(&ar_pci->ps_lock);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);
	ar_pci->ps_awake = false;
}
/* Busy-wait (with a slowly growing udelay step) until the device reports
 * awake, or give up after PCIE_WAKE_TIMEOUT microseconds.
 *
 * Returns 0 once awake, -ETIMEDOUT otherwise.
 */
static int ath10k_pci_wake_wait(struct ath10k *ar)
{
	int waited = 0;
	int step = 5;

	while (waited < PCIE_WAKE_TIMEOUT) {
		if (ath10k_pci_is_awake(ar))
			return 0;

		udelay(step);
		waited += step;

		/* back off gradually up to a 50us poll interval */
		if (step < 50)
			step += 5;
	}

	return -ETIMEDOUT;
}
/* Take a reference on the device being awake, waking it first if needed.
 * Must be balanced with ath10k_pci_sleep().
 *
 * Returns 0 on success or the error from ath10k_pci_wake_wait(); on error
 * no reference is taken.
 */
static int ath10k_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* This function can be called very frequently. To avoid excessive
	 * CPU stalls for MMIO reads use a cache var to hold the device state.
	 */
	if (!ar_pci->ps_awake) {
		__ath10k_pci_wake(ar);

		ret = ath10k_pci_wake_wait(ar);
		if (ret == 0)
			ar_pci->ps_awake = true;
	}

	if (ret == 0) {
		ar_pci->ps_wake_refcount++;
		/* catch refcount wrap-around */
		WARN_ON(ar_pci->ps_wake_refcount == 0);
	}

	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);

	return ret;
}
/* Release a wake reference taken by ath10k_pci_wake(). The device is not
 * put to sleep immediately; instead the grace-period timer is (re)armed and
 * ath10k_pci_ps_timer() performs the actual sleep once it expires with no
 * references outstanding.
 */
static void ath10k_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* unbalanced sleep - warn and bail rather than underflow */
	if (WARN_ON(ar_pci->ps_wake_refcount == 0))
		goto skip;

	ar_pci->ps_wake_refcount--;

	mod_timer(&ar_pci->ps_timer, jiffies +
		  msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
/* Grace-period timer callback: put the device to sleep only if no wake
 * references were taken since the timer was armed.
 */
static void ath10k_pci_ps_timer(unsigned long ptr)
{
	struct ath10k *ar = (void *)ptr;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	spin_lock_irqsave(&ar_pci->ps_lock, flags);

	ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
		   ar_pci->ps_wake_refcount, ar_pci->ps_awake);

	/* someone re-acquired a wake reference meanwhile - stay awake */
	if (ar_pci->ps_wake_refcount > 0)
		goto skip;

	__ath10k_pci_sleep(ar);

skip:
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
/* Cancel the grace-period timer and force the device asleep immediately.
 * Warns if wake references are still outstanding (they are leaked).
 */
static void ath10k_pci_sleep_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	/* del_timer_sync() guarantees the timer callback is not running
	 * before we take the lock and sleep the device.
	 */
	del_timer_sync(&ar_pci->ps_timer);

	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	__ath10k_pci_sleep(ar);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
/* Write a 32-bit value to the device MMIO space at the given offset,
 * bounds-checked against the mapped BAR length and wrapped in a
 * wake/sleep reference. Out-of-bounds or wake failures are logged and
 * the write is silently dropped.
 */
void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(value), ar_pci->mem_len);
		return;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
			    value, offset, ret);
		return;
	}

	iowrite32(value, ar_pci->mem + offset);
	ath10k_pci_sleep(ar);
}
/* Read a 32-bit value from the device MMIO space at the given offset,
 * bounds-checked against the mapped BAR length and wrapped in a
 * wake/sleep reference.
 *
 * Returns the register value; 0 for an out-of-bounds offset and
 * 0xffffffff if the device could not be woken.
 */
u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 val;
	int ret;

	if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
		ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
			    offset, offset + sizeof(val), ar_pci->mem_len);
		return 0;
	}

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
			    offset, ret);
		return 0xffffffff;
	}

	val = ioread32(ar_pci->mem + offset);
	ath10k_pci_sleep(ar);

	return val;
}
/* Read a register relative to the SOC RTC register block. */
u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
{
	u32 offset = RTC_SOC_BASE_ADDRESS + addr;

	return ath10k_pci_read32(ar, offset);
}
/* Write a register relative to the SOC RTC register block. */
void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
{
	u32 offset = RTC_SOC_BASE_ADDRESS + addr;

	ath10k_pci_write32(ar, offset, val);
}
/* Read a register relative to the PCIe local register block. */
u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
{
	u32 offset = PCIE_LOCAL_BASE_ADDRESS + addr;

	return ath10k_pci_read32(ar, offset);
}
/* Write a register relative to the PCIe local register block. */
void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
{
	u32 offset = PCIE_LOCAL_BASE_ADDRESS + addr;

	ath10k_pci_write32(ar, offset, val);
}
2013-11-25 14:06:20 +01:00
/* Check whether the shared legacy interrupt line was raised by this
 * device (firmware or any copy engine cause bit set).
 */
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);

	return !!(cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL));
}
2013-11-25 14:06:25 +01:00
/* Mask and acknowledge all legacy interrupt sources. */
static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise interrupt can not be
	 * really cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
/* Unmask firmware and copy-engine legacy interrupt sources. */
static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
2014-08-22 14:23:31 +02:00
static inline const char * ath10k_pci_get_irq_method ( struct ath10k * ar )
2013-11-25 14:06:26 +01:00
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
2014-08-22 14:23:31 +02:00
if ( ar_pci - > num_msi_intrs > 1 )
return " msi-x " ;
2014-09-14 12:50:33 +03:00
if ( ar_pci - > num_msi_intrs = = 1 )
2014-08-22 14:23:31 +02:00
return " msi " ;
2014-09-14 12:50:33 +03:00
return " legacy " ;
2013-11-25 14:06:26 +01:00
}
2014-08-22 14:33:14 +02:00
/* Allocate one rx skb, DMA-map it and post it to the pipe's copy engine.
 * Caller must hold ce_lock.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO on DMA mapping
 * failure, or the error from __ath10k_ce_rx_post_buf() (the skb is unmapped
 * and freed on every error path).
 */
static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int ret;

	lockdep_assert_held(&ar_pci->ce_lock);

	skb = dev_alloc_skb(pipe->buf_sz);
	if (!skb)
		return -ENOMEM;

	/* CE requires 4-byte aligned buffers */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	paddr = dma_map_single(ar->dev, skb->data,
			       skb->len + skb_tailroom(skb),
			       DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(ar->dev, paddr))) {
		ath10k_warn(ar, "failed to dma map pci rx buf\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	/* stash the mapping in the skb control block for the unmap on rx */
	ATH10K_SKB_RXCB(skb)->paddr = paddr;

	ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
	if (ret) {
		ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
		dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
2014-08-22 14:33:14 +02:00
/* Fill all free rx slots of a pipe's copy engine with fresh buffers.
 * Caller must hold ce_lock. On allocation/post failure the rx_post_retry
 * timer is armed so posting is retried later instead of being lost.
 */
static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
{
	struct ath10k *ar = pipe->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
	int ret, num;

	lockdep_assert_held(&ar_pci->ce_lock);

	/* pipes with no rx direction have nothing to post */
	if (pipe->buf_sz == 0)
		return;

	if (!ce_pipe->dest_ring)
		return;

	num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
	while (num--) {
		ret = __ath10k_pci_rx_post_buf(pipe);
		if (ret) {
			ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
			mod_timer(&ar_pci->rx_post_retry, jiffies +
				  ATH10K_PCI_RX_POST_RETRY_MS);
			break;
		}
	}
}
static void ath10k_pci_rx_post_pipe ( struct ath10k_pci_pipe * pipe )
{
struct ath10k * ar = pipe - > hif_ce_state ;
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
spin_lock_bh ( & ar_pci - > ce_lock ) ;
__ath10k_pci_rx_post_pipe ( pipe ) ;
spin_unlock_bh ( & ar_pci - > ce_lock ) ;
}
static void ath10k_pci_rx_post ( struct ath10k * ar )
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
int i ;
spin_lock_bh ( & ar_pci - > ce_lock ) ;
for ( i = 0 ; i < CE_COUNT ; i + + )
__ath10k_pci_rx_post_pipe ( & ar_pci - > pipe_info [ i ] ) ;
spin_unlock_bh ( & ar_pci - > ce_lock ) ;
}
/* rx_post_retry timer callback: retry posting rx buffers on all pipes. */
static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
{
	struct ath10k *ar = (struct ath10k *)ptr;

	ath10k_pci_rx_post(ar);
}
2015-06-18 12:31:05 +05:30
/* Convert a Target CPU virtual address into an address usable by the
 * diagnostic copy engine (CE address space). The window base is chip
 * specific; the low 20 bits of the CPU address are kept within it.
 */
static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
	u32 val = 0;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
		/* window base from the low 11 bits of CORE_CTRL, shifted
		 * into the upper address bits
		 */
		val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					 CORE_CTRL_ADDRESS) &
		       0x7ff) << 21;
		break;
	case ATH10K_HW_QCA99X0:
		val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
		break;
	}

	val |= 0x100000 | (addr & 0xfffff);
	return val;
}
2013-06-12 20:52:10 +03:00
/*
 * Diagnostic read / write access is provided for startup / config / debug
 * usage. Caller must guarantee proper alignment, when applicable, and
 * single user at any moment.
 *
 * Reads @nbytes from Target memory at Target-CPU virtual address @address
 * into @data, via the diagnostic copy engine and a DMA bounce buffer.
 * Returns 0 on success or a negative errno (-ENOMEM, -EBUSY on CE timeout,
 * -EIO on a length/address mismatch in the CE handshake).
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	/*
	 * The address supplied by the caller is in the Target CPU virtual
	 * address space; convert it to CE address space for use with the
	 * diagnostic CE.
	 *
	 * FIX: convert once, before the chunk loop. The conversion used to
	 * run inside the loop, which re-converted the already converted and
	 * advanced address on every chunk after the first (re-masking it
	 * with 0xfffff and re-ORing the window base), corrupting any read
	 * larger than DIAG_TRANSFER_LIMIT. This now matches
	 * ath10k_pci_diag_write_mem(), which converts before its loop.
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes,
					    0, 0);
		if (ret)
			goto done;

		/* Poll for send completion with a bounded timeout */
		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		/* Poll for receive completion with a bounded timeout */
		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0)
		memcpy(data, data_buf, orig_nbytes);
	else
		ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
			    address, ret);

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
2014-08-25 08:37:26 +03:00
/* Read a single 32-bit little-endian word from Target memory via the
 * diagnostic window and return it CPU-endian in *value.
 */
static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
{
	int ret;
	__le32 tmp = 0;

	ret = ath10k_pci_diag_read_mem(ar, address, &tmp, sizeof(tmp));
	*value = __le32_to_cpu(tmp);
	return ret;
}
/* Read @len bytes of firmware memory indirectly: first fetch the pointer
 * stored in the host-interest area at @src, then read the pointed-to
 * memory into @dest. Returns 0 or a negative errno from the diag reads.
 */
static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
				     u32 src, u32 len)
{
	u32 host_addr, addr;
	int ret;

	host_addr = host_interest_item_address(src);

	ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
	if (ret != 0) {
		ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
			    src, ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
	if (ret != 0) {
		ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
			    addr, len, ret);
		return ret;
	}

	return 0;
}

/* Convenience wrapper taking a host-interest struct member name. */
#define ath10k_pci_diag_read_hi(ar, dest, src, len)		\
	__ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
2013-06-12 20:52:10 +03:00
/* Write @nbytes from @data to Target memory at Target-CPU virtual address
 * @address, via the diagnostic copy engine and a DMA bounce buffer.
 * Counterpart of ath10k_pci_diag_read_mem(). Returns 0 on success or a
 * negative errno (-ENOMEM, -EBUSY on CE timeout, -EIO on handshake
 * mismatch).
 */
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	spin_lock_bh(&ar_pci->ce_lock);

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
						       orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	memcpy(data_buf, data, orig_nbytes);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
					    nbytes, 0, 0);
		if (ret != 0)
			goto done;

		/* Poll for send completion with a bounded timeout */
		i = 0;
		while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		/* Poll for receive completion with a bounded timeout */
		i = 0;
		while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
							    &completed_nbytes,
							    &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);
	}

	if (ret != 0)
		ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
			    address, ret);

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
2014-08-26 19:14:03 +03:00
/* Write a single 32-bit word to target memory via the diagnostic window.
 * The value is converted to target (little-endian) byte order before the
 * bounce-buffer transfer performed by ath10k_pci_diag_write_mem().
 */
static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
{
	__le32 val = __cpu_to_le32(value);

	return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
}
2013-06-12 20:52:10 +03:00
/* Called by lower (CE) layer when a send to Target completes.
 *
 * Drains all completed send descriptors from the copy engine, collecting
 * the associated skbs on a local list first so the HIF tx_completion
 * callback is invoked outside the CE completion iteration.
 */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff_head list;
	struct sk_buff *skb;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
					     &nbytes, &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (skb == NULL)
			continue;

		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list)))
		cb->tx_completion(ar, skb);
}
/* Called by lower (CE) layer when data is received from the Target.
 *
 * For each completed receive descriptor: unmap the DMA buffer, sanity
 * check the reported length against the skb capacity, then queue the skb
 * locally and deliver via the HIF rx_completion callback. Finally the
 * rx ring is replenished with fresh buffers.
 */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	struct sk_buff_head list;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	__skb_queue_head_init(&list);
	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		/* device claims to have written more than the buffer can
		 * hold - drop the frame rather than overrun
		 */
		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		__skb_queue_tail(&list, skb);
	}

	while ((skb = __skb_dequeue(&list))) {
		ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
			   ce_state->id, skb->len);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
				skb->data, skb->len);

		cb->rx_completion(ar, skb);
	}

	ath10k_pci_rx_post_pipe(pipe_info);
}
2014-02-27 18:50:04 +02:00
/* Send a scatter-gather list of items down one CE pipe.
 *
 * All items but the last are submitted with CE_SEND_FLAG_GATHER; the
 * final item (flags 0) terminates the gather sequence. On any failure
 * every descriptor already posted in this call is reverted before the
 * error is returned, so the ring is left unchanged.
 *
 * Returns 0 on success, -ENOBUFS if the src ring lacks space for all
 * n_items, or the ath10k_ce_send_nolock() error.
 */
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int err, i = 0;

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	/* reject up front unless the whole gather list fits */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto err;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ar, ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto err;
	}

	/* `i` is equal to `n_items -1` after for() */
	ath10k_dbg(ar, ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto err;

	spin_unlock_bh(&ar_pci->ce_lock);
	return 0;

err:
	/* unwind the i descriptors posted so far */
	for (; i > 0; i--)
		__ath10k_ce_send_revert(ce_pipe);

	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
2014-09-24 14:16:52 +03:00
/* HIF wrapper around the PCI diagnostic-window memory read. */
static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
				    size_t buf_len)
{
	return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
2013-06-12 20:52:10 +03:00
/* Report how many source-ring entries are currently free on a pipe. */
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
2014-08-25 08:37:32 +03:00
/* Dump the firmware failure-state registers to the log and, if crash_data
 * is provided, record them (still little-endian) for later retrieval.
 * Caller must hold ar->data_lock.
 */
static void ath10k_pci_dump_registers(struct ath10k *ar,
				      struct ath10k_fw_crash_data *crash_data)
{
	__le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	int i, ret;

	lockdep_assert_held(&ar->data_lock);

	ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
				      hi_failure_state,
				      REG_DUMP_COUNT_QCA988X * sizeof(__le32));
	if (ret) {
		ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
		return;
	}

	/* the print loop below consumes four values per line */
	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err(ar, "firmware register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   __le32_to_cpu(reg_dump_values[i]),
			   __le32_to_cpu(reg_dump_values[i + 1]),
			   __le32_to_cpu(reg_dump_values[i + 2]),
			   __le32_to_cpu(reg_dump_values[i + 3]));

	if (!crash_data)
		return;

	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
		crash_data->registers[i] = reg_dump_values[i];
}
2014-08-25 08:37:37 +03:00
/* Handle a firmware crash: bump the crash counter, log a per-crash uuid
 * (or "n/a" if no crash-data slot is available), dump driver info and the
 * firmware registers, then schedule the device restart work.
 */
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
	struct ath10k_fw_crash_data *crash_data;
	char uuid[50];

	spin_lock_bh(&ar->data_lock);

	ar->stats.fw_crash_counter++;

	crash_data = ath10k_debug_get_new_fw_crash_data(ar);

	if (crash_data)
		scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
	else
		scnprintf(uuid, sizeof(uuid), "n/a");

	ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
	ath10k_print_driver_info(ar);
	ath10k_pci_dump_registers(ar, crash_data);

	spin_unlock_bh(&ar->data_lock);

	queue_work(ar->workqueue, &ar->restart_work);
}
/* Poll a pipe for send completions. Unless 'force' is set, skip the
 * (relatively expensive) CE register access while at least half of the
 * pipe's send resources are still free.
 */
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
2013-07-05 16:15:12 +03:00
/* Install the HIF tx/rx completion callbacks used by the CE handlers. */
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
2013-11-08 08:01:25 +01:00
/* Stop all interrupt bottom halves: the main intr and fw-error tasklets,
 * every per-pipe tasklet, and the rx-post retry timer.
 */
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	del_timer_sync(&ar_pci->rx_post_retry);
}
2013-06-12 20:52:10 +03:00
/* Resolve a HTC service id to its uplink/downlink CE pipe numbers by
 * scanning target_service_to_ce_map_wlan. *ul_is_polled reflects whether
 * the uplink pipe has CE interrupts disabled; downlink polling is not
 * supported. Returns -ENOENT (with a WARN) if either direction is left
 * unmapped for the service.
 */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
		entry = &target_service_to_ce_map_wlan[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
}
/* Return the pipe pair used for the reserved control service
 * (ATH10K_HTC_SVC_ID_RSVD_CTRL); polling flags are discarded.
 */
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
2014-10-20 14:14:38 +02:00
/* Mask the firmware-indication MSI by clearing CORE_CTRL_PCIE_REG_31_MASK
 * in the SoC core control register. No-op on QCA99X0 (register layout for
 * masking not known yet, per the TODO).
 */
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val &= ~CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
		/* TODO: Find appropriate register configuration for QCA99X0
		 *  to mask irq/MSI.
		 */
		break;
	}
}
/* Counterpart of ath10k_pci_irq_msi_fw_mask(): set the PCIE_REG_31 bit to
 * re-enable the firmware-indication MSI. No-op on QCA99X0 (see TODO).
 */
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
	u32 val;

	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
	case ATH10K_HW_QCA6174:
		val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
					CORE_CTRL_ADDRESS);
		val |= CORE_CTRL_PCIE_REG_31_MASK;
		ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
				   CORE_CTRL_ADDRESS, val);
		break;
	case ATH10K_HW_QCA99X0:
		/* TODO: Find appropriate register configuration for QCA99X0
		 *  to unmask irq/MSI.
		 */
		break;
	}
}
2013-06-12 20:52:10 +03:00
2014-10-20 14:14:38 +02:00
/* Disable all interrupt sources: CE interrupts, the legacy line, and the
 * firmware-indication MSI.
 */
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);
}
/* Wait for any in-flight interrupt handlers to finish. With legacy
 * interrupts num_msi_intrs is 0, hence the max(1, ...) so the base irq is
 * still synchronized.
 */
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		synchronize_irq(ar_pci->pdev->irq + i);
}
2014-08-22 14:23:33 +02:00
/* Enable all interrupt sources (reverse of ath10k_pci_irq_disable()). */
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
	ath10k_ce_enable_interrupts(ar);
	ath10k_pci_enable_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_unmask(ar);
}
/* HIF start: enable interrupts, fill the rx rings, and restore the saved
 * PCIe link control value (ar_pci->link_ctl). Always returns 0.
 */
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_irq_enable(ar);
	ath10k_pci_rx_post(ar);

	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl);

	return 0;
}
2014-10-28 10:32:05 +01:00
/* Release every rx buffer still posted on a pipe's destination ring:
 * unmap its DMA mapping and free the skb. Pipes without a dest ring or
 * with buf_sz == 0 (e.g. the diag pipe) are skipped.
 */
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->dest_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}
}
2014-10-28 10:32:05 +01:00
/* Complete every tx buffer still pending on a pipe's source ring by
 * handing it to the registered tx_completion callback. Pipes without a
 * src ring or with buf_sz == 0 are skipped.
 */
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_pipe;
	struct ath10k_ce_ring *ce_ring;
	struct ce_desc *ce_desc;
	struct sk_buff *skb;
	int i;

	ar = pci_pipe->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);
	ce_pipe = pci_pipe->ce_hdl;
	ce_ring = ce_pipe->src_ring;

	if (!ce_ring)
		return;

	if (!pci_pipe->buf_sz)
		return;

	ce_desc = ce_ring->shadow_base;
	if (WARN_ON(!ce_desc))
		return;

	for (i = 0; i < ce_ring->nentries; i++) {
		skb = ce_ring->per_transfer_context[i];
		if (!skb)
			continue;

		ce_ring->per_transfer_context[i] = NULL;

		ar_pci->msg_callbacks_current.tx_completion(ar, skb);
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	/* drain both directions of every pipe */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}
/* De-initialize every copy engine pipe. */
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	int i;

	for (i = 0; i < CE_COUNT; i++)
		ath10k_ce_deinit_pipe(ar, i);
}
2014-08-22 14:33:14 +02:00
/* Flush pending work: stop tasklets/timers, then reclaim all buffers. */
static void ath10k_pci_flush(struct ath10k *ar)
{
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_buffer_cleanup(ar);
}
2013-06-12 20:52:10 +03:00
/* HIF stop. The ordering here is deliberate: the chip is reset first so
 * it can no longer DMA into (and corrupt) host memory or raise the
 * unmaskable firmware MSI, and only then are host-side irqs disabled,
 * synchronized and buffers flushed.
 */
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long flags;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");

	/* Most likely the device has HTT Rx ring configured. The only way to
	 * prevent the device from accessing (and possible corrupting) host
	 * memory is to reset the chip now.
	 *
	 * There's also no known way of masking MSI interrupts on the device.
	 * For ranged MSI the CE-related interrupts can be masked. However
	 * regardless how many MSI interrupts are assigned the first one
	 * is always used for firmware indications (crashes) and cannot be
	 * masked. To prevent the device from asserting the interrupt reset it
	 * before proceeding with cleanup.
	 */
	ath10k_pci_safe_chip_reset(ar);

	ath10k_pci_irq_disable(ar);
	ath10k_pci_irq_sync(ar);
	ath10k_pci_flush(ar);

	/* sanity check: nobody should still be holding a wakeup reference */
	spin_lock_irqsave(&ar_pci->ps_lock, flags);
	WARN_ON(ar_pci->ps_wake_refcount > 0);
	spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
}
/* Exchange one BMI request/response pair with the target over the BMI CE
 * pipes. The request is copied to a DMA-able bounce buffer (treq); if a
 * response is expected a second bounce buffer (tresp) is posted on the rx
 * pipe first. Completion is polled via ath10k_pci_bmi_wait(); on timeout
 * the pending send descriptor is cancelled. On success *resp_len is
 * clamped to the actual response length. May sleep.
 */
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	might_sleep();

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		/* post the response buffer before sending the request */
		ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
	}

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		/* timed out - withdraw the request still on the ring */
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
2013-09-03 15:09:58 +02:00
/* BMI tx completion: if the send descriptor has completed, mark the
 * associated transfer as tx_done for the poller in ath10k_pci_bmi_wait().
 */
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	xfer->tx_done = true;
}
2013-09-03 15:09:58 +02:00
/* BMI rx completion: record the received response length and mark the
 * transfer rx_done. Data arriving when no response was expected is
 * logged and dropped.
 */
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (WARN_ON_ONCE(!xfer))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	xfer->rx_done = true;
}
2013-11-25 14:06:22 +01:00
/* Busy-poll both BMI pipes until the send has completed and - when a
 * response is expected - the response has arrived, or until
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses. Returns 0 on completion,
 * -ETIMEDOUT otherwise.
 */
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		/* rx_done must match wait_for_resp: done only when the
		 * expected response (if any) has been received
		 */
		if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}
2013-06-12 20:52:10 +03:00
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	u32 addr, val;

	/* set the CPU interrupt bit in the core control register */
	addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
	val = ath10k_pci_read32(ar, addr);
	val |= CORE_CTRL_CPU_INTR_MASK;
	ath10k_pci_write32(ar, addr, val);

	return 0;
}
2015-01-24 12:14:49 +02:00
/* Return the number of IRAM banks to reserve via early allocation for the
 * detected PCI device/chip revision. Unknown combinations fall back to 1
 * with a warning.
 */
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->pdev->device) {
	case QCA988X_2_0_DEVICE_ID:
	case QCA99X0_2_0_DEVICE_ID:
		return 1;
	case QCA6174_2_1_DEVICE_ID:
		/* bank count differs per QCA6174 silicon revision */
		switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
		case QCA6174_HW_1_0_CHIP_ID_REV:
		case QCA6174_HW_1_1_CHIP_ID_REV:
		case QCA6174_HW_2_1_CHIP_ID_REV:
		case QCA6174_HW_2_2_CHIP_ID_REV:
			return 3;
		case QCA6174_HW_1_3_CHIP_ID_REV:
			return 2;
		case QCA6174_HW_3_0_CHIP_ID_REV:
		case QCA6174_HW_3_1_CHIP_ID_REV:
		case QCA6174_HW_3_2_CHIP_ID_REV:
			return 9;
		}
		break;
	}

	ath10k_warn(ar, "unknown number of banks, assuming 1\n");
	return 1;
}
2013-06-12 20:52:10 +03:00
static int ath10k_pci_init_config ( struct ath10k * ar )
{
u32 interconnect_targ_addr ;
u32 pcie_state_targ_addr = 0 ;
u32 pipe_cfg_targ_addr = 0 ;
u32 svc_to_pipe_map = 0 ;
u32 pcie_config_flags = 0 ;
u32 ealloc_value ;
u32 ealloc_targ_addr ;
u32 flag2_value ;
u32 flag2_targ_addr ;
int ret = 0 ;
/* Download to Target the CE Config and the service-to-CE map */
interconnect_targ_addr =
host_interest_item_address ( HI_ITEM ( hi_interconnect_state ) ) ;
/* Supply Target-side CE configuration */
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_read32 ( ar , interconnect_targ_addr ,
& pcie_state_targ_addr ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to get pcie state addr: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
if ( pcie_state_targ_addr = = 0 ) {
ret = - EIO ;
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Invalid pcie state addr \n " ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_read32 ( ar , ( pcie_state_targ_addr +
2013-06-12 20:52:10 +03:00
offsetof ( struct pcie_state ,
2014-09-02 11:00:21 +03:00
pipe_cfg_addr ) ) ,
& pipe_cfg_targ_addr ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to get pipe cfg addr: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
if ( pipe_cfg_targ_addr = = 0 ) {
ret = - EIO ;
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Invalid pipe cfg addr \n " ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
ret = ath10k_pci_diag_write_mem ( ar , pipe_cfg_targ_addr ,
2014-09-14 12:50:06 +03:00
target_ce_config_wlan ,
2015-06-18 12:31:04 +05:30
sizeof ( struct ce_pipe_config ) *
NUM_TARGET_CE_CONFIG_WLAN ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to write pipe cfg: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_read32 ( ar , ( pcie_state_targ_addr +
2013-06-12 20:52:10 +03:00
offsetof ( struct pcie_state ,
2014-09-02 11:00:21 +03:00
svc_to_pipe_map ) ) ,
& svc_to_pipe_map ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to get svc/pipe map: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
if ( svc_to_pipe_map = = 0 ) {
ret = - EIO ;
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Invalid svc_to_pipe map \n " ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
ret = ath10k_pci_diag_write_mem ( ar , svc_to_pipe_map ,
2014-09-14 12:50:06 +03:00
target_service_to_ce_map_wlan ,
sizeof ( target_service_to_ce_map_wlan ) ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to write svc/pipe map: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_read32 ( ar , ( pcie_state_targ_addr +
2013-06-12 20:52:10 +03:00
offsetof ( struct pcie_state ,
2014-09-02 11:00:21 +03:00
config_flags ) ) ,
& pcie_config_flags ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to get pcie config_flags: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
pcie_config_flags & = ~ PCIE_CONFIG_FLAG_ENABLE_L1 ;
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_write32 ( ar , ( pcie_state_targ_addr +
offsetof ( struct pcie_state ,
config_flags ) ) ,
pcie_config_flags ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to write pcie config_flags: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
/* configure early allocation */
ealloc_targ_addr = host_interest_item_address ( HI_ITEM ( hi_early_alloc ) ) ;
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_read32 ( ar , ealloc_targ_addr , & ealloc_value ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Faile to get early alloc val: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
/* first bank is switched to IRAM */
ealloc_value | = ( ( HI_EARLY_ALLOC_MAGIC < < HI_EARLY_ALLOC_MAGIC_SHIFT ) &
HI_EARLY_ALLOC_MAGIC_MASK ) ;
2015-01-24 12:14:49 +02:00
ealloc_value | = ( ( ath10k_pci_get_num_banks ( ar ) < <
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT ) &
2013-06-12 20:52:10 +03:00
HI_EARLY_ALLOC_IRAM_BANKS_MASK ) ;
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_write32 ( ar , ealloc_targ_addr , ealloc_value ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to set early alloc val: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
/* Tell Target to proceed with initialization */
flag2_targ_addr = host_interest_item_address ( HI_ITEM ( hi_option_flag2 ) ) ;
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_read32 ( ar , flag2_targ_addr , & flag2_value ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to get option val: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
flag2_value | = HI_OPTION_EARLY_CFG_DONE ;
2014-09-02 11:00:21 +03:00
ret = ath10k_pci_diag_write32 ( ar , flag2_targ_addr , flag2_value ) ;
2013-06-12 20:52:10 +03:00
if ( ret ! = 0 ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " Failed to set option val: %d \n " , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
return 0 ;
}
2014-10-20 14:14:39 +02:00
/* Allocate all copy engine (CE) pipes and wire each one to its host-side
 * pipe state.  The CE at CE_DIAG_PIPE is reserved as the Diagnostic Window
 * and recorded in ar_pci->ce_diag instead of getting a buffer size.
 *
 * Returns 0 on success or a negative errno from ath10k_ce_alloc_pipe().
 * NOTE(review): pipes allocated before a failing iteration are not freed
 * here — presumably the caller runs ath10k_pci_free_pipes() on error; confirm.
 */
static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe;
	int i, ret;

	for (i = 0; i < CE_COUNT; i++) {
		pipe = &ar_pci->pipe_info[i];
		pipe->ce_hdl = &ar_pci->ce_states[i];
		pipe->pipe_num = i;
		pipe->hif_ce_state = ar;

		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
					   ath10k_pci_ce_send_done,
					   ath10k_pci_ce_recv_data);
		if (ret) {
			ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
				   i, ret);
			return ret;
		}

		/* Last CE is Diagnostic Window */
		if (i == CE_DIAG_PIPE) {
			ar_pci->ce_diag = pipe->ce_hdl;
			continue;
		}

		pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
	}

	return 0;
}
2014-10-20 14:14:39 +02:00
static void ath10k_pci_free_pipes ( struct ath10k * ar )
2014-03-28 10:02:38 +02:00
{
int i ;
2013-06-12 20:52:10 +03:00
2014-03-28 10:02:38 +02:00
for ( i = 0 ; i < CE_COUNT ; i + + )
ath10k_ce_free_pipe ( ar , i ) ;
}
2013-06-12 20:52:10 +03:00
2014-10-20 14:14:39 +02:00
static int ath10k_pci_init_pipes ( struct ath10k * ar )
2013-06-12 20:52:10 +03:00
{
2014-10-20 14:14:39 +02:00
int i , ret ;
2013-06-12 20:52:10 +03:00
2014-10-20 14:14:39 +02:00
for ( i = 0 ; i < CE_COUNT ; i + + ) {
ret = ath10k_ce_init_pipe ( ar , i , & host_ce_config_wlan [ i ] ) ;
2014-03-28 10:02:38 +02:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " failed to initialize copy engine pipe %d: %d \n " ,
2014-10-20 14:14:39 +02:00
i , ret ) ;
2014-03-28 10:02:38 +02:00
return ret ;
2013-06-12 20:52:10 +03:00
}
}
return 0 ;
}
2014-08-22 14:23:34 +02:00
static bool ath10k_pci_has_fw_crashed ( struct ath10k * ar )
2013-06-12 20:52:10 +03:00
{
2014-08-22 14:23:34 +02:00
return ath10k_pci_read32 ( ar , FW_INDICATOR_ADDRESS ) &
FW_IND_EVENT_PENDING ;
}
2013-06-12 20:52:10 +03:00
2014-08-22 14:23:34 +02:00
/* Acknowledge a firmware crash by clearing the event-pending indicator bit. */
static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
{
	u32 fw_ind;

	fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
			   fw_ind & ~FW_IND_EVENT_PENDING);
}
2014-05-14 16:56:16 +03:00
/* this function effectively clears target memory controller assert line */
static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
{
	u32 val;

	/* Assert SI0 reset. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val | SOC_RESET_CONTROL_SI0_RST_MASK);
	/* Read back before waiting — presumably to flush the posted write;
	 * keep the read even though the value is unused.
	 */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);

	/* De-assert SI0 reset, again with a flushing read-back. */
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
			       val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
	val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);

	msleep(10);
}
2014-10-28 10:32:06 +01:00
/* Assert a warm reset of the target CPU core.
 * NOTE(review): the reset bit is only set here, never cleared in this
 * function — de-assertion appears to happen as part of the wider warm
 * reset sequence; confirm against ath10k_pci_warm_reset().
 */
static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
{
	u32 val;

	/* Clear the firmware indicator so stale state cannot be misread
	 * after the reset.
	 */
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
}
/* Pulse the copy engine reset line: assert, wait 10ms, de-assert. */
static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	msleep(10);
	/* Restore from the value read above with the CE bit cleared. */
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
}
/* Disable the SoC low-frequency timer (LF_TIMER_CONTROL0 enable bit). */
static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
{
	u32 val;

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
}
2014-02-10 17:14:22 +01:00
2014-10-28 10:32:06 +01:00
/* Perform a full warm reset of the target.  The exact ordering of the
 * sub-steps below is deliberate; do not reorder them.
 *
 * Returns 0 on success, or the error from the final target-init wait.
 */
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_warm_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	ath10k_pci_irq_disable(ar);

	/* Make sure the target CPU is not doing anything dangerous, e.g. if it
	 * were to access copy engine while host performs copy engine reset
	 * then it is possible for the device to confuse pci-e controller to
	 * the point of bringing host system to a complete stop (i.e. hang).
	 */
	ath10k_pci_warm_reset_si0(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);
	/* First wait is best-effort only — its result is intentionally
	 * ignored; the sequence is repeated below and checked then.
	 */
	ath10k_pci_wait_for_target_init(ar);

	ath10k_pci_warm_reset_clear_lf(ar);
	ath10k_pci_warm_reset_ce(ar);
	ath10k_pci_warm_reset_cpu(ar);
	ath10k_pci_init_pipes(ar);

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");

	return 0;
}
2015-06-18 12:31:06 +05:30
/* Reset the chip using only the method considered safe for the detected
 * hardware generation (warm reset where cold reset may hang the host).
 */
static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
{
	if (QCA_REV_988X(ar) || QCA_REV_6174(ar))
		return ath10k_pci_warm_reset(ar);

	if (QCA_REV_99X0(ar)) {
		ath10k_pci_irq_disable(ar);
		return ath10k_pci_qca99x0_chip_reset(ar);
	}

	return -ENOTSUPP;
}
2015-01-24 12:14:49 +02:00
/* QCA988x reset strategy: try warm reset a few times (verifying each
 * attempt by poking a host interest register through the copy engine),
 * then fall back to cold reset unless the reset_mode module parameter
 * forbids it.
 */
static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");

	/* Some hardware revisions (e.g. CUS223v2) has issues with cold reset.
	 * It is thus preferred to use warm reset which is safer but may not be
	 * able to recover the device from all possible fail scenarios.
	 *
	 * Warm reset doesn't always work on first try so attempt it a few
	 * times before giving up.
	 */
	for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
		ret = ath10k_pci_warm_reset(ar);
		if (ret) {
			ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
				    i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
				    ret);
			continue;
		}

		/* FIXME: Sometimes copy engine doesn't recover after warm
		 * reset. In most cases this needs cold reset. In some of these
		 * cases the device is in such a state that a cold reset may
		 * lock up the host.
		 *
		 * Reading any host interest register via copy engine is
		 * sufficient to verify if device is capable of booting
		 * firmware blob.
		 */
		ret = ath10k_pci_init_pipes(ar);
		if (ret) {
			ath10k_warn(ar, "failed to init copy engine: %d\n",
				    ret);
			continue;
		}

		ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
					     &val);
		if (ret) {
			ath10k_warn(ar, "failed to poke copy engine: %d\n",
				    ret);
			continue;
		}

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
		return 0;
	}

	/* All warm reset attempts failed; cold reset is the last resort. */
	if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
		ath10k_warn(ar, "refusing cold reset as requested\n");
		return -EPERM;
	}

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");

	return 0;
}
/* QCA6174 reset sequence.
 *
 * Fix: the completion debug message used to say "(cold)" even though this
 * path performs a cold reset followed by a warm reset; the misleading
 * suffix is dropped so boot logs reflect the actual reset performed.
 */
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");

	/* FIXME: QCA6174 requires cold + warm reset to work. */

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ret = ath10k_pci_warm_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to warm reset: %d\n", ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete\n");

	return 0;
}
2015-06-18 12:31:06 +05:30
/* QCA99x0 reset: a single cold reset followed by waiting for target init. */
static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");

	ret = ath10k_pci_cold_reset(ar);
	if (ret) {
		ath10k_warn(ar, "failed to cold reset: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
			    ret);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");

	return 0;
}
2015-01-24 12:14:49 +02:00
static int ath10k_pci_chip_reset ( struct ath10k * ar )
{
if ( QCA_REV_988X ( ar ) )
return ath10k_pci_qca988x_chip_reset ( ar ) ;
else if ( QCA_REV_6174 ( ar ) )
return ath10k_pci_qca6174_chip_reset ( ar ) ;
2015-06-18 12:31:06 +05:30
else if ( QCA_REV_99X0 ( ar ) )
return ath10k_pci_qca99x0_chip_reset ( ar ) ;
2015-01-24 12:14:49 +02:00
else
return - ENOTSUPP ;
}
2014-10-28 10:32:07 +01:00
/* HIF power-up: disable ASPM, reset the chip, bring up the copy engines,
 * push the initial configuration and wake the target CPU.  Uses goto-based
 * unwind; only CE state needs explicit teardown on late failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");

	/* Save link control and disable ASPM while powering up.
	 * NOTE(review): ar_pci->link_ctl presumably gets restored on the
	 * power-down/sleep path — confirm against that code.
	 */
	pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				  &ar_pci->link_ctl);
	pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
				   ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ret = ath10k_pci_chip_reset(ar);
	if (ret) {
		if (ath10k_pci_has_fw_crashed(ar)) {
			ath10k_warn(ar, "firmware crashed during chip reset\n");
			ath10k_pci_fw_crashed_clear(ar);
			ath10k_pci_fw_crashed_dump(ar);
		}

		ath10k_err(ar, "failed to reset chip: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_pipes(ar);
	if (ret) {
		ath10k_err(ar, "failed to initialize CE: %d\n", ret);
		goto err_sleep;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err(ar, "failed to setup init config: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);

err_sleep:
	return ret;
}
2013-07-16 09:38:50 +02:00
/* HIF power-down hook: intentionally a no-op apart from the debug trace. */
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");

	/* Currently hif_power_up performs effectively a reset and hif_stop
	 * resets the chip as well so there's no point in resetting here.
	 */
}
2013-07-16 09:38:54 +02:00
# ifdef CONFIG_PM
/* HIF suspend hook: only ensures the device is actually asleep. */
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	/* The grace timer can still be counting down and ar->ps_awake be true.
	 * It is known that the device may be asleep after resuming regardless
	 * of the SoC powersave state before suspending. Hence make sure the
	 * device is asleep before proceeding.
	 */
	ath10k_pci_sleep_sync(ar);

	return 0;
}
/* HIF resume hook: re-apply the retry-timeout quirk lost over suspend.
 * Always returns 0.
 */
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	/* Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
	 * from interfering with C3 CPU state. pci_restore_state won't help
	 * here since it only restores the first 64 bytes pci config header.
	 */
	/* 0x40 is the dword containing the RETRY_TIMEOUT byte (offset 0x41);
	 * clear just that byte if it is non-zero.
	 */
	pci_read_config_dword(pdev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

	return 0;
}
# endif
2013-06-12 20:52:10 +03:00
/* HIF (host interface) callback table binding the bus-agnostic ath10k core
 * to this PCI transport implementation.
 */
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.diag_read		= ath10k_pci_hif_diag_read,
	.diag_write		= ath10k_pci_diag_write_mem,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
	.read32			= ath10k_pci_read32,
	.write32		= ath10k_pci_write32,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
static void ath10k_pci_ce_tasklet ( unsigned long ptr )
{
2013-08-27 13:08:01 +02:00
struct ath10k_pci_pipe * pipe = ( struct ath10k_pci_pipe * ) ptr ;
2013-06-12 20:52:10 +03:00
struct ath10k_pci * ar_pci = pipe - > ar_pci ;
ath10k_ce_per_engine_service ( ar_pci - > ar , pipe - > pipe_num ) ;
}
/* Tasklet handling the dedicated firmware-error MSI vector. */
static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
	} else {
		ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
	}
}
/*
* Handler for a per - engine interrupt on a PARTICULAR CE .
* This is used in cases where each CE has a private MSI interrupt .
*/
static irqreturn_t ath10k_pci_per_engine_handler ( int irq , void * arg )
{
struct ath10k * ar = arg ;
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
int ce_id = irq - ar_pci - > pdev - > irq - MSI_ASSIGN_CE_INITIAL ;
2013-06-18 10:28:46 +03:00
if ( ce_id < 0 | | ce_id > = ARRAY_SIZE ( ar_pci - > pipe_info ) ) {
2014-08-25 12:09:38 +02:00
ath10k_warn ( ar , " unexpected/invalid irq %d ce_id %d \n " , irq ,
ce_id ) ;
2013-06-12 20:52:10 +03:00
return IRQ_HANDLED ;
}
/*
* NOTE : We are able to derive ce_id from irq because we
* use a one - to - one mapping for CE ' s 0. .5 .
* CE ' s 6 & 7 do not use interrupts at all .
*
* This mapping must be kept in sync with the mapping
* used by firmware .
*/
tasklet_schedule ( & ar_pci - > pipe_info [ ce_id ] . intr ) ;
return IRQ_HANDLED ;
}
static irqreturn_t ath10k_pci_msi_fw_handler ( int irq , void * arg )
{
struct ath10k * ar = arg ;
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
tasklet_schedule ( & ar_pci - > msi_fw_err ) ;
return IRQ_HANDLED ;
}
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Shared legacy INTx line: claim the interrupt only if this
		 * device actually asserted it.
		 */
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		/* Mask legacy irqs until the tasklet has run; they are
		 * re-enabled at the end of ath10k_pci_tasklet().
		 */
		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
2014-08-22 14:23:34 +02:00
/* Main interrupt tasklet: dump state on firmware crash, otherwise service
 * all copy engines and re-arm the legacy irq if that mode is in use.
 */
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ath10k_pci_has_fw_crashed(ar)) {
		ath10k_pci_irq_disable(ar);
		ath10k_pci_fw_crashed_clear(ar);
		ath10k_pci_fw_crashed_dump(ar);
		return;
	}

	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}
2013-11-25 14:06:21 +01:00
/* Request the full MSI-X style vector block: one fw-error vector plus one
 * vector per interrupt-capable CE.  On partial failure all previously
 * requested vectors are released before returning the error.
 */
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			/* Unwind: free the CE vectors granted so far, then
			 * the fw-error vector.
			 */
			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}
2013-11-25 14:06:21 +01:00
static int ath10k_pci_request_irq_msi ( struct ath10k * ar )
2013-06-12 20:52:10 +03:00
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
int ret ;
ret = request_irq ( ar_pci - > pdev - > irq ,
ath10k_pci_interrupt_handler ,
IRQF_SHARED , " ath10k_pci " , ar ) ;
2013-11-25 14:06:21 +01:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_warn ( ar , " failed to request MSI irq %d: %d \n " ,
2013-11-25 14:06:21 +01:00
ar_pci - > pdev - > irq , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
}
return 0 ;
}
2013-11-25 14:06:21 +01:00
static int ath10k_pci_request_irq_legacy ( struct ath10k * ar )
2013-06-12 20:52:10 +03:00
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
int ret ;
ret = request_irq ( ar_pci - > pdev - > irq ,
ath10k_pci_interrupt_handler ,
IRQF_SHARED , " ath10k_pci " , ar ) ;
2013-10-17 11:36:15 +03:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_warn ( ar , " failed to request legacy irq %d: %d \n " ,
2013-11-25 14:06:21 +01:00
ar_pci - > pdev - > irq , ret ) ;
2013-06-12 20:52:10 +03:00
return ret ;
2013-10-17 11:36:15 +03:00
}
2013-06-12 20:52:10 +03:00
return 0 ;
}
2013-11-25 14:06:21 +01:00
static int ath10k_pci_request_irq ( struct ath10k * ar )
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
2013-06-12 20:52:10 +03:00
2013-11-25 14:06:21 +01:00
switch ( ar_pci - > num_msi_intrs ) {
case 0 :
return ath10k_pci_request_irq_legacy ( ar ) ;
case 1 :
return ath10k_pci_request_irq_msi ( ar ) ;
case MSI_NUM_REQUEST :
return ath10k_pci_request_irq_msix ( ar ) ;
}
2013-06-12 20:52:10 +03:00
2014-08-25 12:09:38 +02:00
ath10k_warn ( ar , " unknown irq configuration upon request \n " ) ;
2013-11-25 14:06:21 +01:00
return - EINVAL ;
2013-06-12 20:52:10 +03:00
}
2013-11-25 14:06:21 +01:00
static void ath10k_pci_free_irq ( struct ath10k * ar )
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
int i ;
/* There's at least one interrupt irregardless whether its legacy INTR
* or MSI or MSI - X */
for ( i = 0 ; i < max ( 1 , ar_pci - > num_msi_intrs ) ; i + + )
free_irq ( ar_pci - > pdev - > irq + i , ar ) ;
}
static void ath10k_pci_init_irq_tasklets ( struct ath10k * ar )
2013-06-12 20:52:10 +03:00
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
int i ;
2013-11-25 14:06:21 +01:00
tasklet_init ( & ar_pci - > intr_tq , ath10k_pci_tasklet , ( unsigned long ) ar ) ;
2013-06-12 20:52:10 +03:00
tasklet_init ( & ar_pci - > msi_fw_err , ath10k_msi_err_tasklet ,
2013-11-25 14:06:21 +01:00
( unsigned long ) ar ) ;
2013-06-12 20:52:10 +03:00
for ( i = 0 ; i < CE_COUNT ; i + + ) {
ar_pci - > pipe_info [ i ] . ar_pci = ar_pci ;
2013-11-25 14:06:21 +01:00
tasklet_init ( & ar_pci - > pipe_info [ i ] . intr , ath10k_pci_ce_tasklet ,
2013-06-12 20:52:10 +03:00
( unsigned long ) & ar_pci - > pipe_info [ i ] ) ;
}
2013-11-25 14:06:21 +01:00
}
/* Pick and enable an interrupt delivery mode, honouring the irq_mode module
 * parameter.  Tries the full MSI vector block first, then a single MSI,
 * and finally falls back to legacy INTx.  num_msi_intrs records the result
 * (MSI_NUM_REQUEST, 1, or 0).  Always returns 0.
 */
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
		ath10k_info(ar, "limiting irq mode to: %d\n",
			    ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		/* min == max here: all-or-nothing vector allocation. */
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	return 0;
}
2014-08-07 11:03:28 +02:00
/* Mask all legacy interrupt sources by clearing the target's PCIe
 * interrupt enable register.
 */
static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
}
2013-11-25 14:06:21 +01:00
/* Tear down the interrupt mode set up by ath10k_pci_init_irq().
 * Returns 0 for the known configurations; an unexpected num_msi_intrs
 * still disables MSI (via the default case falling through to the warn
 * below) but reports -EINVAL.
 */
static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		ath10k_pci_deinit_irq_legacy(ar);
		return 0;
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn(ar, "unknown irq configuration upon deinit\n");
	return -EINVAL;
}
2013-11-08 08:01:26 +01:00
/* Poll the firmware indicator register until the target reports it has
 * initialized, it crashes, or ATH10K_PCI_TARGET_WAIT ms elapse.
 *
 * Returns 0 on success, -EIO if the device reads back all-ones (gone),
 * -ECOMM if firmware crashed during init, -ETIMEDOUT otherwise.
 */
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
			   val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		/* the device has crashed so don't bother trying anymore */
		if (val & FW_IND_EVENT_PENDING)
			break;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_enable_legacy_irq(ar);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	/* Quiesce irqs again; callers re-enable what they need. */
	ath10k_pci_disable_and_clear_legacy_irq(ar);
	ath10k_pci_irq_msi_fw_mask(ar);

	if (val == 0xffffffff) {
		ath10k_err(ar, "failed to read device register, device is gone\n");
		return -EIO;
	}

	if (val & FW_IND_EVENT_PENDING) {
		ath10k_warn(ar, "device has crashed during init\n");
		return -ECOMM;
	}

	if (!(val & FW_IND_INITIALIZED)) {
		ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
			   val);
		return -ETIMEDOUT;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");

	return 0;
}
2014-02-10 17:14:22 +01:00
/* Cold reset: pulse the global reset bit and poll RTC state for the reset
 * to take and then clear.
 *
 * NOTE(review): a poll timeout is not treated as an error here — this
 * function always returns 0; failures surface later via
 * ath10k_pci_wait_for_target_init().
 */
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i;
	u32 val;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");

	spin_lock_bh(&ar->data_lock);
	ar->stats.fw_cold_reset_counter++;
	spin_unlock_bh(&ar->data_lock);

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
2014-08-07 11:03:30 +02:00
/* Claim the PCI device: enable it, reserve its BAR, set 32-bit DMA masks,
 * enable bus mastering and iomap the register BAR.  Uses goto-based unwind
 * so each acquired resource is released on later failure.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath10k_pci_claim(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	int ret;

	pci_set_drvdata(pdev, ar);

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err(ar, "failed to enable pci device: %d\n", ret);
		return ret;
	}

	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
			   ret);
		goto err_device;
	}

	/* Target expects 32 bit DMA. Enforce it. */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
			   ret);
		goto err_region;
	}

	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
	ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!ar_pci->mem) {
		ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
	return 0;

err_master:
	pci_clear_master(pdev);

err_region:
	pci_release_region(pdev, BAR_NUM);

err_device:
	pci_disable_device(pdev);

	return ret;
}
static void ath10k_pci_release ( struct ath10k * ar )
{
struct ath10k_pci * ar_pci = ath10k_pci_priv ( ar ) ;
struct pci_dev * pdev = ar_pci - > pdev ;
pci_iounmap ( pdev , ar_pci - > mem ) ;
pci_release_region ( pdev , BAR_NUM ) ;
pci_clear_master ( pdev ) ;
pci_disable_device ( pdev ) ;
}
2014-12-02 10:55:54 +02:00
static bool ath10k_pci_chip_is_supported ( u32 dev_id , u32 chip_id )
{
const struct ath10k_pci_supp_chip * supp_chip ;
int i ;
u32 rev_id = MS ( chip_id , SOC_CHIP_ID_REV ) ;
for ( i = 0 ; i < ARRAY_SIZE ( ath10k_pci_supp_chips ) ; i + + ) {
supp_chip = & ath10k_pci_supp_chips [ i ] ;
if ( supp_chip - > dev_id = = dev_id & &
supp_chip - > rev_id = = rev_id )
return true ;
}
return false ;
}
2014-08-07 11:03:30 +02:00
static int ath10k_pci_probe ( struct pci_dev * pdev ,
const struct pci_device_id * pci_dev )
{
int ret = 0 ;
struct ath10k * ar ;
struct ath10k_pci * ar_pci ;
2015-01-24 12:14:49 +02:00
enum ath10k_hw_rev hw_rev ;
2014-08-07 11:03:30 +02:00
u32 chip_id ;
2015-01-24 12:14:49 +02:00
switch ( pci_dev - > device ) {
case QCA988X_2_0_DEVICE_ID :
hw_rev = ATH10K_HW_QCA988X ;
break ;
case QCA6174_2_1_DEVICE_ID :
hw_rev = ATH10K_HW_QCA6174 ;
break ;
2015-06-18 12:31:03 +05:30
case QCA99X0_2_0_DEVICE_ID :
hw_rev = ATH10K_HW_QCA99X0 ;
break ;
2015-01-24 12:14:49 +02:00
default :
WARN_ON ( 1 ) ;
return - ENOTSUPP ;
}
ar = ath10k_core_create ( sizeof ( * ar_pci ) , & pdev - > dev , ATH10K_BUS_PCI ,
hw_rev , & ath10k_pci_hif_ops ) ;
2014-08-07 11:03:30 +02:00
if ( ! ar ) {
2014-08-25 12:09:38 +02:00
dev_err ( & pdev - > dev , " failed to allocate core \n " ) ;
2014-08-07 11:03:30 +02:00
return - ENOMEM ;
}
2014-08-25 12:09:38 +02:00
ath10k_dbg ( ar , ATH10K_DBG_PCI , " pci probe \n " ) ;
2014-08-07 11:03:30 +02:00
ar_pci = ath10k_pci_priv ( ar ) ;
ar_pci - > pdev = pdev ;
ar_pci - > dev = & pdev - > dev ;
ar_pci - > ar = ar ;
2013-06-12 20:52:10 +03:00
2015-04-17 09:19:17 +00:00
if ( pdev - > subsystem_vendor | | pdev - > subsystem_device )
scnprintf ( ar - > spec_board_id , sizeof ( ar - > spec_board_id ) ,
" %04x:%04x:%04x:%04x " ,
pdev - > vendor , pdev - > device ,
pdev - > subsystem_vendor , pdev - > subsystem_device ) ;
2013-06-12 20:52:10 +03:00
spin_lock_init ( & ar_pci - > ce_lock ) ;
2015-05-18 09:38:18 +00:00
spin_lock_init ( & ar_pci - > ps_lock ) ;
2014-08-22 14:33:14 +02:00
setup_timer ( & ar_pci - > rx_post_retry , ath10k_pci_rx_replenish_retry ,
( unsigned long ) ar ) ;
2015-05-18 09:38:18 +00:00
setup_timer ( & ar_pci - > ps_timer , ath10k_pci_ps_timer ,
( unsigned long ) ar ) ;
2013-06-12 20:52:10 +03:00
2014-08-07 11:03:30 +02:00
ret = ath10k_pci_claim ( ar ) ;
2013-09-01 11:22:14 +03:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " failed to claim device: %d \n " , ret ) ;
2014-08-07 11:03:30 +02:00
goto err_core_destroy ;
2013-09-01 11:22:14 +03:00
}
2014-10-20 14:14:39 +02:00
ret = ath10k_pci_alloc_pipes ( ar ) ;
2014-03-28 10:02:38 +02:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " failed to allocate copy engine pipes: %d \n " ,
ret ) ;
2014-08-07 11:03:28 +02:00
goto err_sleep ;
2014-03-28 10:02:38 +02:00
}
2014-08-22 14:23:31 +02:00
ath10k_pci_ce_deinit ( ar ) ;
2014-10-20 14:14:38 +02:00
ath10k_pci_irq_disable ( ar ) ;
2013-09-08 17:55:50 +03:00
2014-08-22 14:23:31 +02:00
ret = ath10k_pci_init_irq ( ar ) ;
2013-06-12 20:52:10 +03:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " failed to init irqs: %d \n " , ret ) ;
2014-10-20 14:14:39 +02:00
goto err_free_pipes ;
2013-06-12 20:52:10 +03:00
}
2014-08-25 12:09:38 +02:00
ath10k_info ( ar , " pci irq %s interrupts %d irq_mode %d reset_mode %d \n " ,
2014-08-22 14:23:31 +02:00
ath10k_pci_get_irq_method ( ar ) , ar_pci - > num_msi_intrs ,
ath10k_pci_irq_mode , ath10k_pci_reset_mode ) ;
2014-08-22 14:23:34 +02:00
ret = ath10k_pci_request_irq ( ar ) ;
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_warn ( ar , " failed to request irqs: %d \n " , ret ) ;
2014-08-22 14:23:34 +02:00
goto err_deinit_irq ;
}
2015-01-24 12:14:48 +02:00
ret = ath10k_pci_chip_reset ( ar ) ;
if ( ret ) {
ath10k_err ( ar , " failed to reset chip: %d \n " , ret ) ;
goto err_free_irq ;
}
chip_id = ath10k_pci_soc_read32 ( ar , SOC_CHIP_ID_ADDRESS ) ;
if ( chip_id = = 0xffffffff ) {
ath10k_err ( ar , " failed to get chip id \n " ) ;
goto err_free_irq ;
}
if ( ! ath10k_pci_chip_is_supported ( pdev - > device , chip_id ) ) {
ath10k_err ( ar , " device %04x with chip_id %08x isn't supported \n " ,
pdev - > device , chip_id ) ;
2015-04-10 13:01:27 +00:00
goto err_free_irq ;
2015-01-24 12:14:48 +02:00
}
2013-09-01 11:22:14 +03:00
ret = ath10k_core_register ( ar , chip_id ) ;
2013-06-12 20:52:10 +03:00
if ( ret ) {
2014-08-25 12:09:38 +02:00
ath10k_err ( ar , " failed to register driver core: %d \n " , ret ) ;
2014-08-22 14:23:34 +02:00
goto err_free_irq ;
2013-06-12 20:52:10 +03:00
}
return 0 ;
2014-08-22 14:23:34 +02:00
err_free_irq :
ath10k_pci_free_irq ( ar ) ;
2014-08-28 10:24:40 +02:00
ath10k_pci_kill_tasklet ( ar ) ;
2014-08-22 14:23:34 +02:00
2014-08-22 14:23:31 +02:00
err_deinit_irq :
ath10k_pci_deinit_irq ( ar ) ;
2014-10-20 14:14:39 +02:00
err_free_pipes :
ath10k_pci_free_pipes ( ar ) ;
2014-08-07 11:03:30 +02:00
2014-08-07 11:03:28 +02:00
err_sleep :
2015-05-29 07:35:24 +02:00
ath10k_pci_sleep_sync ( ar ) ;
2014-08-07 11:03:30 +02:00
ath10k_pci_release ( ar ) ;
2014-08-07 11:03:27 +02:00
err_core_destroy :
2013-06-12 20:52:10 +03:00
ath10k_core_destroy ( ar ) ;
return ret ;
}
static void ath10k_pci_remove ( struct pci_dev * pdev )
{
struct ath10k * ar = pci_get_drvdata ( pdev ) ;
struct ath10k_pci * ar_pci ;
2014-08-25 12:09:38 +02:00
ath10k_dbg ( ar , ATH10K_DBG_PCI , " pci remove \n " ) ;
2013-06-12 20:52:10 +03:00
if ( ! ar )
return ;
ar_pci = ath10k_pci_priv ( ar ) ;
if ( ! ar_pci )
return ;
ath10k_core_unregister ( ar ) ;
2014-08-22 14:23:34 +02:00
ath10k_pci_free_irq ( ar ) ;
2014-08-28 10:24:40 +02:00
ath10k_pci_kill_tasklet ( ar ) ;
2014-08-22 14:23:31 +02:00
ath10k_pci_deinit_irq ( ar ) ;
ath10k_pci_ce_deinit ( ar ) ;
2014-10-20 14:14:39 +02:00
ath10k_pci_free_pipes ( ar ) ;
2015-05-18 09:38:18 +00:00
ath10k_pci_sleep_sync ( ar ) ;
2014-08-07 11:03:30 +02:00
ath10k_pci_release ( ar ) ;
2013-06-12 20:52:10 +03:00
ath10k_core_destroy ( ar ) ;
}
/* Export the id table so userspace/modprobe can autoload the module. */
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

/* Driver registration glue: names, the supported device table and the
 * probe/remove entry points defined above.
 */
static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};
/* Module entry point: register the PCI driver with the core.  Uses raw
 * printk (not ath10k_err) because no struct ath10k exists yet.
 */
static int __init ath10k_pci_init(void)
{
	int rc = pci_register_driver(&ath10k_pci_driver);

	if (rc)
		printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
		       rc);

	return rc;
}
module_init(ath10k_pci_init);
/* Module exit point: unregistering the driver triggers ath10k_pci_remove()
 * for every bound device.
 */
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

/* Firmware blobs the driver may request at runtime; listing them lets
 * initramfs tooling bundle the right files for each supported chip.
 */

/* QCA988x 2.0 firmware files */
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);

/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);

/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);