Merge tag 'wireless-drivers-next-for-davem-2018-01-13' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.16

Here are patches which have been accumulating over the holidays and
after the New Year. Business as usual and nothing special really
standing out.

But what's noteworthy here is that Larry Finger is stepping down as
the rtlwifi maintainer. He has been maintaining rtlwifi since it was
applied back in 2010 in commit 0c8173385e ("rtl8192ce: Add new
driver") and it has been no easy role trying to juggle between the
vendor, demanding upstream community and users. So big thank you to
Larry for all his efforts!

ath10k

* more preparation work for wcn3990 support

* add memory dump to firmware coredump files

wil6210

* support scheduled scan

* support 40-bit DMA addresses

qtnfmac

* support MAC address based access control

* support for radar detection and Channel Availability Check (CAC)

mwifiex

* firmware coredump for usb devices

rtlwifi

* Larry Finger steps down as the maintainer and Ping-Ke Shih becomes
  the new maintainer

* add debugfs interfaces to dump registers and btcoex status, and also
  to write registers and send H2C commands
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d9631c7a5d
David S. Miller committed 2018-01-15 14:46:16 -05:00
152 changed files with 6850 additions and 2162 deletions
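
The wil6210 "40-bit DMA addresses" item above is, at its core, the
standard DMA-mask negotiation: the driver asks for the wider mask at
probe time and falls back to 32 bits if the platform refuses. A minimal
sketch of that pattern, with a hypothetical helper name and not taken
from the wil6210 patches themselves:

#include <linux/dma-mapping.h>

/* Hypothetical helper: prefer 40-bit DMA addressing, fall back to 32-bit. */
static int example_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)))
		return 0;	/* platform granted 40-bit addressing */

	/* Platform rejected 40 bits; retry with the safe default. */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}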


@ -11780,15 +11780,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.g
S: Maintained
F: drivers/net/wireless/realtek/rtl818x/rtl8187/
RTL8192CE WIRELESS DRIVER
M: Larry Finger <Larry.Finger@lwfinger.net>
M: Chaoming Li <chaoming_li@realsil.com.cn>
REALTEK WIRELESS DRIVER (rtlwifi family)
M: Ping-Ke Shih <pkshih@realtek.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
S: Maintained
F: drivers/net/wireless/realtek/rtlwifi/
F: drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
M: Jes Sorensen <Jes.Sorensen@gmail.com>


@ -21,6 +21,7 @@ ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any


@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above


@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above


@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -327,12 +327,12 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
* Guts of ath10k_ce_send.
* The caller takes responsibility for any needed locking.
*/
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
@ -384,6 +384,87 @@ exit:
return ret;
}
static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc_64 *desc, sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
unsigned int write_index = src_ring->write_index;
u32 ctrl_addr = ce_state->ctrl_addr;
__le32 *addr;
u32 desc_flags = 0;
int ret = 0;
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
return -ESHUTDOWN;
if (nbytes > ce_state->src_sz_max)
ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
ret = -ENOSR;
goto exit;
}
desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
write_index);
desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
if (flags & CE_SEND_FLAG_GATHER)
desc_flags |= CE_DESC_FLAGS_GATHER;
if (flags & CE_SEND_FLAG_BYTE_SWAP)
desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
addr = (__le32 *)&sdesc.addr;
flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
addr[0] = __cpu_to_le32(buffer);
addr[1] = __cpu_to_le32(flags);
if (flags & CE_SEND_FLAG_GATHER)
addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
else
addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
sdesc.nbytes = __cpu_to_le16(nbytes);
sdesc.flags = __cpu_to_le16(desc_flags);
*desc = sdesc;
src_ring->per_transfer_context[write_index] = per_transfer_context;
/* Update Source Ring Write Index */
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
if (!(flags & CE_SEND_FLAG_GATHER))
ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
src_ring->write_index = write_index;
exit:
return ret;
}
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
{
return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
buffer, nbytes, transfer_id, flags);
}
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
struct ath10k *ar = pipe->ar;
@ -413,7 +494,7 @@ void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags)
@ -459,7 +540,8 @@ int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
@ -488,6 +570,39 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
void *ctx,
dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int write_index = dest_ring->write_index;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
struct ce_desc_64 *desc =
CE_DEST_RING_TO_DESC_64(base, write_index);
u32 ctrl_addr = pipe->ctrl_addr;
lockdep_assert_held(&ce->ce_lock);
if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -ENOSPC;
desc->addr = __cpu_to_le64(paddr);
desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
desc->nbytes = 0;
dest_ring->per_transfer_context[write_index] = ctx;
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
dest_ring->write_index = write_index;
return 0;
}
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
struct ath10k *ar = pipe->ar;
@ -508,14 +623,15 @@ void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
dest_ring->write_index = write_index;
}
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
dma_addr_t paddr)
{
struct ath10k *ar = pipe->ar;
struct ath10k_ce *ce = ath10k_ce_priv(ar);
int ret;
spin_lock_bh(&ce->ce_lock);
ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
spin_unlock_bh(&ce->ce_lock);
return ret;
@ -525,9 +641,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
unsigned int *nbytesp)
static int
_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
@ -574,6 +691,64 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
struct ce_desc_64 *desc =
CE_DEST_RING_TO_DESC_64(base, sw_index);
struct ce_desc_64 sdesc;
u16 nbytes;
/* Copy in one go for performance reasons */
sdesc = *desc;
nbytes = __le16_to_cpu(sdesc.nbytes);
if (nbytes == 0) {
/* This closes a relatively unusual race where the Host
* sees the updated DRRI before the update to the
* corresponding descriptor has completed. We treat this
* as a descriptor that is not yet done.
*/
return -EIO;
}
desc->nbytes = 0;
/* Return data from completed destination descriptor */
*nbytesp = nbytes;
if (per_transfer_contextp)
*per_transfer_contextp =
dest_ring->per_transfer_context[sw_index];
/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
* So update the transfer context for all CEs except CE5.
*/
if (ce_state->id != 5)
dest_ring->per_transfer_context[sw_index] = NULL;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
dest_ring->sw_index = sw_index;
return 0;
}
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_ctx,
unsigned int *nbytesp)
{
return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
per_transfer_ctx,
nbytesp);
}
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
unsigned int *nbytesp)
@ -583,17 +758,18 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
int ret;
spin_lock_bh(&ce->ce_lock);
ret = ath10k_ce_completed_recv_next_nolock(ce_state,
ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
nbytesp);
spin_unlock_bh(&ce->ce_lock);
return ret;
}
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp)
static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
dma_addr_t *bufferp)
{
struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
@ -644,6 +820,69 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
return ret;
}
static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
dma_addr_t *bufferp)
{
struct ath10k_ce_ring *dest_ring;
unsigned int nentries_mask;
unsigned int sw_index;
unsigned int write_index;
int ret;
struct ath10k *ar;
struct ath10k_ce *ce;
dest_ring = ce_state->dest_ring;
if (!dest_ring)
return -EIO;
ar = ce_state->ar;
ce = ath10k_ce_priv(ar);
spin_lock_bh(&ce->ce_lock);
nentries_mask = dest_ring->nentries_mask;
sw_index = dest_ring->sw_index;
write_index = dest_ring->write_index;
if (write_index != sw_index) {
struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
struct ce_desc_64 *desc =
CE_DEST_RING_TO_DESC_64(base, sw_index);
/* Return data from completed destination descriptor */
*bufferp = __le64_to_cpu(desc->addr);
if (per_transfer_contextp)
*per_transfer_contextp =
dest_ring->per_transfer_context[sw_index];
/* sanity */
dest_ring->per_transfer_context[sw_index] = NULL;
desc->nbytes = 0;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
dest_ring->sw_index = sw_index;
ret = 0;
} else {
ret = -EIO;
}
spin_unlock_bh(&ce->ce_lock);
return ret;
}
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
dma_addr_t *bufferp)
{
return ce_state->ops->ce_revoke_recv_next(ce_state,
per_transfer_contextp,
bufferp);
}
/*
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
@ -698,10 +937,45 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
struct ath10k_ce_ring *src_ring,
u32 sw_index,
dma_addr_t *bufferp,
u32 *nbytesp,
u32 *transfer_idp)
{
struct ce_desc *base = src_ring->base_addr_owner_space;
struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
/* Return data from completed source descriptor */
*bufferp = __le32_to_cpu(desc->addr);
*nbytesp = __le16_to_cpu(desc->nbytes);
*transfer_idp = MS(__le16_to_cpu(desc->flags),
CE_DESC_FLAGS_META_DATA);
}
static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
struct ath10k_ce_ring *src_ring,
u32 sw_index,
dma_addr_t *bufferp,
u32 *nbytesp,
u32 *transfer_idp)
{
struct ce_desc_64 *base = src_ring->base_addr_owner_space;
struct ce_desc_64 *desc =
CE_SRC_RING_TO_DESC_64(base, sw_index);
/* Return data from completed source descriptor */
*bufferp = __le64_to_cpu(desc->addr);
*nbytesp = __le16_to_cpu(desc->nbytes);
*transfer_idp = MS(__le16_to_cpu(desc->flags),
CE_DESC_FLAGS_META_DATA);
}
/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
@ -728,14 +1002,9 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
write_index = src_ring->write_index;
if (write_index != sw_index) {
struct ce_desc *base = src_ring->base_addr_owner_space;
struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
/* Return data from completed source descriptor */
*bufferp = __le32_to_cpu(desc->addr);
*nbytesp = __le16_to_cpu(desc->nbytes);
*transfer_idp = MS(__le16_to_cpu(desc->flags),
CE_DESC_FLAGS_META_DATA);
ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
bufferp, nbytesp,
transfer_idp);
if (per_transfer_contextp)
*per_transfer_contextp =
@ -897,8 +1166,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->src_nentries);
memset(src_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc));
if (ar->hw_params.target_64bit)
memset(src_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc_64));
else
memset(src_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc));
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
src_ring->sw_index &= src_ring->nentries_mask;
@ -934,8 +1207,12 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
nentries = roundup_pow_of_two(attr->dest_nentries);
memset(dest_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc));
if (ar->hw_params.target_64bit)
memset(dest_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc_64));
else
memset(dest_ring->base_addr_owner_space, 0,
nentries * sizeof(struct ce_desc));
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
dest_ring->sw_index &= dest_ring->nentries_mask;
@ -993,12 +1270,57 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space = PTR_ALIGN(
src_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
src_ring->base_addr_ce_space = ALIGN(
src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
src_ring->base_addr_owner_space =
PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
src_ring->base_addr_ce_space =
ALIGN(src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return src_ring;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce_ring *src_ring;
u32 nentries = attr->src_nentries;
dma_addr_t base_addr;
nentries = roundup_pow_of_two(nentries);
src_ring = kzalloc(sizeof(*src_ring) +
(nentries *
sizeof(*src_ring->per_transfer_context)),
GFP_KERNEL);
if (!src_ring)
return ERR_PTR(-ENOMEM);
src_ring->nentries = nentries;
src_ring->nentries_mask = nentries - 1;
/* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
src_ring->base_addr_owner_space_unaligned =
dma_alloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!src_ring->base_addr_owner_space_unaligned) {
kfree(src_ring);
return ERR_PTR(-ENOMEM);
}
src_ring->base_addr_ce_space_unaligned = base_addr;
src_ring->base_addr_owner_space =
PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
src_ring->base_addr_ce_space =
ALIGN(src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return src_ring;
}
@ -1039,12 +1361,63 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
dest_ring->base_addr_ce_space_unaligned = base_addr;
dest_ring->base_addr_owner_space = PTR_ALIGN(
dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
dest_ring->base_addr_ce_space = ALIGN(
dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
dest_ring->base_addr_owner_space =
PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
dest_ring->base_addr_ce_space =
ALIGN(dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return dest_ring;
}
static struct ath10k_ce_ring *
ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce_ring *dest_ring;
u32 nentries;
dma_addr_t base_addr;
nentries = roundup_pow_of_two(attr->dest_nentries);
dest_ring = kzalloc(sizeof(*dest_ring) +
(nentries *
sizeof(*dest_ring->per_transfer_context)),
GFP_KERNEL);
if (!dest_ring)
return ERR_PTR(-ENOMEM);
dest_ring->nentries = nentries;
dest_ring->nentries_mask = nentries - 1;
/* Legacy platforms that do not support cache
* coherent DMA are unsupported
*/
dest_ring->base_addr_owner_space_unaligned =
dma_alloc_coherent(ar->dev,
(nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
return ERR_PTR(-ENOMEM);
}
dest_ring->base_addr_ce_space_unaligned = base_addr;
/* Correctly initialize memory to 0 to prevent garbage
* data crashing the system when downloading firmware
*/
memset(dest_ring->base_addr_owner_space_unaligned, 0,
nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
dest_ring->base_addr_owner_space =
PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
CE_DESC_RING_ALIGN);
dest_ring->base_addr_ce_space =
ALIGN(dest_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
return dest_ring;
}
@ -1107,65 +1480,7 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_deinit_dest_ring(ar, ce_id);
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
int ret;
/*
* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
* additional TX locking checks.
*
* For the lack of a better place do the check here.
*/
BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
if (attr->src_nentries)
ce_state->send_cb = attr->send_cb;
if (attr->dest_nentries)
ce_state->recv_cb = attr->recv_cb;
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
}
}
if (attr->dest_nentries) {
ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
}
}
return 0;
}
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
@ -1194,6 +1509,43 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
ce_state->dest_ring = NULL;
}
static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
if (ce_state->src_ring) {
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
ce_state->src_ring->base_addr_owner_space,
ce_state->src_ring->base_addr_ce_space);
kfree(ce_state->src_ring);
}
if (ce_state->dest_ring) {
dma_free_coherent(ar->dev,
(ce_state->dest_ring->nentries *
sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
ce_state->dest_ring->base_addr_owner_space,
ce_state->dest_ring->base_addr_ce_space);
kfree(ce_state->dest_ring);
}
ce_state->src_ring = NULL;
ce_state->dest_ring = NULL;
}
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
ce_state->ops->ce_free_pipe(ar, ce_id);
}
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
@ -1232,3 +1584,99 @@ void ath10k_ce_dump_registers(struct ath10k *ar,
spin_unlock_bh(&ce->ce_lock);
}
static const struct ath10k_ce_ops ce_ops = {
.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
.ce_extract_desc_data = ath10k_ce_extract_desc_data,
.ce_free_pipe = _ath10k_ce_free_pipe,
.ce_send_nolock = _ath10k_ce_send_nolock,
};
static const struct ath10k_ce_ops ce_64_ops = {
.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
.ce_completed_recv_next_nolock =
_ath10k_ce_completed_recv_next_nolock_64,
.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
.ce_free_pipe = _ath10k_ce_free_pipe_64,
.ce_send_nolock = _ath10k_ce_send_nolock_64,
};
static void ath10k_ce_set_ops(struct ath10k *ar,
struct ath10k_ce_pipe *ce_state)
{
switch (ar->hw_rev) {
case ATH10K_HW_WCN3990:
ce_state->ops = &ce_64_ops;
break;
default:
ce_state->ops = &ce_ops;
break;
}
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
int ret;
ath10k_ce_set_ops(ar, ce_state);
/* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
* additional TX locking checks.
*
* For the lack of a better place do the check here.
*/
BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
if (attr->src_nentries)
ce_state->send_cb = attr->send_cb;
if (attr->dest_nentries)
ce_state->recv_cb = attr->recv_cb;
if (attr->src_nentries) {
ce_state->src_ring =
ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
}
}
if (attr->dest_nentries) {
ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
ce_id,
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
}
}
return 0;
}
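
The ce.c changes above replace direct calls with a per-pipe
ath10k_ce_ops table, selected once in ath10k_ce_set_ops() by hardware
revision so that WCN3990 gets the 64-bit descriptor variants. A
stripped-down sketch of the same dispatch shape, with illustrative
names only:

#include <linux/types.h>

struct ring_ops {
	int (*post_buf)(void *pipe, void *ctx, dma_addr_t paddr);
};

static int post_buf_32(void *pipe, void *ctx, dma_addr_t paddr) { return 0; }
static int post_buf_64(void *pipe, void *ctx, dma_addr_t paddr) { return 0; }

static const struct ring_ops ops_32 = { .post_buf = post_buf_32 };
static const struct ring_ops ops_64 = { .post_buf = post_buf_64 };

/* Chosen once at pipe allocation, like ath10k_ce_set_ops() above. */
static const struct ring_ops *pick_ops(bool target_64bit)
{
	return target_64bit ? &ops_64 : &ops_32;
}

Every later call then goes through the ops pointer, so common paths
such as ath10k_ce_rx_post_buf() stay descriptor-width agnostic.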


@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -36,6 +36,10 @@ struct ath10k_ce_pipe;
#define CE_DESC_FLAGS_GATHER (1 << 0)
#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0)
#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0)
/* Following desc flags are used in QCA99X0 */
#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
@ -50,6 +54,16 @@ struct ce_desc {
__le16 flags; /* %CE_DESC_FLAGS_ */
};
struct ce_desc_64 {
__le64 addr;
__le16 nbytes; /* length in register map */
__le16 flags; /* fw_metadata_high */
__le32 toeplitz_hash_result;
};
#define CE_DESC_SIZE sizeof(struct ce_desc)
#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
struct ath10k_ce_ring {
/* Number of entries in this ring; must be power of 2 */
unsigned int nentries;
@ -117,6 +131,7 @@ struct ath10k_ce_pipe {
unsigned int src_sz_max;
struct ath10k_ce_ring *src_ring;
struct ath10k_ce_ring *dest_ring;
const struct ath10k_ce_ops *ops;
};
/* Copy Engine settable attributes */
@ -160,7 +175,7 @@ struct ath10k_ce {
*/
int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_send_context,
u32 buffer,
dma_addr_t buffer,
unsigned int nbytes,
/* 14 bits */
unsigned int transfer_id,
@ -168,7 +183,7 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
u32 buffer,
dma_addr_t buffer,
unsigned int nbytes,
unsigned int transfer_id,
unsigned int flags);
@ -180,8 +195,8 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
dma_addr_t paddr);
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
/* recv flags */
@ -222,7 +237,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
*/
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
dma_addr_t *bufferp);
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
@ -235,7 +250,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
*/
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
dma_addr_t *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
@ -281,6 +296,31 @@ struct ce_attr {
void (*recv_cb)(struct ath10k_ce_pipe *);
};
struct ath10k_ce_ops {
struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
u32 ce_id,
const struct ce_attr *attr);
struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
u32 ce_id,
const struct ce_attr *attr);
int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
dma_addr_t paddr);
int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *nbytesp);
int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
dma_addr_t *nbytesp);
void (*ce_extract_desc_data)(struct ath10k *ar,
struct ath10k_ce_ring *src_ring,
u32 sw_index, dma_addr_t *bufferp,
u32 *nbytesp, u32 *transfer_idp);
void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
void *per_transfer_context,
dma_addr_t buffer, u32 nbytes,
u32 transfer_id, u32 flags);
};
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
@ -292,6 +332,12 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
#define CE_DEST_RING_TO_DESC(baddr, idx) \
(&(((struct ce_desc *)baddr)[idx]))
#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
(&(((struct ce_desc_64 *)baddr)[idx]))
#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
(&(((struct ce_desc_64 *)baddr)[idx]))
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
(((int)(toidx) - (int)(fromidx)) & (nentries_mask))
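
CE_RING_DELTA only works because nentries is a power of two: masking
the signed difference folds the wrap-around into the modulus for free.
A standalone worked example (not driver code):

#include <assert.h>

int main(void)
{
	unsigned int mask = 8 - 1;		/* nentries must be 2^n */
	unsigned int delta = (2 - 6) & mask;	/* 6 -> 7 -> 0 -> 1 -> 2, so 4 */
	unsigned int next = (7 + 1) & mask;	/* index increment wraps to 0 */

	assert(delta == 4 && next == 0);
	return 0;
}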


@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -32,6 +32,7 @@
#include "htt.h"
#include "testmode.h"
#include "wmi-ops.h"
#include "coredump.h"
unsigned int ath10k_debug_mask;
static unsigned int ath10k_cryptmode_param;
@ -39,17 +40,25 @@ static bool uart_print;
static bool skip_otp;
static bool rawmode;
/* Enable ATH10K_FW_CRASH_DUMP_REGISTERS and ATH10K_FW_CRASH_DUMP_CE_DATA
* by default.
*/
unsigned long ath10k_coredump_mask = 0x3;
/* FIXME: most of these should be readonly */
module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
module_param(uart_print, bool, 0644);
module_param(skip_otp, bool, 0644);
module_param(rawmode, bool, 0644);
module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
MODULE_PARM_DESC(uart_print, "Uart target debugging");
MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
@ -78,6 +87,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9887_HW_1_0_VERSION,
@ -105,6 +116,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -131,6 +144,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_2_1_VERSION,
@ -157,6 +172,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_3_0_VERSION,
@ -183,6 +200,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA6174_HW_3_2_VERSION,
@ -212,6 +231,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@ -244,6 +265,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9984_HW_1_0_DEV_VERSION,
@ -281,6 +304,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9888_HW_2_0_DEV_VERSION,
@ -317,6 +342,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
@ -343,6 +370,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA9377_HW_1_1_DEV_VERSION,
@ -371,6 +400,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = QCA4019_HW_1_0_DEV_VERSION,
@ -404,6 +435,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_TLV_NUM_PEERS,
.ast_skid_limit = 0x10,
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
},
{
.id = WCN3990_HW_1_0_DEV_VERSION,
@ -422,6 +455,8 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
},
};
@ -445,6 +480,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
[ATH10K_FW_FEATURE_NO_PS] = "no-ps",
[ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
[ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
@ -1524,8 +1560,8 @@ int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
data += ie_len;
}
if (!fw_file->firmware_data ||
!fw_file->firmware_len) {
if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
(!fw_file->firmware_data || !fw_file->firmware_len)) {
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
@ -1551,6 +1587,7 @@ static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
break;
case ATH10K_BUS_PCI:
case ATH10K_BUS_AHB:
case ATH10K_BUS_SNOC:
scnprintf(fw_name, fw_name_len, "%s-%d.bin",
ATH10K_FW_FILE_BASE, fw_api);
break;
@ -1836,7 +1873,7 @@ static void ath10k_core_restart(struct work_struct *work)
mutex_unlock(&ar->conf_mutex);
ret = ath10k_debug_fw_devcoredump(ar);
ret = ath10k_coredump_submit(ar);
if (ret)
ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
ret);
@ -2078,44 +2115,48 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw = fw;
ath10k_bmi_start(ar);
if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
ar->running_fw->fw_file.fw_features)) {
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
goto err;
}
status = ath10k_download_cal_data(ar);
if (status)
goto err;
/* Some of the qca988x solutions are having global reset issue
* during target initialization. Bypassing PLL setting before
* downloading firmware and letting the SoC run on REF_CLK is
* fixing the problem. Corresponding firmware change is also needed
* to set the clock source once the target is initialized.
*/
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
ar->running_fw->fw_file.fw_features)) {
status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
if (status) {
ath10k_err(ar, "could not write to skip_clock_init: %d\n",
status);
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
goto err;
}
status = ath10k_download_cal_data(ar);
if (status)
goto err;
/* Some of the qca988x solutions are having global reset issue
* during target initialization. Bypassing PLL setting before
* downloading firmware and letting the SoC run on REF_CLK is
* fixing the problem. Corresponding firmware change is also
* needed to set the clock source once the target is
* initialized.
*/
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
ar->running_fw->fw_file.fw_features)) {
status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
if (status) {
ath10k_err(ar, "could not write to skip_clock_init: %d\n",
status);
goto err;
}
}
status = ath10k_download_fw(ar);
if (status)
goto err;
status = ath10k_init_uart(ar);
if (status)
goto err;
if (ar->hif.bus == ATH10K_BUS_SDIO)
ath10k_init_sdio(ar);
}
status = ath10k_download_fw(ar);
if (status)
goto err;
status = ath10k_init_uart(ar);
if (status)
goto err;
if (ar->hif.bus == ATH10K_BUS_SDIO)
ath10k_init_sdio(ar);
ar->htc.htc_ops.target_send_suspend_complete =
ath10k_send_suspend_complete;
@ -2125,9 +2166,12 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err;
}
status = ath10k_bmi_done(ar);
if (status)
goto err;
if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
ar->running_fw->fw_file.fw_features)) {
status = ath10k_bmi_done(ar);
if (status)
goto err;
}
status = ath10k_wmi_attach(ar);
if (status) {
@ -2370,19 +2414,35 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
memset(&target_info, 0, sizeof(target_info));
if (ar->hif.bus == ATH10K_BUS_SDIO)
switch (ar->hif.bus) {
case ATH10K_BUS_SDIO:
memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
else
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
goto err_power_down;
}
ar->target_version = target_info.version;
ar->hw->wiphy->hw_version = target_info.version;
break;
case ATH10K_BUS_PCI:
case ATH10K_BUS_AHB:
case ATH10K_BUS_USB:
memset(&target_info, 0, sizeof(target_info));
ret = ath10k_bmi_get_target_info(ar, &target_info);
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
goto err_power_down;
if (ret) {
ath10k_err(ar, "could not get target info (%d)\n", ret);
goto err_power_down;
}
ar->target_version = target_info.version;
ar->hw->wiphy->hw_version = target_info.version;
break;
case ATH10K_BUS_SNOC:
break;
default:
ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
}
ar->target_version = target_info.version;
ar->hw->wiphy->hw_version = target_info.version;
ret = ath10k_init_hw_params(ar);
if (ret) {
ath10k_err(ar, "could not get hw params (%d)\n", ret);
@ -2402,38 +2462,41 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_hwfw_info(ar);
ret = ath10k_core_pre_cal_download(ar);
if (ret) {
/* pre calibration data download is not necessary
* for all the chipsets. Ignore failures and continue.
*/
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"could not load pre cal data: %d\n", ret);
if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
ar->normal_mode_fw.fw_file.fw_features)) {
ret = ath10k_core_pre_cal_download(ar);
if (ret) {
/* pre calibration data download is not necessary
* for all the chipsets. Ignore failures and continue.
*/
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"could not load pre cal data: %d\n", ret);
}
ret = ath10k_core_get_board_id_from_otp(ar);
if (ret && ret != -EOPNOTSUPP) {
ath10k_err(ar, "failed to get board id from otp: %d\n",
ret);
goto err_free_firmware_files;
}
ret = ath10k_core_check_smbios(ar);
if (ret)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
ret = ath10k_core_check_dt(ar);
if (ret)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
ret = ath10k_core_fetch_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
goto err_free_firmware_files;
}
ath10k_debug_print_board_info(ar);
}
ret = ath10k_core_get_board_id_from_otp(ar);
if (ret && ret != -EOPNOTSUPP) {
ath10k_err(ar, "failed to get board id from otp: %d\n",
ret);
goto err_free_firmware_files;
}
ret = ath10k_core_check_smbios(ar);
if (ret)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
ret = ath10k_core_check_dt(ar);
if (ret)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
ret = ath10k_core_fetch_board_file(ar);
if (ret) {
ath10k_err(ar, "failed to fetch board file: %d\n", ret);
goto err_free_firmware_files;
}
ath10k_debug_print_board_info(ar);
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
ath10k_err(ar, "fatal problem with firmware features: %d\n",
@ -2441,11 +2504,15 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_free_firmware_files;
}
ret = ath10k_swap_code_seg_init(ar, &ar->normal_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
goto err_free_firmware_files;
if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
ar->normal_mode_fw.fw_file.fw_features)) {
ret = ath10k_swap_code_seg_init(ar,
&ar->normal_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to initialize code swap segment: %d\n",
ret);
goto err_free_firmware_files;
}
}
mutex_lock(&ar->conf_mutex);
@ -2497,10 +2564,16 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_release_fw;
}
status = ath10k_coredump_register(ar);
if (status) {
ath10k_err(ar, "unable to register coredump\n");
goto err_unregister_mac;
}
status = ath10k_debug_register(ar);
if (status) {
ath10k_err(ar, "unable to initialize debugfs\n");
goto err_unregister_mac;
goto err_unregister_coredump;
}
status = ath10k_spectral_create(ar);
@ -2523,6 +2596,8 @@ err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
ath10k_debug_destroy(ar);
err_unregister_coredump:
ath10k_coredump_unregister(ar);
err_unregister_mac:
ath10k_mac_unregister(ar);
err_release_fw:
@ -2677,12 +2752,19 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_dummy_netdev(&ar->napi_dev);
ret = ath10k_debug_create(ar);
ret = ath10k_coredump_create(ar);
if (ret)
goto err_free_aux_wq;
ret = ath10k_debug_create(ar);
if (ret)
goto err_free_coredump;
return ar;
err_free_coredump:
ath10k_coredump_destroy(ar);
err_free_aux_wq:
destroy_workqueue(ar->workqueue_aux);
err_free_wq:
@ -2704,6 +2786,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_aux);
ath10k_debug_destroy(ar);
ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
ath10k_wmi_free_host_mem(ar);
ath10k_mac_destroy(ar);

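The coredump_mask parameter added to core.c above is a plain bitfield:
bit n enables dump piece n, and the default of 0x3 turns on the first
two (registers and CE data). A minimal sketch of that gating, assuming
the ATH10K_FW_CRASH_DUMP_* enum values are bit indices as the default
implies:

#include <linux/bitops.h>

/* Sketch only: returns true when dump piece 'bit' is enabled in 'mask'. */
static bool example_dump_enabled(unsigned long mask, int bit)
{
	return test_bit(bit, &mask);
}

With the default mask of 0x3, example_dump_enabled(mask, 0) and
example_dump_enabled(mask, 1) are true and everything else is off.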

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -92,6 +92,7 @@ enum ath10k_bus {
ATH10K_BUS_AHB,
ATH10K_BUS_SDIO,
ATH10K_BUS_USB,
ATH10K_BUS_SNOC,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@ -105,6 +106,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus)
return "sdio";
case ATH10K_BUS_USB:
return "usb";
case ATH10K_BUS_SNOC:
return "snoc";
}
return "unknown";
@ -457,14 +460,17 @@ struct ath10k_ce_crash_hdr {
struct ath10k_ce_crash_data entries[];
};
#define MAX_MEM_DUMP_TYPE 5
/* used for crash-dump storage, protected by data-lock */
struct ath10k_fw_crash_data {
bool crashed_since_read;
guid_t guid;
struct timespec64 timestamp;
__le32 registers[REG_DUMP_COUNT_QCA988X];
struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
u8 *ramdump_buf;
size_t ramdump_buf_len;
};
struct ath10k_debug {
@ -490,8 +496,6 @@ struct ath10k_debug {
u32 reg_addr;
u32 nf_cal_period;
void *cal_data;
struct ath10k_fw_crash_data *fw_crash_data;
};
enum ath10k_state {
@ -616,6 +620,9 @@ enum ath10k_fw_features {
/* Firmware allows management tx by reference instead of by value. */
ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
/* Firmware load is done externally, not by bmi */
ATH10K_FW_FEATURE_NON_BMI = 19,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
@ -965,6 +972,13 @@ struct ath10k {
#endif
u32 pktlog_filter;
#ifdef CONFIG_DEV_COREDUMP
struct {
struct ath10k_fw_crash_data *fw_crash_data;
} coredump;
#endif
struct {
/* protected by conf_mutex */
struct ath10k_fw_components utf_mode_fw;
@ -1018,6 +1032,8 @@ static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
return false;
}
extern unsigned long ath10k_coredump_mask;
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,

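core.h above carries firmware capabilities as a bitmap indexed by
enum ath10k_fw_features, which is why core.c checks
ATH10K_FW_FEATURE_NON_BMI with test_bit(). The same idiom in miniature,
with illustrative names:

#include <linux/bitops.h>

#define EX_FEATURE_NON_BMI	19
#define EX_FEATURE_COUNT	20

static unsigned long ex_features[BITS_TO_LONGS(EX_FEATURE_COUNT)];

static void ex_mark_non_bmi(void)
{
	__set_bit(EX_FEATURE_NON_BMI, ex_features);
}

static bool ex_is_non_bmi(void)
{
	return test_bit(EX_FEATURE_NON_BMI, ex_features);
}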

@ -0,0 +1,993 @@
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "coredump.h"
#include <linux/devcoredump.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/utsname.h>
#include "debug.h"
#include "hw.h"
static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
{0x800, 0x810},
{0x820, 0x82C},
{0x830, 0x8F4},
{0x90C, 0x91C},
{0xA14, 0xA18},
{0xA84, 0xA94},
{0xAA8, 0xAD4},
{0xADC, 0xB40},
{0x1000, 0x10A4},
{0x10BC, 0x111C},
{0x1134, 0x1138},
{0x1144, 0x114C},
{0x1150, 0x115C},
{0x1160, 0x1178},
{0x1240, 0x1260},
{0x2000, 0x207C},
{0x3000, 0x3014},
{0x4000, 0x4014},
{0x5000, 0x5124},
{0x6000, 0x6040},
{0x6080, 0x60CC},
{0x6100, 0x611C},
{0x6140, 0x61D8},
{0x6200, 0x6238},
{0x6240, 0x628C},
{0x62C0, 0x62EC},
{0x6380, 0x63E8},
{0x6400, 0x6440},
{0x6480, 0x64CC},
{0x6500, 0x651C},
{0x6540, 0x6580},
{0x6600, 0x6638},
{0x6640, 0x668C},
{0x66C0, 0x66EC},
{0x6780, 0x67E8},
{0x7080, 0x708C},
{0x70C0, 0x70C8},
{0x7400, 0x741C},
{0x7440, 0x7454},
{0x7800, 0x7818},
{0x8000, 0x8004},
{0x8010, 0x8064},
{0x8080, 0x8084},
{0x80A0, 0x80A4},
{0x80C0, 0x80C4},
{0x80E0, 0x80F4},
{0x8100, 0x8104},
{0x8110, 0x812C},
{0x9000, 0x9004},
{0x9800, 0x982C},
{0x9830, 0x9838},
{0x9840, 0x986C},
{0x9870, 0x9898},
{0x9A00, 0x9C00},
{0xD580, 0xD59C},
{0xF000, 0xF0E0},
{0xF140, 0xF190},
{0xF250, 0xF25C},
{0xF260, 0xF268},
{0xF26C, 0xF2A8},
{0x10008, 0x1000C},
{0x10014, 0x10018},
{0x1001C, 0x10020},
{0x10024, 0x10028},
{0x10030, 0x10034},
{0x10040, 0x10054},
{0x10058, 0x1007C},
{0x10080, 0x100C4},
{0x100C8, 0x10114},
{0x1012C, 0x10130},
{0x10138, 0x10144},
{0x10200, 0x10220},
{0x10230, 0x10250},
{0x10260, 0x10280},
{0x10290, 0x102B0},
{0x102C0, 0x102DC},
{0x102E0, 0x102F4},
{0x102FC, 0x1037C},
{0x10380, 0x10390},
{0x10800, 0x10828},
{0x10840, 0x10844},
{0x10880, 0x10884},
{0x108C0, 0x108E8},
{0x10900, 0x10928},
{0x10940, 0x10944},
{0x10980, 0x10984},
{0x109C0, 0x109E8},
{0x10A00, 0x10A28},
{0x10A40, 0x10A50},
{0x11000, 0x11028},
{0x11030, 0x11034},
{0x11038, 0x11068},
{0x11070, 0x11074},
{0x11078, 0x110A8},
{0x110B0, 0x110B4},
{0x110B8, 0x110E8},
{0x110F0, 0x110F4},
{0x110F8, 0x11128},
{0x11138, 0x11144},
{0x11178, 0x11180},
{0x111B8, 0x111C0},
{0x111F8, 0x11200},
{0x11238, 0x1123C},
{0x11270, 0x11274},
{0x11278, 0x1127C},
{0x112B0, 0x112B4},
{0x112B8, 0x112BC},
{0x112F0, 0x112F4},
{0x112F8, 0x112FC},
{0x11338, 0x1133C},
{0x11378, 0x1137C},
{0x113B8, 0x113BC},
{0x113F8, 0x113FC},
{0x11438, 0x11440},
{0x11478, 0x11480},
{0x114B8, 0x114BC},
{0x114F8, 0x114FC},
{0x11538, 0x1153C},
{0x11578, 0x1157C},
{0x115B8, 0x115BC},
{0x115F8, 0x115FC},
{0x11638, 0x1163C},
{0x11678, 0x1167C},
{0x116B8, 0x116BC},
{0x116F8, 0x116FC},
{0x11738, 0x1173C},
{0x11778, 0x1177C},
{0x117B8, 0x117BC},
{0x117F8, 0x117FC},
{0x17000, 0x1701C},
{0x17020, 0x170AC},
{0x18000, 0x18050},
{0x18054, 0x18074},
{0x18080, 0x180D4},
{0x180DC, 0x18104},
{0x18108, 0x1813C},
{0x18144, 0x18148},
{0x18168, 0x18174},
{0x18178, 0x18180},
{0x181C8, 0x181E0},
{0x181E4, 0x181E8},
{0x181EC, 0x1820C},
{0x1825C, 0x18280},
{0x18284, 0x18290},
{0x18294, 0x182A0},
{0x18300, 0x18304},
{0x18314, 0x18320},
{0x18328, 0x18350},
{0x1835C, 0x1836C},
{0x18370, 0x18390},
{0x18398, 0x183AC},
{0x183BC, 0x183D8},
{0x183DC, 0x183F4},
{0x18400, 0x186F4},
{0x186F8, 0x1871C},
{0x18720, 0x18790},
{0x19800, 0x19830},
{0x19834, 0x19840},
{0x19880, 0x1989C},
{0x198A4, 0x198B0},
{0x198BC, 0x19900},
{0x19C00, 0x19C88},
{0x19D00, 0x19D20},
{0x19E00, 0x19E7C},
{0x19E80, 0x19E94},
{0x19E98, 0x19EAC},
{0x19EB0, 0x19EBC},
{0x19F70, 0x19F74},
{0x19F80, 0x19F8C},
{0x19FA0, 0x19FB4},
{0x19FC0, 0x19FD8},
{0x1A000, 0x1A200},
{0x1A204, 0x1A210},
{0x1A228, 0x1A22C},
{0x1A230, 0x1A248},
{0x1A250, 0x1A270},
{0x1A280, 0x1A290},
{0x1A2A0, 0x1A2A4},
{0x1A2C0, 0x1A2EC},
{0x1A300, 0x1A3BC},
{0x1A3F0, 0x1A3F4},
{0x1A3F8, 0x1A434},
{0x1A438, 0x1A444},
{0x1A448, 0x1A468},
{0x1A580, 0x1A58C},
{0x1A644, 0x1A654},
{0x1A670, 0x1A698},
{0x1A6AC, 0x1A6B0},
{0x1A6D0, 0x1A6D4},
{0x1A6EC, 0x1A70C},
{0x1A710, 0x1A738},
{0x1A7C0, 0x1A7D0},
{0x1A7D4, 0x1A7D8},
{0x1A7DC, 0x1A7E4},
{0x1A7F0, 0x1A7F8},
{0x1A888, 0x1A89C},
{0x1A8A8, 0x1A8AC},
{0x1A8C0, 0x1A8DC},
{0x1A8F0, 0x1A8FC},
{0x1AE04, 0x1AE08},
{0x1AE18, 0x1AE24},
{0x1AF80, 0x1AF8C},
{0x1AFA0, 0x1AFB4},
{0x1B000, 0x1B200},
{0x1B284, 0x1B288},
{0x1B2D0, 0x1B2D8},
{0x1B2DC, 0x1B2EC},
{0x1B300, 0x1B340},
{0x1B374, 0x1B378},
{0x1B380, 0x1B384},
{0x1B388, 0x1B38C},
{0x1B404, 0x1B408},
{0x1B420, 0x1B428},
{0x1B440, 0x1B444},
{0x1B448, 0x1B44C},
{0x1B450, 0x1B458},
{0x1B45C, 0x1B468},
{0x1B584, 0x1B58C},
{0x1B68C, 0x1B690},
{0x1B6AC, 0x1B6B0},
{0x1B7F0, 0x1B7F8},
{0x1C800, 0x1CC00},
{0x1CE00, 0x1CE04},
{0x1CF80, 0x1CF84},
{0x1D200, 0x1D800},
{0x1E000, 0x20014},
{0x20100, 0x20124},
{0x21400, 0x217A8},
{0x21800, 0x21BA8},
{0x21C00, 0x21FA8},
{0x22000, 0x223A8},
{0x22400, 0x227A8},
{0x22800, 0x22BA8},
{0x22C00, 0x22FA8},
{0x23000, 0x233A8},
{0x24000, 0x24034},
{0x26000, 0x26064},
{0x27000, 0x27024},
{0x34000, 0x3400C},
{0x34400, 0x3445C},
{0x34800, 0x3485C},
{0x34C00, 0x34C5C},
{0x35000, 0x3505C},
{0x35400, 0x3545C},
{0x35800, 0x3585C},
{0x35C00, 0x35C5C},
{0x36000, 0x3605C},
{0x38000, 0x38064},
{0x38070, 0x380E0},
{0x3A000, 0x3A064},
{0x40000, 0x400A4},
{0x80000, 0x8000C},
{0x80010, 0x80020},
};
static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
{0x800, 0x810},
{0x820, 0x82C},
{0x830, 0x8F4},
{0x90C, 0x91C},
{0xA14, 0xA18},
{0xA84, 0xA94},
{0xAA8, 0xAD4},
{0xADC, 0xB40},
{0x1000, 0x10A4},
{0x10BC, 0x111C},
{0x1134, 0x1138},
{0x1144, 0x114C},
{0x1150, 0x115C},
{0x1160, 0x1178},
{0x1240, 0x1260},
{0x2000, 0x207C},
{0x3000, 0x3014},
{0x4000, 0x4014},
{0x5000, 0x5124},
{0x6000, 0x6040},
{0x6080, 0x60CC},
{0x6100, 0x611C},
{0x6140, 0x61D8},
{0x6200, 0x6238},
{0x6240, 0x628C},
{0x62C0, 0x62EC},
{0x6380, 0x63E8},
{0x6400, 0x6440},
{0x6480, 0x64CC},
{0x6500, 0x651C},
{0x6540, 0x6580},
{0x6600, 0x6638},
{0x6640, 0x668C},
{0x66C0, 0x66EC},
{0x6780, 0x67E8},
{0x7080, 0x708C},
{0x70C0, 0x70C8},
{0x7400, 0x741C},
{0x7440, 0x7454},
{0x7800, 0x7818},
{0x8000, 0x8004},
{0x8010, 0x8064},
{0x8080, 0x8084},
{0x80A0, 0x80A4},
{0x80C0, 0x80C4},
{0x80E0, 0x80F4},
{0x8100, 0x8104},
{0x8110, 0x812C},
{0x9000, 0x9004},
{0x9800, 0x982C},
{0x9830, 0x9838},
{0x9840, 0x986C},
{0x9870, 0x9898},
{0x9A00, 0x9C00},
{0xD580, 0xD59C},
{0xF000, 0xF0E0},
{0xF140, 0xF190},
{0xF250, 0xF25C},
{0xF260, 0xF268},
{0xF26C, 0xF2A8},
{0x10008, 0x1000C},
{0x10014, 0x10018},
{0x1001C, 0x10020},
{0x10024, 0x10028},
{0x10030, 0x10034},
{0x10040, 0x10054},
{0x10058, 0x1007C},
{0x10080, 0x100C4},
{0x100C8, 0x10114},
{0x1012C, 0x10130},
{0x10138, 0x10144},
{0x10200, 0x10220},
{0x10230, 0x10250},
{0x10260, 0x10280},
{0x10290, 0x102B0},
{0x102C0, 0x102DC},
{0x102E0, 0x102F4},
{0x102FC, 0x1037C},
{0x10380, 0x10390},
{0x10800, 0x10828},
{0x10840, 0x10844},
{0x10880, 0x10884},
{0x108C0, 0x108E8},
{0x10900, 0x10928},
{0x10940, 0x10944},
{0x10980, 0x10984},
{0x109C0, 0x109E8},
{0x10A00, 0x10A28},
{0x10A40, 0x10A50},
{0x11000, 0x11028},
{0x11030, 0x11034},
{0x11038, 0x11068},
{0x11070, 0x11074},
{0x11078, 0x110A8},
{0x110B0, 0x110B4},
{0x110B8, 0x110E8},
{0x110F0, 0x110F4},
{0x110F8, 0x11128},
{0x11138, 0x11144},
{0x11178, 0x11180},
{0x111B8, 0x111C0},
{0x111F8, 0x11200},
{0x11238, 0x1123C},
{0x11270, 0x11274},
{0x11278, 0x1127C},
{0x112B0, 0x112B4},
{0x112B8, 0x112BC},
{0x112F0, 0x112F4},
{0x112F8, 0x112FC},
{0x11338, 0x1133C},
{0x11378, 0x1137C},
{0x113B8, 0x113BC},
{0x113F8, 0x113FC},
{0x11438, 0x11440},
{0x11478, 0x11480},
{0x114B8, 0x114BC},
{0x114F8, 0x114FC},
{0x11538, 0x1153C},
{0x11578, 0x1157C},
{0x115B8, 0x115BC},
{0x115F8, 0x115FC},
{0x11638, 0x1163C},
{0x11678, 0x1167C},
{0x116B8, 0x116BC},
{0x116F8, 0x116FC},
{0x11738, 0x1173C},
{0x11778, 0x1177C},
{0x117B8, 0x117BC},
{0x117F8, 0x117FC},
{0x17000, 0x1701C},
{0x17020, 0x170AC},
{0x18000, 0x18050},
{0x18054, 0x18074},
{0x18080, 0x180D4},
{0x180DC, 0x18104},
{0x18108, 0x1813C},
{0x18144, 0x18148},
{0x18168, 0x18174},
{0x18178, 0x18180},
{0x181C8, 0x181E0},
{0x181E4, 0x181E8},
{0x181EC, 0x1820C},
{0x1825C, 0x18280},
{0x18284, 0x18290},
{0x18294, 0x182A0},
{0x18300, 0x18304},
{0x18314, 0x18320},
{0x18328, 0x18350},
{0x1835C, 0x1836C},
{0x18370, 0x18390},
{0x18398, 0x183AC},
{0x183BC, 0x183D8},
{0x183DC, 0x183F4},
{0x18400, 0x186F4},
{0x186F8, 0x1871C},
{0x18720, 0x18790},
{0x19800, 0x19830},
{0x19834, 0x19840},
{0x19880, 0x1989C},
{0x198A4, 0x198B0},
{0x198BC, 0x19900},
{0x19C00, 0x19C88},
{0x19D00, 0x19D20},
{0x19E00, 0x19E7C},
{0x19E80, 0x19E94},
{0x19E98, 0x19EAC},
{0x19EB0, 0x19EBC},
{0x19F70, 0x19F74},
{0x19F80, 0x19F8C},
{0x19FA0, 0x19FB4},
{0x19FC0, 0x19FD8},
{0x1A000, 0x1A200},
{0x1A204, 0x1A210},
{0x1A228, 0x1A22C},
{0x1A230, 0x1A248},
{0x1A250, 0x1A270},
{0x1A280, 0x1A290},
{0x1A2A0, 0x1A2A4},
{0x1A2C0, 0x1A2EC},
{0x1A300, 0x1A3BC},
{0x1A3F0, 0x1A3F4},
{0x1A3F8, 0x1A434},
{0x1A438, 0x1A444},
{0x1A448, 0x1A468},
{0x1A580, 0x1A58C},
{0x1A644, 0x1A654},
{0x1A670, 0x1A698},
{0x1A6AC, 0x1A6B0},
{0x1A6D0, 0x1A6D4},
{0x1A6EC, 0x1A70C},
{0x1A710, 0x1A738},
{0x1A7C0, 0x1A7D0},
{0x1A7D4, 0x1A7D8},
{0x1A7DC, 0x1A7E4},
{0x1A7F0, 0x1A7F8},
{0x1A888, 0x1A89C},
{0x1A8A8, 0x1A8AC},
{0x1A8C0, 0x1A8DC},
{0x1A8F0, 0x1A8FC},
{0x1AE04, 0x1AE08},
{0x1AE18, 0x1AE24},
{0x1AF80, 0x1AF8C},
{0x1AFA0, 0x1AFB4},
{0x1B000, 0x1B200},
{0x1B284, 0x1B288},
{0x1B2D0, 0x1B2D8},
{0x1B2DC, 0x1B2EC},
{0x1B300, 0x1B340},
{0x1B374, 0x1B378},
{0x1B380, 0x1B384},
{0x1B388, 0x1B38C},
{0x1B404, 0x1B408},
{0x1B420, 0x1B428},
{0x1B440, 0x1B444},
{0x1B448, 0x1B44C},
{0x1B450, 0x1B458},
{0x1B45C, 0x1B468},
{0x1B584, 0x1B58C},
{0x1B68C, 0x1B690},
{0x1B6AC, 0x1B6B0},
{0x1B7F0, 0x1B7F8},
{0x1C800, 0x1CC00},
{0x1CE00, 0x1CE04},
{0x1CF80, 0x1CF84},
{0x1D200, 0x1D800},
{0x1E000, 0x20014},
{0x20100, 0x20124},
{0x21400, 0x217A8},
{0x21800, 0x21BA8},
{0x21C00, 0x21FA8},
{0x22000, 0x223A8},
{0x22400, 0x227A8},
{0x22800, 0x22BA8},
{0x22C00, 0x22FA8},
{0x23000, 0x233A8},
{0x24000, 0x24034},
{0x26000, 0x26064},
{0x27000, 0x27024},
{0x34000, 0x3400C},
{0x34400, 0x3445C},
{0x34800, 0x3485C},
{0x34C00, 0x34C5C},
{0x35000, 0x3505C},
{0x35400, 0x3545C},
{0x35800, 0x3585C},
{0x35C00, 0x35C5C},
{0x36000, 0x3605C},
{0x38000, 0x38064},
{0x38070, 0x380E0},
{0x3A000, 0x3A074},
{0x40000, 0x400A4},
{0x80000, 0x8000C},
{0x80010, 0x80020},
};
static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
.start = 0x400000,
.len = 0x70000,
.name = "DRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
/* RTC_SOC_BASE_ADDRESS */
.start = 0x0,
/* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
.len = 0x800 - 0x0,
.name = "REG_PART1",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
/* STEREO_BASE_ADDRESS */
.start = 0x27000,
/* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
.len = 0x60000 - 0x27000,
.name = "REG_PART2",
.section_table = {
.sections = NULL,
.size = 0,
},
},
};
static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
.start = 0x400000,
.len = 0x70000,
.name = "DRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_AXI,
.start = 0xa0000,
.len = 0x18000,
.name = "AXI",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0x800,
.len = 0x80020 - 0x800,
.name = "REG_TOTAL",
.section_table = {
.sections = qca6174_hw21_register_sections,
.size = ARRAY_SIZE(qca6174_hw21_register_sections),
},
},
};
static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
.start = 0x400000,
.len = 0x90000,
.name = "DRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_AXI,
.start = 0xa0000,
.len = 0x18000,
.name = "AXI",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0x800,
.len = 0x80020 - 0x800,
.name = "REG_TOTAL",
.section_table = {
.sections = qca6174_hw30_register_sections,
.size = ARRAY_SIZE(qca6174_hw30_register_sections),
},
},
/* IRAM dump must be put last */
{
.type = ATH10K_MEM_REGION_TYPE_IRAM1,
.start = 0x00980000,
.len = 0x00080000,
.name = "IRAM1",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_IRAM2,
.start = 0x00a00000,
.len = 0x00040000,
.name = "IRAM2",
.section_table = {
.sections = NULL,
.size = 0,
},
},
};
static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
.start = 0x400000,
.len = 0x50000,
.name = "DRAM",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0x4000,
.len = 0x2000,
.name = "REG_PART1",
.section_table = {
.sections = NULL,
.size = 0,
},
},
{
.type = ATH10K_MEM_REGION_TYPE_REG,
.start = 0x8000,
.len = 0x58000,
.name = "REG_PART2",
.section_table = {
.sections = NULL,
.size = 0,
},
},
};
static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
},
},
{
.hw_id = QCA6174_HW_1_1_VERSION,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
},
},
{
.hw_id = QCA6174_HW_1_3_VERSION,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
},
},
{
.hw_id = QCA6174_HW_2_1_VERSION,
.region_table = {
.regions = qca6174_hw21_mem_regions,
.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
},
},
{
.hw_id = QCA6174_HW_3_0_VERSION,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
},
},
{
.hw_id = QCA6174_HW_3_2_VERSION,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
},
},
{
.hw_id = QCA9377_HW_1_1_DEV_VERSION,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
},
},
{
.hw_id = QCA988X_HW_2_0_VERSION,
.region_table = {
.regions = qca988x_hw20_mem_regions,
.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
},
},
};
static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
{
const struct ath10k_hw_mem_layout *hw;
const struct ath10k_mem_region *mem_region;
size_t size = 0;
int i;
hw = ath10k_coredump_get_mem_layout(ar);
if (!hw)
return 0;
mem_region = &hw->region_table.regions[0];
for (i = 0; i < hw->region_table.size; i++) {
size += mem_region->len;
mem_region++;
}
/* reserve space for the headers */
size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
/* make sure it is aligned to 16 bytes for debug message printout */
size = ALIGN(size, 16);
return size;
}
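As a worked example of this calculation (illustration only): taking the qca988x_hw20_mem_regions table defined earlier, and assuming the 12-byte ath10k_dump_ram_data_hdr layout (three __le32 fields) from the header below, the buffer size comes out as follows.
/* 0x50000 (DRAM) + 0x2000 (REG_PART1) + 0x58000 (REG_PART2) = 0xAA000
 * bytes of region data, plus 3 * 12 = 0x24 bytes of headers, gives
 * 0xAA024; ALIGN(0xAA024, 16) rounds that up to 0xAA030.
 */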
const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
{
int i;
if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
return NULL;
if (WARN_ON(ar->target_version == 0))
return NULL;
for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
if (ar->target_version == hw_mem_layouts[i].hw_id)
return &hw_mem_layouts[i];
}
return NULL;
}
EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
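A minimal usage sketch for the exported helper; the function name and the ATH10K_DBG_BOOT mask choice are illustrative, not from this patch:
static void example_log_mem_layout(struct ath10k *ar)
{
	const struct ath10k_hw_mem_layout *hw;
	int i;
	hw = ath10k_coredump_get_mem_layout(ar);
	if (!hw)
		return;
	/* walk the per-hardware region table resolved above */
	for (i = 0; i < hw->region_table.size; i++)
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "region %s start 0x%x len 0x%x\n",
			   hw->region_table.regions[i].name,
			   hw->region_table.regions[i].start,
			   hw->region_table.regions[i].len);
}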
struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
lockdep_assert_held(&ar->data_lock);
if (ath10k_coredump_mask == 0)
/* coredump disabled */
return NULL;
guid_gen(&crash_data->guid);
ktime_get_real_ts64(&crash_data->timestamp);
return crash_data;
}
EXPORT_SYMBOL(ath10k_coredump_new);
static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
struct ath10k_ce_crash_hdr *ce_hdr;
struct ath10k_dump_file_data *dump_data;
struct ath10k_tlv_dump_data *dump_tlv;
size_t hdr_len = sizeof(*dump_data);
size_t len, sofar = 0;
unsigned char *buf;
len = hdr_len;
if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
CE_COUNT * sizeof(ce_hdr->entries[0]);
if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
sofar += hdr_len;
/* This is going to get big when we start dumping FW RAM and such,
* so go ahead and use vmalloc.
*/
buf = vzalloc(len);
if (!buf)
return NULL;
spin_lock_bh(&ar->data_lock);
dump_data = (struct ath10k_dump_file_data *)(buf);
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
sizeof(dump_data->df_magic));
dump_data->len = cpu_to_le32(len);
dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
guid_copy(&dump_data->guid, &crash_data->guid);
dump_data->chip_id = cpu_to_le32(ar->chip_id);
dump_data->bus_type = cpu_to_le32(0);
dump_data->target_version = cpu_to_le32(ar->target_version);
dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
sizeof(dump_data->fw_ver));
dump_data->kernel_ver_code = 0;
strlcpy(dump_data->kernel_ver, init_utsname()->release,
sizeof(dump_data->kernel_ver));
dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
memcpy(dump_tlv->tlv_data, &crash_data->registers,
sizeof(crash_data->registers));
sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
}
if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
CE_COUNT * sizeof(ce_hdr->entries[0]));
ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
memcpy(ce_hdr->entries, crash_data->ce_crash_data,
CE_COUNT * sizeof(ce_hdr->entries[0]));
sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
CE_COUNT * sizeof(ce_hdr->entries[0]);
}
/* Gather ram dump */
if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
crash_data->ramdump_buf_len);
sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
}
spin_unlock_bh(&ar->data_lock);
return dump_data;
}
int ath10k_coredump_submit(struct ath10k *ar)
{
struct ath10k_dump_file_data *dump;
if (ath10k_coredump_mask == 0)
/* coredump disabled */
return 0;
dump = ath10k_coredump_build(ar);
if (!dump) {
ath10k_warn(ar, "no crash dump data found for devcoredump");
return -ENODATA;
}
dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
return 0;
}
int ath10k_coredump_create(struct ath10k *ar)
{
if (ath10k_coredump_mask == 0)
/* coredump disabled */
return 0;
ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
if (!ar->coredump.fw_crash_data)
return -ENOMEM;
return 0;
}
int ath10k_coredump_register(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
if (!crash_data->ramdump_buf)
return -ENOMEM;
}
return 0;
}
void ath10k_coredump_unregister(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
vfree(crash_data->ramdump_buf);
}
void ath10k_coredump_destroy(struct ath10k *ar)
{
if (ar->coredump.fw_crash_data->ramdump_buf) {
vfree(ar->coredump.fw_crash_data->ramdump_buf);
ar->coredump.fw_crash_data->ramdump_buf = NULL;
ar->coredump.fw_crash_data->ramdump_buf_len = 0;
}
vfree(ar->coredump.fw_crash_data);
ar->coredump.fw_crash_data = NULL;
}
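Taken together, the functions above imply the following call order; a hedged sketch, since the driver-core glue that invokes them is not part of this hunk:
/*
 *   ath10k_coredump_create(ar)      - allocate fw_crash_data
 *   ath10k_coredump_register(ar)    - size and allocate the ramdump buffer
 *   ath10k_coredump_new(ar)         - stamp a fresh guid/timestamp on crash
 *   ath10k_coredump_submit(ar)      - build the file, hand it to devcoredump
 *   ath10k_coredump_unregister(ar)  - free the ramdump buffer
 *   ath10k_coredump_destroy(ar)     - free fw_crash_data
 */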

@@ -0,0 +1,225 @@
/*
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _COREDUMP_H_
#define _COREDUMP_H_
#include "core.h"
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
* @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
/* contains multiple struct ath10k_dump_ram_data_hdr */
ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
ATH10K_FW_CRASH_DUMP_MAX,
};
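These values double as bit positions in ath10k_coredump_mask, as the test_bit() calls earlier in this diff show. A minimal sketch of that mapping; the helper name is hypothetical:
static inline bool example_dump_type_enabled(enum ath10k_fw_crash_dump_type type)
{
	/* bit 0 gates registers, bit 1 CE data, bit 2 RAM dumps,
	 * so e.g. a mask of 0x7 enables all three TLV types
	 */
	return test_bit(type, &ath10k_coredump_mask);
}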
struct ath10k_tlv_dump_data {
/* see ath10k_fw_crash_dump_type above */
__le32 type;
/* in bytes */
__le32 tlv_len;
/* pad to 32-bit boundaries as needed */
u8 tlv_data[];
} __packed;
struct ath10k_dump_file_data {
/* dump file information */
/* "ATH10K-FW-DUMP" */
char df_magic[16];
__le32 len;
/* file dump version */
__le32 version;
/* some info we can get from ath10k struct that might help */
guid_t guid;
__le32 chip_id;
/* 0 for now, in place for later hardware */
__le32 bus_type;
__le32 target_version;
__le32 fw_version_major;
__le32 fw_version_minor;
__le32 fw_version_release;
__le32 fw_version_build;
__le32 phy_capability;
__le32 hw_min_tx_power;
__le32 hw_max_tx_power;
__le32 ht_cap_info;
__le32 vht_cap_info;
__le32 num_rf_chains;
/* firmware version string */
char fw_ver[ETHTOOL_FWVERS_LEN];
/* Kernel related information */
/* time-of-day stamp */
__le64 tv_sec;
/* time-of-day stamp, nano-seconds */
__le64 tv_nsec;
/* LINUX_VERSION_CODE */
__le32 kernel_ver_code;
/* VERMAGIC_STRING */
char kernel_ver[64];
/* room for growth w/out changing binary format */
u8 unused[128];
/* struct ath10k_tlv_dump_data + more */
u8 data[0];
} __packed;
struct ath10k_dump_ram_data_hdr {
/* enum ath10k_mem_region_type */
__le32 region_type;
__le32 start;
/* length of payload data, not including this header */
__le32 length;
u8 data[0];
};
/* magic number used to fill holes in regions that are skipped by the section lists */
#define ATH10K_MAGIC_NOT_COPIED 0xAA
/* part of user space ABI */
enum ath10k_mem_region_type {
ATH10K_MEM_REGION_TYPE_REG = 1,
ATH10K_MEM_REGION_TYPE_DRAM = 2,
ATH10K_MEM_REGION_TYPE_AXI = 3,
ATH10K_MEM_REGION_TYPE_IRAM1 = 4,
ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
};
/* Define a section of a region which should be copied. Not all parts
 * of the memory can be copied safely, for example some registers must
 * not be read, so sections are used to define what is safe to copy.
 *
 * To minimize the size of the array, the list must obey the format:
 * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
 * also satisfy 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
 * we may encounter errors in the dump processing.
 */
struct ath10k_mem_section {
u32 start;
u32 end;
};
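A hypothetical section list that satisfies the ordering rule above; the values are borrowed from the qca6174 hw3.0 table earlier in this diff:
static const struct ath10k_mem_section example_sections[] = {
	{0x800, 0x810},	/* start0 < end0 */
	{0x820, 0x82C},	/* end0 < start1 < end1, and so on */
	{0x830, 0x8F4},
};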
/* One region of a memory layout. If the sections field is NULL, the
 * entire region is copied. If sections is non-NULL, only the areas
 * specified in sections are copied and the remaining areas are filled
 * with ATH10K_MAGIC_NOT_COPIED.
 */
struct ath10k_mem_region {
enum ath10k_mem_region_type type;
u32 start;
u32 len;
const char *name;
struct {
const struct ath10k_mem_section *sections;
u32 size;
} section_table;
};
/* Contains the memory layout of a hardware version identified with the
* hardware id, split into regions.
*/
struct ath10k_hw_mem_layout {
u32 hw_id;
struct {
const struct ath10k_mem_region *regions;
int size;
} region_table;
};
/* FIXME: where to put this? */
extern unsigned long ath10k_coredump_mask;
#ifdef CONFIG_DEV_COREDUMP
int ath10k_coredump_submit(struct ath10k *ar);
struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
int ath10k_coredump_create(struct ath10k *ar);
int ath10k_coredump_register(struct ath10k *ar);
void ath10k_coredump_unregister(struct ath10k *ar);
void ath10k_coredump_destroy(struct ath10k *ar);
const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
#else /* CONFIG_DEV_COREDUMP */
static inline int ath10k_coredump_submit(struct ath10k *ar)
{
return 0;
}
static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{
return NULL;
}
static inline int ath10k_coredump_create(struct ath10k *ar)
{
return 0;
}
static inline int ath10k_coredump_register(struct ath10k *ar)
{
return 0;
}
static inline void ath10k_coredump_unregister(struct ath10k *ar)
{
}
static inline void ath10k_coredump_destroy(struct ath10k *ar)
{
}
static inline const struct ath10k_hw_mem_layout *
ath10k_coredump_get_mem_layout(struct ath10k *ar)
{
return NULL;
}
#endif /* CONFIG_DEV_COREDUMP */
#endif /* _COREDUMP_H_ */
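For orientation, a hedged user-space sketch (not part of the patch) of how a tool might walk the TLVs that follow the fixed ath10k_dump_file_data header in a file read back from /sys/class/devcoredump. It assumes a little-endian host, so the on-disk __le32 fields are used directly; a portable tool would byte-swap.
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct tlv_hdr {
	uint32_t type;		/* enum ath10k_fw_crash_dump_type */
	uint32_t tlv_len;	/* payload bytes, excluding this header */
};

static void walk_tlvs(const uint8_t *buf, size_t len, size_t hdr_len)
{
	size_t off = hdr_len;	/* sizeof(struct ath10k_dump_file_data) */

	while (off + sizeof(struct tlv_hdr) <= len) {
		const struct tlv_hdr *t = (const void *)(buf + off);

		printf("tlv type %u len %u\n",
		       (unsigned)t->type, (unsigned)t->tlv_len);
		off += sizeof(*t) + t->tlv_len;
	}
}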

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -18,10 +18,8 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/utsname.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/devcoredump.h>
#include "core.h"
#include "debug.h"
@@ -33,86 +31,6 @@
#define ATH10K_DEBUG_CAL_DATA_LEN 12064
#define ATH10K_FW_CRASH_DUMP_VERSION 1
/**
* enum ath10k_fw_crash_dump_type - types of data in the dump file
* @ATH10K_FW_CRASH_DUMP_REGDUMP: Register crash dump in binary format
*/
enum ath10k_fw_crash_dump_type {
ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
ATH10K_FW_CRASH_DUMP_MAX,
};
struct ath10k_tlv_dump_data {
/* see ath10k_fw_crash_dump_type above */
__le32 type;
/* in bytes */
__le32 tlv_len;
/* pad to 32-bit boundaries as needed */
u8 tlv_data[];
} __packed;
struct ath10k_dump_file_data {
/* dump file information */
/* "ATH10K-FW-DUMP" */
char df_magic[16];
__le32 len;
/* file dump version */
__le32 version;
/* some info we can get from ath10k struct that might help */
guid_t guid;
__le32 chip_id;
/* 0 for now, in place for later hardware */
__le32 bus_type;
__le32 target_version;
__le32 fw_version_major;
__le32 fw_version_minor;
__le32 fw_version_release;
__le32 fw_version_build;
__le32 phy_capability;
__le32 hw_min_tx_power;
__le32 hw_max_tx_power;
__le32 ht_cap_info;
__le32 vht_cap_info;
__le32 num_rf_chains;
/* firmware version string */
char fw_ver[ETHTOOL_FWVERS_LEN];
/* Kernel related information */
/* time-of-day stamp */
__le64 tv_sec;
/* time-of-day stamp, nano-seconds */
__le64 tv_nsec;
/* LINUX_VERSION_CODE */
__le32 kernel_ver_code;
/* VERMAGIC_STRING */
char kernel_ver[64];
/* room for growth w/out changing binary format */
u8 unused[128];
/* struct ath10k_tlv_dump_data + more */
u8 data[0];
} __packed;
void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
@@ -711,189 +629,6 @@ static const struct file_operations fops_chip_id = {
.llseek = default_llseek,
};
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
lockdep_assert_held(&ar->data_lock);
crash_data->crashed_since_read = true;
guid_gen(&crash_data->guid);
ktime_get_real_ts64(&crash_data->timestamp);
return crash_data;
}
EXPORT_SYMBOL(ath10k_debug_get_new_fw_crash_data);
static struct ath10k_dump_file_data *ath10k_build_dump_file(struct ath10k *ar,
bool mark_read)
{
struct ath10k_fw_crash_data *crash_data = ar->debug.fw_crash_data;
struct ath10k_ce_crash_hdr *ce_hdr;
struct ath10k_dump_file_data *dump_data;
struct ath10k_tlv_dump_data *dump_tlv;
size_t hdr_len = sizeof(*dump_data);
size_t len, sofar = 0;
unsigned char *buf;
len = hdr_len;
len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
CE_COUNT * sizeof(ce_hdr->entries[0]);
sofar += hdr_len;
/* This is going to get big when we start dumping FW RAM and such,
* so go ahead and use vmalloc.
*/
buf = vzalloc(len);
if (!buf)
return NULL;
spin_lock_bh(&ar->data_lock);
if (!crash_data->crashed_since_read) {
spin_unlock_bh(&ar->data_lock);
vfree(buf);
return NULL;
}
dump_data = (struct ath10k_dump_file_data *)(buf);
strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
sizeof(dump_data->df_magic));
dump_data->len = cpu_to_le32(len);
dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
guid_copy(&dump_data->guid, &crash_data->guid);
dump_data->chip_id = cpu_to_le32(ar->chip_id);
dump_data->bus_type = cpu_to_le32(0);
dump_data->target_version = cpu_to_le32(ar->target_version);
dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
sizeof(dump_data->fw_ver));
dump_data->kernel_ver_code = 0;
strlcpy(dump_data->kernel_ver, init_utsname()->release,
sizeof(dump_data->kernel_ver));
dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
/* Gather crash-dump */
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
memcpy(dump_tlv->tlv_data, &crash_data->registers,
sizeof(crash_data->registers));
sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
CE_COUNT * sizeof(ce_hdr->entries[0]));
ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
memcpy(ce_hdr->entries, crash_data->ce_crash_data,
CE_COUNT * sizeof(ce_hdr->entries[0]));
sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
CE_COUNT * sizeof(ce_hdr->entries[0]);
ar->debug.fw_crash_data->crashed_since_read = !mark_read;
spin_unlock_bh(&ar->data_lock);
return dump_data;
}
int ath10k_debug_fw_devcoredump(struct ath10k *ar)
{
struct ath10k_dump_file_data *dump;
void *dump_ptr;
u32 dump_len;
/* To keep the dump file available also for debugfs don't mark the
* file read, only debugfs should do that.
*/
dump = ath10k_build_dump_file(ar, false);
if (!dump) {
ath10k_warn(ar, "no crash dump data found for devcoredump");
return -ENODATA;
}
/* Make a copy of the dump file for dev_coredumpv() as during the
* transition period we need to own the original file. Once
* fw_crash_dump debugfs file is removed no need to have a copy
* anymore.
*/
dump_len = le32_to_cpu(dump->len);
dump_ptr = vzalloc(dump_len);
if (!dump_ptr)
return -ENOMEM;
memcpy(dump_ptr, dump, dump_len);
dev_coredumpv(ar->dev, dump_ptr, dump_len, GFP_KERNEL);
return 0;
}
static int ath10k_fw_crash_dump_open(struct inode *inode, struct file *file)
{
struct ath10k *ar = inode->i_private;
struct ath10k_dump_file_data *dump;
ath10k_warn(ar, "fw_crash_dump debugfs file is deprecated, please use /sys/class/devcoredump instead.");
dump = ath10k_build_dump_file(ar, true);
if (!dump)
return -ENODATA;
file->private_data = dump;
return 0;
}
static ssize_t ath10k_fw_crash_dump_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k_dump_file_data *dump_file = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
dump_file,
le32_to_cpu(dump_file->len));
}
static int ath10k_fw_crash_dump_release(struct inode *inode,
struct file *file)
{
vfree(file->private_data);
return 0;
}
static const struct file_operations fops_fw_crash_dump = {
.open = ath10k_fw_crash_dump_open,
.read = ath10k_fw_crash_dump_read,
.release = ath10k_fw_crash_dump_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_reg_addr_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2402,10 +2137,6 @@ static const struct file_operations fops_fw_checksums = {
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
if (!ar->debug.fw_crash_data)
return -ENOMEM;
ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
if (!ar->debug.cal_data)
return -ENOMEM;
@@ -2420,9 +2151,6 @@ int ath10k_debug_create(struct ath10k *ar)
void ath10k_debug_destroy(struct ath10k *ar)
{
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
vfree(ar->debug.cal_data);
ar->debug.cal_data = NULL;
@@ -2460,9 +2188,6 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
&fops_simulate_fw_crash);
debugfs_create_file("fw_crash_dump", 0400, ar->debug.debugfs_phy, ar,
&fops_fw_crash_dump);
debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
&fops_reg_addr);

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -42,6 +42,7 @@ enum ath10k_debug_mask {
ATH10K_DBG_SDIO_DUMP = 0x00020000,
ATH10K_DBG_USB = 0x00040000,
ATH10K_DBG_USB_BULK = 0x00080000,
ATH10K_DBG_SNOC = 0x00100000,
ATH10K_DBG_ANY = 0xffffffff,
};
@@ -100,13 +101,8 @@ void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
void ath10k_debug_tpc_stats_process(struct ath10k *ar,
struct ath10k_tpc_stats *tpc_stats);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
int ath10k_debug_fw_devcoredump(struct ath10k *ar);
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
@@ -173,12 +169,6 @@ static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
{
}
static inline struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
{
return NULL;
}
static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
{
return 0;
@@ -189,11 +179,6 @@ static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
return 0;
}
static inline int ath10k_debug_fw_devcoredump(struct ath10k *ar)
{
return 0;
}
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -207,6 +207,9 @@ int ath10k_htt_init(struct ath10k *ar)
WARN_ON(1);
return -EINVAL;
}
ath10k_htt_set_tx_ops(htt);
ath10k_htt_set_rx_ops(htt);
return 0;
}
@@ -254,11 +257,11 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
return status;
}
status = ath10k_htt_send_frag_desc_bank_cfg(htt);
status = htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
if (status)
return status;
status = ath10k_htt_send_rx_ring_cfg_ll(htt);
status = htt->tx_ops->htt_send_rx_ring_cfg(htt);
if (status) {
ath10k_warn(ar, "failed to setup rx ring: %d\n",
status);

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -107,6 +107,14 @@ struct htt_msdu_ext_desc {
struct htt_data_tx_desc_frag frags[6];
};
struct htt_msdu_ext_desc_64 {
__le32 tso_flag[5];
__le16 ip_identification;
u8 flags;
u8 reserved;
struct htt_data_tx_desc_frag frags[6];
};
#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
@@ -179,6 +187,22 @@ struct htt_data_tx_desc {
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
struct htt_data_tx_desc_64 {
u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
__le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
__le16 len;
__le16 id;
__le64 frags_paddr;
union {
__le32 peerid;
struct {
__le16 peerid;
__le16 freq;
} __packed offchan_tx;
} __packed;
u8 prefetch[0]; /* start of frame, for FW classification engine */
} __packed;
enum htt_rx_ring_flags {
HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
@@ -200,8 +224,11 @@ enum htt_rx_ring_flags {
#define HTT_RX_RING_SIZE_MIN 128
#define HTT_RX_RING_SIZE_MAX 2048
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
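With HTT_RX_RING_SIZE pinned to HTT_RX_RING_SIZE_MAX (2048), the two fill levels evaluate as follows; a quick check of the arithmetic:
/* HTT_RX_RING_FILL_LEVEL          = ((2048 / 2) - 1) = 1023
 * HTT_RX_RING_FILL_LEVEL_DUAL_MAC = (2048 - 1)       = 2047
 */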
struct htt_rx_ring_setup_ring {
struct htt_rx_ring_setup_ring32 {
__le32 fw_idx_shadow_reg_paddr;
__le32 rx_ring_base_paddr;
__le16 rx_ring_len; /* in 4-byte words */
@@ -222,14 +249,40 @@
__le16 frag_info_offset;
} __packed;
struct htt_rx_ring_setup_ring64 {
__le64 fw_idx_shadow_reg_paddr;
__le64 rx_ring_base_paddr;
__le16 rx_ring_len; /* in 4-byte words */
__le16 rx_ring_bufsize; /* rx skb size - in bytes */
__le16 flags; /* %HTT_RX_RING_FLAGS_ */
__le16 fw_idx_init_val;
/* the following offsets are in 4-byte units */
__le16 mac80211_hdr_offset;
__le16 msdu_payload_offset;
__le16 ppdu_start_offset;
__le16 ppdu_end_offset;
__le16 mpdu_start_offset;
__le16 mpdu_end_offset;
__le16 msdu_start_offset;
__le16 msdu_end_offset;
__le16 rx_attention_offset;
__le16 frag_info_offset;
} __packed;
struct htt_rx_ring_setup_hdr {
u8 num_rings; /* supported values: 1, 2 */
__le16 rsvd0;
} __packed;
struct htt_rx_ring_setup {
struct htt_rx_ring_setup_32 {
struct htt_rx_ring_setup_hdr hdr;
struct htt_rx_ring_setup_ring rings[0];
struct htt_rx_ring_setup_ring32 rings[0];
} __packed;
struct htt_rx_ring_setup_64 {
struct htt_rx_ring_setup_hdr hdr;
struct htt_rx_ring_setup_ring64 rings[0];
} __packed;
/*
@@ -855,13 +908,23 @@ struct htt_rx_in_ord_msdu_desc {
u8 reserved;
} __packed;
struct htt_rx_in_ord_msdu_desc_ext {
__le64 msdu_paddr;
__le16 msdu_len;
u8 fw_desc;
u8 reserved;
} __packed;
struct htt_rx_in_ord_ind {
u8 info;
__le16 peer_id;
u8 vdev_id;
u8 reserved;
__le16 msdu_count;
struct htt_rx_in_ord_msdu_desc msdu_descs[0];
union {
struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
} __packed;
} __packed;
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
@@ -1351,7 +1414,7 @@ struct htt_q_state_conf {
u8 pad[2];
} __packed;
struct htt_frag_desc_bank_cfg {
struct htt_frag_desc_bank_cfg32 {
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
u8 num_banks;
u8 desc_size;
@@ -1360,6 +1423,15 @@
struct htt_q_state_conf q_state;
} __packed;
struct htt_frag_desc_bank_cfg64 {
u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
u8 num_banks;
u8 desc_size;
__le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
struct htt_q_state_conf q_state;
} __packed;
#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
@@ -1531,11 +1603,13 @@ struct htt_cmd {
struct htt_ver_req ver_req;
struct htt_mgmt_tx_desc mgmt_tx;
struct htt_data_tx_desc data_tx;
struct htt_rx_ring_setup rx_setup;
struct htt_rx_ring_setup_32 rx_setup_32;
struct htt_rx_ring_setup_64 rx_setup_64;
struct htt_stats_req stats_req;
struct htt_oob_sync_req oob_sync_req;
struct htt_aggr_conf aggr_conf;
struct htt_frag_desc_bank_cfg frag_desc_bank_cfg;
struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
struct htt_tx_fetch_resp tx_fetch_resp;
};
} __packed;
@@ -1593,13 +1667,20 @@ struct htt_peer_unmap_event {
u16 peer_id;
};
struct ath10k_htt_txbuf {
struct ath10k_htt_txbuf_32 {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc cmd_tx;
} __packed;
struct ath10k_htt_txbuf_64 {
struct htt_data_tx_desc_frag frags[2];
struct ath10k_htc_hdr htc_hdr;
struct htt_cmd_hdr cmd_hdr;
struct htt_data_tx_desc_64 cmd_tx;
} __packed;
struct ath10k_htt {
struct ath10k *ar;
enum ath10k_htc_ep_id eid;
@@ -1644,7 +1725,10 @@ struct ath10k_htt {
* rx buffers the host SW provides for the MAC HW to
* fill.
*/
__le32 *paddrs_ring;
union {
__le64 *paddrs_ring_64;
__le32 *paddrs_ring_32;
};
/*
* Base address of ring, as a "physical" device address
@@ -1721,12 +1805,20 @@ struct ath10k_htt {
struct {
dma_addr_t paddr;
struct htt_msdu_ext_desc *vaddr;
union {
struct htt_msdu_ext_desc *vaddr_desc_32;
struct htt_msdu_ext_desc_64 *vaddr_desc_64;
};
size_t size;
} frag_desc;
struct {
dma_addr_t paddr;
struct ath10k_htt_txbuf *vaddr;
union {
struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
};
size_t size;
} txbuf;
struct {
@@ -1741,8 +1833,29 @@ struct ath10k_htt {
} tx_q_state;
bool tx_mem_allocated;
const struct ath10k_htt_tx_ops *tx_ops;
const struct ath10k_htt_rx_ops *rx_ops;
};
struct ath10k_htt_tx_ops {
int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
void (*htt_free_frag_desc)(struct ath10k_htt *htt);
int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu);
int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
void (*htt_free_txbuff)(struct ath10k_htt *htt);
};
struct ath10k_htt_rx_ops {
size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
int idx);
void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
};
#define RX_HTT_HDR_STATUS_LEN 64
/* This structure layout is programmed via rx ring setup
@@ -1820,8 +1933,6 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt);
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
@@ -1846,11 +1957,9 @@ int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
int ath10k_htt_tx(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb);
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
#endif

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -25,9 +25,6 @@
#include <linux/log2.h>
#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
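A minimal sketch of the retry path this comment describes; the helper is hypothetical, and the actual timer callback lives outside this hunk:
static void example_schedule_refill_retry(struct ath10k_htt *htt)
{
	/* re-arm the retry timer after a failed rx ring refill */
	mod_timer(&htt->rx_ring.refill_retry_timer,
		  jiffies + msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
}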
@@ -36,7 +33,7 @@
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u32 paddr)
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
struct ath10k_skb_rxcb *rxcb;
@@ -84,6 +81,60 @@ static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}
static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}
static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
void *vaddr)
{
htt->rx_ring.paddrs_ring_32 = vaddr;
}
static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
void *vaddr)
{
htt->rx_ring.paddrs_ring_64 = vaddr;
}
static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
dma_addr_t paddr, int idx)
{
htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}
static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
dma_addr_t paddr, int idx)
{
htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}
static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
htt->rx_ring.paddrs_ring_32[idx] = 0;
}
static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
htt->rx_ring.paddrs_ring_64[idx] = 0;
}
static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
return (void *)htt->rx_ring.paddrs_ring_32;
}
static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
return (void *)htt->rx_ring.paddrs_ring_64;
}
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
struct htt_rx_desc *rx_desc;
@@ -129,13 +180,13 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
htt->rx_ring.paddrs_ring[idx] = __cpu_to_le32(paddr);
htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
(u32)paddr);
paddr);
}
num--;
@@ -234,9 +285,8 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
ath10k_htt_rx_ring_free(htt);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
sizeof(htt->rx_ring.paddrs_ring)),
htt->rx_ring.paddrs_ring,
htt->rx_ops->htt_get_rx_ring_size(htt),
htt->rx_ops->htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
dma_free_coherent(htt->ar->dev,
@@ -263,7 +313,7 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL;
htt->rx_ring.paddrs_ring[idx] = 0;
htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
idx++;
idx &= htt->rx_ring.size_mask;
@@ -383,7 +433,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
u32 paddr)
u64 paddr)
{
struct ath10k *ar = htt->ar;
struct ath10k_skb_rxcb *rxcb;
@@ -408,12 +458,12 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
return msdu;
}
static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
int msdu_count;
@@ -458,11 +508,60 @@ static int ath10k_htt_rx_pop_paddr_list(struct ath10k_htt *htt,
return 0;
}
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
struct sk_buff *msdu;
int msdu_count;
bool is_offload;
u64 paddr;
lockdep_assert_held(&htt->rx_ring.lock);
msdu_count = __le16_to_cpu(ev->msdu_count);
is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
while (msdu_count--) {
paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!msdu) {
__skb_queue_purge(list);
return -ENOENT;
}
__skb_queue_tail(list, msdu);
if (!is_offload) {
rxd = (void *)msdu->data;
trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
skb_put(msdu, sizeof(*rxd));
skb_pull(msdu, sizeof(*rxd));
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
if (!(__le32_to_cpu(rxd->attention.flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
}
}
msdu_desc++;
}
return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
dma_addr_t paddr;
void *vaddr;
void *vaddr, *vaddr_ring;
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
@@ -473,7 +572,7 @@
*/
htt->rx_ring.size = HTT_RX_RING_SIZE;
htt->rx_ring.size_mask = htt->rx_ring.size - 1;
htt->rx_ring.fill_level = HTT_RX_RING_FILL_LEVEL;
htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
@@ -486,13 +585,13 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
size = htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring);
size = htt->rx_ops->htt_get_rx_ring_size(htt);
vaddr = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
if (!vaddr)
vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
if (!vaddr_ring)
goto err_dma_ring;
htt->rx_ring.paddrs_ring = vaddr;
htt->rx_ops->htt_config_paddrs_ring(htt, vaddr_ring);
htt->rx_ring.base_paddr = paddr;
vaddr = dma_alloc_coherent(htt->ar->dev,
@@ -526,9 +625,8 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
err_dma_idx:
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
sizeof(htt->rx_ring.paddrs_ring)),
htt->rx_ring.paddrs_ring,
htt->rx_ops->htt_get_rx_ring_size(htt),
vaddr_ring,
htt->rx_ring.base_paddr);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
@@ -1986,7 +2084,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
vdev_id, peer_id, tid, offload, frag, msdu_count);
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs)) {
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
return -EINVAL;
}
@@ -1995,7 +2093,13 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
* extracted and processed.
*/
__skb_queue_head_init(&list);
ret = ath10k_htt_rx_pop_paddr_list(htt, &resp->rx_in_ord_ind, &list);
if (ar->hw_params.target_64bit)
ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
&list);
else
ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
&list);
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
@@ -2795,3 +2899,29 @@ exit:
return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};
static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->hw_params.target_64bit)
htt->rx_ops = &htt_rx_ops_64;
else
htt->rx_ops = &htt_rx_ops_32;
}

@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -229,50 +229,91 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
idr_remove(&htt->pending_tx, msdu_id);
}
static void ath10k_htt_tx_free_cont_txbuf(struct ath10k_htt *htt)
static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
if (!htt->txbuf.vaddr)
if (!htt->txbuf.vaddr_txbuff_32)
return;
size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
dma_free_coherent(ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr);
htt->txbuf.vaddr = NULL;
size = htt->txbuf.size;
dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
htt->txbuf.paddr);
htt->txbuf.vaddr_txbuff_32 = NULL;
}
static int ath10k_htt_tx_alloc_cont_txbuf(struct ath10k_htt *htt)
static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf);
htt->txbuf.vaddr = dma_alloc_coherent(ar->dev, size, &htt->txbuf.paddr,
GFP_KERNEL);
if (!htt->txbuf.vaddr)
size = htt->max_num_pending_tx *
sizeof(struct ath10k_htt_txbuf_32);
htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
&htt->txbuf.paddr,
GFP_KERNEL);
if (!htt->txbuf.vaddr_txbuff_32)
return -ENOMEM;
htt->txbuf.size = size;
return 0;
}
static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt)
static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
if (!htt->txbuf.vaddr_txbuff_64)
return;
size = htt->txbuf.size;
dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
htt->txbuf.paddr);
htt->txbuf.vaddr_txbuff_64 = NULL;
}
static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
size = htt->max_num_pending_tx *
sizeof(struct ath10k_htt_txbuf_64);
htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
&htt->txbuf.paddr,
GFP_KERNEL);
if (!htt->txbuf.vaddr_txbuff_64)
return -ENOMEM;
htt->txbuf.size = size;
return 0;
}
static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
size_t size;
if (!htt->frag_desc.vaddr)
if (!htt->frag_desc.vaddr_desc_32)
return;
size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
size = htt->max_num_pending_tx *
sizeof(struct htt_msdu_ext_desc);
dma_free_coherent(htt->ar->dev,
size,
htt->frag_desc.vaddr,
htt->frag_desc.vaddr_desc_32,
htt->frag_desc.paddr);
htt->frag_desc.vaddr = NULL;
htt->frag_desc.vaddr_desc_32 = NULL;
}
static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
@@ -280,12 +321,57 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt)
if (!ar->hw_params.continuous_frag_desc)
return 0;
size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
&htt->frag_desc.paddr,
GFP_KERNEL);
if (!htt->frag_desc.vaddr)
size = htt->max_num_pending_tx *
sizeof(struct htt_msdu_ext_desc);
htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
&htt->frag_desc.paddr,
GFP_KERNEL);
if (!htt->frag_desc.vaddr_desc_32) {
ath10k_err(ar, "failed to alloc fragment desc memory\n");
return -ENOMEM;
}
htt->frag_desc.size = size;
return 0;
}
static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
size_t size;
if (!htt->frag_desc.vaddr_desc_64)
return;
size = htt->max_num_pending_tx *
sizeof(struct htt_msdu_ext_desc_64);
dma_free_coherent(htt->ar->dev,
size,
htt->frag_desc.vaddr_desc_64,
htt->frag_desc.paddr);
htt->frag_desc.vaddr_desc_64 = NULL;
}
static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
size_t size;
if (!ar->hw_params.continuous_frag_desc)
return 0;
size = htt->max_num_pending_tx *
sizeof(struct htt_msdu_ext_desc_64);
htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
&htt->frag_desc.paddr,
GFP_KERNEL);
if (!htt->frag_desc.vaddr_desc_64) {
ath10k_err(ar, "failed to alloc fragment desc memory\n");
return -ENOMEM;
}
htt->frag_desc.size = size;
return 0;
}
@@ -357,13 +443,13 @@ static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
int ret;
ret = ath10k_htt_tx_alloc_cont_txbuf(htt);
ret = htt->tx_ops->htt_alloc_txbuff(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
return ret;
}
ret = ath10k_htt_tx_alloc_cont_frag_desc(htt);
ret = htt->tx_ops->htt_alloc_frag_desc(htt);
if (ret) {
ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
goto free_txbuf;
@@ -387,10 +473,10 @@ free_txq:
ath10k_htt_tx_free_txq(htt);
free_frag_desc:
ath10k_htt_tx_free_cont_frag_desc(htt);
htt->tx_ops->htt_free_frag_desc(htt);
free_txbuf:
ath10k_htt_tx_free_cont_txbuf(htt);
htt->tx_ops->htt_free_txbuff(htt);
return ret;
}
@@ -444,9 +530,9 @@ void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
if (!htt->tx_mem_allocated)
return;
ath10k_htt_tx_free_cont_txbuf(htt);
htt->tx_ops->htt_free_txbuff(htt);
ath10k_htt_tx_free_txq(htt);
ath10k_htt_tx_free_cont_frag_desc(htt);
htt->tx_ops->htt_free_frag_desc(htt);
ath10k_htt_tx_free_txdone_fifo(htt);
htt->tx_mem_allocated = false;
}
@@ -545,12 +631,12 @@ int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
return 0;
}
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_frag_desc_bank_cfg *cfg;
struct htt_frag_desc_bank_cfg32 *cfg;
int ret, size;
u8 info;
@@ -562,7 +648,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
return -EINVAL;
}
size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
skb = ath10k_htc_alloc_skb(ar, size);
if (!skb)
return -ENOMEM;
@@ -579,7 +665,7 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
ar->running_fw->fw_file.fw_features))
info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
cfg = &cmd->frag_desc_bank_cfg;
cfg = &cmd->frag_desc_bank_cfg32;
cfg->info = info;
cfg->num_banks = 1;
cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
@@ -607,12 +693,112 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
return 0;
}
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring *ring;
struct htt_frag_desc_bank_cfg64 *cfg;
int ret, size;
u8 info;
if (!ar->hw_params.continuous_frag_desc)
return 0;
if (!htt->frag_desc.paddr) {
ath10k_warn(ar, "invalid frag desc memory\n");
return -EINVAL;
}
size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
skb = ath10k_htc_alloc_skb(ar, size);
if (!skb)
return -ENOMEM;
skb_put(skb, size);
cmd = (struct htt_cmd *)skb->data;
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
info = 0;
info |= SM(htt->tx_q_state.type,
HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
ar->running_fw->fw_file.fw_features))
info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
cfg = &cmd->frag_desc_bank_cfg64;
cfg->info = info;
cfg->num_banks = 1;
cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
cfg->bank_id[0].bank_min_id = 0;
cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
1);
cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
ret);
dev_kfree_skb_any(skb);
return ret;
}
return 0;
}
static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
{
struct htt_rx_ring_setup_ring32 *ring =
(struct htt_rx_ring_setup_ring32 *)rx_ring;
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}
static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
{
struct htt_rx_ring_setup_ring64 *ring =
(struct htt_rx_ring_setup_ring64 *)rx_ring;
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
#undef desc_offset
}
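/* Aside: desc_offset() in the two helpers above reports each field's
 * position within struct htt_rx_desc in 4-byte words, the unit the
 * firmware expects for these rx ring offsets. A tiny self-contained
 * illustration with a hypothetical struct (not the driver's):
 */
#include <stddef.h>

struct example_rx_desc {
	unsigned int attention;	/* bytes 0-3 -> word offset 0 */
	unsigned int frag_info;	/* bytes 4-7 -> word offset 1 */
};

#define desc_offset(x) (offsetof(struct example_rx_desc, x) / 4)
/* desc_offset(frag_info) == 1 */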
static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring32 *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
@ -626,7 +812,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
@ -635,10 +821,10 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup.rings[0];
ring = &cmd->rx_setup_32.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup.hdr.num_rings = 1;
cmd->rx_setup_32.hdr.num_rings = 1;
/* FIXME: do we need all of this? */
flags = 0;
@ -669,21 +855,76 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
ath10k_htt_fill_rx_desc_offset_32(ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
return 0;
}
#undef desc_offset
static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct sk_buff *skb;
struct htt_cmd *cmd;
struct htt_rx_ring_setup_ring64 *ring;
const int num_rx_ring = 1;
u16 flags;
u32 fw_idx;
int len;
int ret;
/* HW expects the buffer to be an integral number of 4-byte
* "words"
*/
BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+ (sizeof(*ring) * num_rx_ring);
skb = ath10k_htc_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
skb_put(skb, len);
cmd = (struct htt_cmd *)skb->data;
ring = &cmd->rx_setup_64.rings[0];
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
cmd->rx_setup_64.hdr.num_rings = 1;
flags = 0;
flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
flags |= HTT_RX_RING_FLAGS_PPDU_START;
flags |= HTT_RX_RING_FLAGS_PPDU_END;
flags |= HTT_RX_RING_FLAGS_MPDU_START;
flags |= HTT_RX_RING_FLAGS_MPDU_END;
flags |= HTT_RX_RING_FLAGS_MSDU_START;
flags |= HTT_RX_RING_FLAGS_MSDU_END;
flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
flags |= HTT_RX_RING_FLAGS_CTRL_RX;
flags |= HTT_RX_RING_FLAGS_MGMT_RX;
flags |= HTT_RX_RING_FLAGS_NULL_RX;
flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
ring->flags = __cpu_to_le16(flags);
ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
ath10k_htt_fill_rx_desc_offset_64(ring);
ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
@ -895,8 +1136,9 @@ err:
return res;
}
int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
static int ath10k_htt_tx_32(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
@ -904,7 +1146,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
struct ath10k_htt_txbuf *txbuf;
struct ath10k_htt_txbuf_32 *txbuf;
struct htt_data_tx_desc_frag *frags;
bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
@ -917,6 +1159,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
u32 frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
struct htt_msdu_ext_desc *ext_desc_t = NULL;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
@ -929,9 +1172,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
txbuf = &htt->txbuf.vaddr[msdu_id];
txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
txbuf_paddr = htt->txbuf.paddr +
(sizeof(struct ath10k_htt_txbuf) * msdu_id);
(sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
@ -962,11 +1205,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
memset(&htt->frag_desc.vaddr[msdu_id], 0,
ext_desc_t = htt->frag_desc.vaddr_desc_32;
memset(&ext_desc_t[msdu_id], 0,
sizeof(struct htt_msdu_ext_desc));
frags = (struct htt_data_tx_desc_frag *)
&htt->frag_desc.vaddr[msdu_id].frags;
ext_desc = &htt->frag_desc.vaddr[msdu_id];
&ext_desc_t[msdu_id].frags;
ext_desc = &ext_desc_t[msdu_id];
frags[0].tword_addr.paddr_lo =
__cpu_to_le32(skb_cb->paddr);
frags[0].tword_addr.paddr_hi = 0;
@ -1055,9 +1299,9 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid, freq);
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
flags0, flags1, msdu->len, msdu_id, &frags_paddr,
&skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
@ -1093,3 +1337,239 @@ err_free_msdu_id:
err:
return res;
}
static int ath10k_htt_tx_64(struct ath10k_htt *htt,
enum ath10k_hw_txrx_mode txmode,
struct sk_buff *msdu)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct ath10k_hif_sg_item sg_items[2];
struct ath10k_htt_txbuf_64 *txbuf;
struct htt_data_tx_desc_frag *frags;
bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
int prefetch_len;
int res;
u8 flags0 = 0;
u16 msdu_id, flags1 = 0;
u16 freq = 0;
dma_addr_t frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc_64 *ext_desc = NULL;
struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
goto err;
msdu_id = res;
prefetch_len = min(htt->prefetch_len, msdu->len);
prefetch_len = roundup(prefetch_len, 4);
txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
txbuf_paddr = htt->txbuf.paddr +
(sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
txmode == ATH10K_HW_TXRX_RAW &&
ieee80211_has_protected(hdr->frame_control)) {
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
}
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr);
if (res) {
res = -EIO;
goto err_free_msdu_id;
}
if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
freq = ar->scan.roc_freq;
switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
/* pass through */
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_64;
memset(&ext_desc_t[msdu_id], 0,
sizeof(struct htt_msdu_ext_desc_64));
frags = (struct htt_data_tx_desc_frag *)
&ext_desc_t[msdu_id].frags;
ext_desc = &ext_desc_t[msdu_id];
frags[0].tword_addr.paddr_lo =
__cpu_to_le32(skb_cb->paddr);
frags[0].tword_addr.paddr_hi =
__cpu_to_le16(upper_32_bits(skb_cb->paddr));
frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
frags_paddr = htt->frag_desc.paddr +
(sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
} else {
frags = txbuf->frags;
frags[0].tword_addr.paddr_lo =
__cpu_to_le32(skb_cb->paddr);
frags[0].tword_addr.paddr_hi =
__cpu_to_le16(upper_32_bits(skb_cb->paddr));
frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
frags[1].tword_addr.paddr_lo = 0;
frags[1].tword_addr.paddr_hi = 0;
frags[1].tword_addr.len_16 = 0;
}
flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
break;
case ATH10K_HW_TXRX_MGMT:
flags0 |= SM(ATH10K_HW_TXRX_MGMT,
HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
frags_paddr = skb_cb->paddr;
break;
}
/* Normally all commands go through HTC which manages tx credits for
* each endpoint and notifies when tx is completed.
*
* HTT endpoint is creditless so there's no need to care about HTC
* flags. In that case it is trivial to fill the HTC header here.
*
* MSDU transmission is considered completed upon HTT event. This
* implies no relevant resources can be freed until after the event is
received. That's why the HTC tx completion handler itself is ignored,
by setting transfer_context to NULL for all sg items.
*
* There is simply no point in pushing HTT TX_FRM through HTC tx path
* as it's a waste of resources. By bypassing HTC it is possible to
* avoid extra memory allocations, compress data structures and thus
* improve performance.
*/
txbuf->htc_hdr.eid = htt->eid;
txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
sizeof(txbuf->cmd_tx) +
prefetch_len);
txbuf->htc_hdr.flags = 0;
if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
if (msdu->ip_summed == CHECKSUM_PARTIAL &&
!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
if (ar->hw_params.continuous_frag_desc)
ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
}
/* Prevent firmware from sending up tx inspection requests. There's
* nothing ath10k can do with frames requested for inspection so force
it to simply rely on a regular tx completion with discard status.
*/
flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
txbuf->cmd_tx.flags0 = flags0;
txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
/* fill fragment descriptor */
txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
if (ath10k_mac_tx_frm_has_freq(ar)) {
txbuf->cmd_tx.offchan_tx.peerid =
__cpu_to_le16(HTT_INVALID_PEERID);
txbuf->cmd_tx.offchan_tx.freq =
__cpu_to_le16(freq);
} else {
txbuf->cmd_tx.peerid =
__cpu_to_le32(HTT_INVALID_PEERID);
}
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
flags0, flags1, msdu->len, msdu_id, &frags_paddr,
&skb_cb->paddr, vdev_id, tid, freq);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
sg_items[0].vaddr = &txbuf->htc_hdr;
sg_items[0].paddr = txbuf_paddr +
sizeof(txbuf->frags);
sg_items[0].len = sizeof(txbuf->htc_hdr) +
sizeof(txbuf->cmd_hdr) +
sizeof(txbuf->cmd_tx);
sg_items[1].transfer_id = 0;
sg_items[1].transfer_context = NULL;
sg_items[1].vaddr = msdu->data;
sg_items[1].paddr = skb_cb->paddr;
sg_items[1].len = prefetch_len;
res = ath10k_hif_tx_sg(htt->ar,
htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
sg_items, ARRAY_SIZE(sg_items));
if (res)
goto err_unmap_msdu;
return 0;
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
err:
return res;
}
static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
.htt_tx = ath10k_htt_tx_32,
.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
};
static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
.htt_tx = ath10k_htt_tx_64,
.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
};
void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->hw_params.target_64bit)
htt->tx_ops = &htt_tx_ops_64;
else
htt->tx_ops = &htt_tx_ops_32;
}
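/* A minimal sketch (not part of the patch) of how callers are expected
 * to dispatch through the ops table once ath10k_htt_set_tx_ops() has
 * run; the mac.c hunk further below shows the real call site. Types
 * are taken from the driver headers.
 */
static int example_htt_tx(struct ath10k_htt *htt,
			  enum ath10k_hw_txrx_mode txmode,
			  struct sk_buff *msdu)
{
	/* resolves to ath10k_htt_tx_32() or ath10k_htt_tx_64(),
	 * depending on ar->hw_params.target_64bit
	 */
	return htt->tx_ops->htt_tx(htt, txmode, msdu);
}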

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -561,6 +561,12 @@ struct ath10k_hw_params {
u32 num_peers;
u32 ast_skid_limit;
u32 num_wds_entries;
/* Targets supporting physical addressing capability above 32-bits */
bool target_64bit;
/* Target rx ring fill level */
u32 rx_ring_fill_level;
};
struct htt_rx_desc;
@ -882,6 +888,7 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010
#define FW_RAM_CONFIG_ADDRESS 0x0018
#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -3597,7 +3597,7 @@ static int ath10k_mac_tx_submit(struct ath10k *ar,
switch (txpath) {
case ATH10K_MAC_TX_HTT:
ret = ath10k_htt_tx(htt, txmode, skb);
ret = htt->tx_ops->htt_tx(htt, txmode, skb);
break;
case ATH10K_MAC_TX_HTT_MGMT:
ret = ath10k_htt_mgmt_tx(htt, skb);
@ -8294,7 +8294,8 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
}
ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -23,6 +23,7 @@
#include "core.h"
#include "debug.h"
#include "coredump.h"
#include "targaddrs.h"
#include "bmi.h"
@ -51,6 +52,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_TARGET_WAIT 3000
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
/* Maximum number of bytes that can be handled atomically by
* diag read and write.
*/
#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
@ -785,7 +791,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
ATH10K_SKB_RXCB(skb)->paddr = paddr;
spin_lock_bh(&ce->ce_lock);
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
spin_unlock_bh(&ce->ce_lock);
if (ret) {
dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
@ -923,7 +929,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@ -1089,7 +1095,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@ -1461,6 +1467,218 @@ static void ath10k_pci_dump_registers(struct ath10k *ar,
crash_data->registers[i] = reg_dump_values[i];
}
static int ath10k_pci_dump_memory_section(struct ath10k *ar,
const struct ath10k_mem_region *mem_region,
u8 *buf, size_t buf_len)
{
const struct ath10k_mem_section *cur_section, *next_section;
unsigned int count, section_size, skip_size;
int ret, i, j;
if (!mem_region || !buf)
return 0;
if (mem_region->section_table.size < 0)
return 0;
cur_section = &mem_region->section_table.sections[0];
if (mem_region->start > cur_section->start) {
ath10k_warn(ar, "incorrect memdump region 0x%x with section start addrress 0x%x.\n",
mem_region->start, cur_section->start);
return 0;
}
skip_size = cur_section->start - mem_region->start;
/* fill the gap between the region start address and the first
* section
*/
for (i = 0; i < skip_size; i++) {
*buf = ATH10K_MAGIC_NOT_COPIED;
buf++;
}
count = 0;
for (i = 0; cur_section != NULL; i++) {
section_size = cur_section->end - cur_section->start;
if (section_size <= 0) {
ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
cur_section->start,
cur_section->end);
break;
}
if ((i + 1) == mem_region->section_table.size) {
/* last section */
next_section = NULL;
skip_size = 0;
} else {
next_section = cur_section + 1;
if (cur_section->end > next_section->start) {
ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
next_section->start,
cur_section->end);
break;
}
skip_size = next_section->start - cur_section->end;
}
if (buf_len < (skip_size + section_size)) {
ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
break;
}
buf_len -= skip_size + section_size;
/* read section to dest memory */
ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
buf, section_size);
if (ret) {
ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
cur_section->start, ret);
break;
}
buf += section_size;
count += section_size;
/* fill in the gap between this section and the next */
for (j = 0; j < skip_size; j++) {
*buf = ATH10K_MAGIC_NOT_COPIED;
buf++;
}
count += skip_size;
if (!next_section)
/* this was the last section */
break;
cur_section = next_section;
}
return count;
}
static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
{
u32 val;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
FW_RAM_CONFIG_ADDRESS, config);
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
FW_RAM_CONFIG_ADDRESS);
if (val != config) {
ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
val, config);
return -EIO;
}
return 0;
}
static void ath10k_pci_dump_memory(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data)
{
const struct ath10k_hw_mem_layout *mem_layout;
const struct ath10k_mem_region *current_region;
struct ath10k_dump_ram_data_hdr *hdr;
u32 count, shift;
size_t buf_len;
int ret, i;
u8 *buf;
lockdep_assert_held(&ar->data_lock);
if (!crash_data)
return;
mem_layout = ath10k_coredump_get_mem_layout(ar);
if (!mem_layout)
return;
current_region = &mem_layout->region_table.regions[0];
buf = crash_data->ramdump_buf;
buf_len = crash_data->ramdump_buf_len;
memset(buf, 0, buf_len);
for (i = 0; i < mem_layout->region_table.size; i++) {
count = 0;
if (current_region->len > buf_len) {
ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
current_region->name,
current_region->len,
buf_len);
break;
}
/* To get IRAM dump, the host driver needs to switch target
* ram config from DRAM to IRAM.
*/
if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
shift = current_region->start >> 20;
ret = ath10k_pci_set_ram_config(ar, shift);
if (ret) {
ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
current_region->name, ret);
break;
}
}
/* Reserve space for the header. */
hdr = (void *)buf;
buf += sizeof(*hdr);
buf_len -= sizeof(*hdr);
if (current_region->section_table.size > 0) {
/* Copy each section individually. */
count = ath10k_pci_dump_memory_section(ar,
current_region,
buf,
current_region->len);
} else {
/* No individual memory sections defined so we can
* copy the entire memory region.
*/
ret = ath10k_pci_diag_read_mem(ar,
current_region->start,
buf,
current_region->len);
if (ret) {
ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
current_region->name, ret);
break;
}
count = current_region->len;
}
hdr->region_type = cpu_to_le32(current_region->type);
hdr->start = cpu_to_le32(current_region->start);
hdr->length = cpu_to_le32(count);
if (count == 0)
/* Note: the header remains, just with zero length. */
break;
buf += count;
buf_len -= count;
current_region++;
}
}
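/* The dump buffer produced above is a sequence of (header, data)
 * records. A hypothetical consumer-side sketch, using only names
 * visible in this diff plus standard kernel helpers; it is not part
 * of the patch.
 */
static void example_walk_ramdump(u8 *buf, size_t buf_len)
{
	while (buf_len >= sizeof(struct ath10k_dump_ram_data_hdr)) {
		struct ath10k_dump_ram_data_hdr *hdr = (void *)buf;
		u32 len = le32_to_cpu(hdr->length);

		/* hdr->region_type and hdr->start identify the region;
		 * gaps inside a region were filled with
		 * ATH10K_MAGIC_NOT_COPIED by the dump code.
		 */
		if (buf_len < sizeof(*hdr) + len)
			break;
		buf += sizeof(*hdr) + len;
		buf_len -= sizeof(*hdr) + len;
	}
}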
static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
{
struct ath10k_fw_crash_data *crash_data;
@ -1470,7 +1688,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ar->stats.fw_crash_counter++;
crash_data = ath10k_debug_get_new_fw_crash_data(ar);
crash_data = ath10k_coredump_new(ar);
if (crash_data)
scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
@ -1481,6 +1699,7 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
ath10k_print_driver_info(ar);
ath10k_pci_dump_registers(ar, crash_data);
ath10k_ce_dump_registers(ar, crash_data);
ath10k_pci_dump_memory(ar, crash_data);
spin_unlock_bh(&ar->data_lock);
@ -1858,7 +2077,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
if (ret) {
u32 unused_buffer;
dma_addr_t unused_buffer;
unsigned int unused_nbytes;
unsigned int unused_id;
@ -1871,7 +2090,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
err_resp:
if (resp) {
u32 unused_buffer;
dma_addr_t unused_buffer;
ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
dma_unmap_single(ar->dev, resp_paddr,

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -210,6 +210,10 @@ struct rx_frag_info {
u8 ring1_more_count;
u8 ring2_more_count;
u8 ring3_more_count;
u8 ring4_more_count;
u8 ring5_more_count;
u8 ring6_more_count;
u8 ring7_more_count;
} __packed;
/*
@ -471,10 +475,16 @@ struct rx_msdu_start_qca99x0 {
__le32 info2; /* %RX_MSDU_START_INFO2_ */
} __packed;
struct rx_msdu_start_wcn3990 {
__le32 info2; /* %RX_MSDU_START_INFO2_ */
__le32 info3; /* %RX_MSDU_START_INFO3_ */
} __packed;
struct rx_msdu_start {
struct rx_msdu_start_common common;
union {
struct rx_msdu_start_qca99x0 qca99x0;
struct rx_msdu_start_wcn3990 wcn3990;
} __packed;
} __packed;
@ -595,10 +605,23 @@ struct rx_msdu_end_qca99x0 {
__le32 info2;
} __packed;
struct rx_msdu_end_wcn3990 {
__le32 ipv6_crc;
__le32 tcp_seq_no;
__le32 tcp_ack_no;
__le32 info1;
__le32 info2;
__le32 rule_indication_0;
__le32 rule_indication_1;
__le32 rule_indication_2;
__le32 rule_indication_3;
} __packed;
struct rx_msdu_end {
struct rx_msdu_end_common common;
union {
struct rx_msdu_end_qca99x0 qca99x0;
struct rx_msdu_end_wcn3990 wcn3990;
} __packed;
} __packed;
@ -963,6 +986,12 @@ struct rx_pkt_end {
__le32 phy_timestamp_2;
} __packed;
struct rx_pkt_end_wcn3990 {
__le32 info0; /* %RX_PKT_END_INFO0_ */
__le64 phy_timestamp_1;
__le64 phy_timestamp_2;
} __packed;
#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
@ -998,6 +1027,12 @@ struct rx_location_info {
__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
} __packed;
struct rx_location_info_wcn3990 {
__le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
__le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
__le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
} __packed;
enum rx_phy_ppdu_end_info0 {
RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
@ -1086,6 +1121,20 @@ struct rx_ppdu_end_qca9984 {
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
struct rx_ppdu_end_wcn3990 {
struct rx_pkt_end_wcn3990 rx_pkt_end;
struct rx_location_info_wcn3990 rx_location_info;
struct rx_phy_ppdu_end rx_phy_ppdu_end;
__le32 rx_timing_offset;
__le32 reserved_info_0;
__le32 reserved_info_1;
__le32 rx_antenna_info;
__le32 rx_coex_info;
__le32 rx_mpdu_cnt_info;
__le64 phy_timestamp_tx;
__le32 rx_bb_length;
} __packed;
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
@ -1093,6 +1142,7 @@ struct rx_ppdu_end {
struct rx_ppdu_end_qca6174 qca6174;
struct rx_ppdu_end_qca99x0 qca99x0;
struct rx_ppdu_end_qca9984 qca9984;
struct rx_ppdu_end_wcn3990 wcn3990;
} __packed;
} __packed;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
* Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 Qualcomm Atheros, Inc.
* Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
* Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -2494,7 +2494,6 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
void *ptr;
int len;
u32 buf_len = msdu->len;
u16 fc;
struct ath10k_vif *arvif;
dma_addr_t mgmt_frame_dma;
u32 vdev_id;
@ -2503,7 +2502,6 @@ ath10k_wmi_tlv_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
return ERR_PTR(-EINVAL);
hdr = (struct ieee80211_hdr *)msdu->data;
fc = le16_to_cpu(hdr->frame_control);
arvif = (void *)cb->vif->drv_priv;
vdev_id = arvif->vdev_id;

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2014 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@ -196,6 +196,7 @@ enum wmi_service {
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
WMI_SERVICE_MGMT_TX_WMI,
WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
/* keep last */
WMI_SERVICE_MAX,
@ -337,6 +338,7 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
};
static inline char *wmi_service_name(int service_id)
@ -445,6 +447,7 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
default:
return NULL;
}
@ -741,6 +744,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
}
#undef SVCMAP
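/* The SVCMAP() entries above translate firmware service bits into
 * driver-side WMI_SERVICE_* flags. A sketch of the general shape (not
 * the driver's exact macro): test bit x in the little-endian service
 * bitmap `in`, set bit y in the driver bitmap `out`, bounded by the
 * firmware-reported word count `len`.
 */
#define SVCMAP(x, y, len)						\
	do {								\
		if ((x) < (len) * 32 &&					\
		    __le32_to_cpu(in[(x) / 32]) & BIT((x) % 32))	\
			__set_bit(y, out);				\
	} while (0)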
@ -2924,7 +2929,7 @@ struct wmi_ext_resource_config_10_4_cmd {
__le32 max_tdls_concurrent_buffer_sta;
};
/* strucutre describing host memory chunk. */
/* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
__le32 req_id;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015 Qualcomm Atheros, Inc.
* Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above

View File

@ -626,7 +626,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
msg_body.min_ch_time = 30;
msg_body.min_ch_time = 100;
msg_body.max_ch_time = 100;
msg_body.scan_hidden = 1;
memcpy(msg_body.mac, vif->addr, ETH_ALEN);
msg_body.p2p_search = vif->p2p;

View File

@ -956,9 +956,8 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = wil_to_wdev(wil);
wdev->preset_chandef = *chandef;
wil->monitor_chandef = *chandef;
return 0;
}
@ -1751,6 +1750,69 @@ static int wil_cfg80211_resume(struct wiphy *wiphy)
return 0;
}
static int
wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_sched_scan_request *request)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int i, rc;
wil_dbg_misc(wil,
"sched scan start: n_ssids %d, ie_len %zu, flags 0x%x\n",
request->n_ssids, request->ie_len, request->flags);
for (i = 0; i < request->n_ssids; i++) {
wil_dbg_misc(wil, "SSID[%d]:", i);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
request->ssids[i].ssid,
request->ssids[i].ssid_len, true);
}
wil_dbg_misc(wil, "channels:");
for (i = 0; i < request->n_channels; i++)
wil_dbg_misc(wil, " %d%s", request->channels[i]->hw_value,
i == request->n_channels - 1 ? "\n" : "");
wil_dbg_misc(wil, "n_match_sets %d, min_rssi_thold %d, delay %d\n",
request->n_match_sets, request->min_rssi_thold,
request->delay);
for (i = 0; i < request->n_match_sets; i++) {
struct cfg80211_match_set *ms = &request->match_sets[i];
wil_dbg_misc(wil, "MATCHSET[%d]: rssi_thold %d\n",
i, ms->rssi_thold);
wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1,
ms->ssid.ssid,
ms->ssid.ssid_len, true);
}
wil_dbg_misc(wil, "n_scan_plans %d\n", request->n_scan_plans);
for (i = 0; i < request->n_scan_plans; i++) {
struct cfg80211_sched_scan_plan *sp = &request->scan_plans[i];
wil_dbg_misc(wil, "SCAN PLAN[%d]: interval %d iterations %d\n",
i, sp->interval, sp->iterations);
}
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ, request->ie_len, request->ie);
if (rc)
return rc;
return wmi_start_sched_scan(wil, request);
}
static int
wil_cfg80211_sched_scan_stop(struct wiphy *wiphy, struct net_device *dev,
u64 reqid)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
rc = wmi_stop_sched_scan(wil);
/* The device returns an error if it thinks PNO is already stopped.
* Ignore the return code so user space and the driver get back in sync.
*/
wil_dbg_misc(wil, "sched scan stopped (%d)\n", rc);
return 0;
}
static const struct cfg80211_ops wil_cfg80211_ops = {
.add_virtual_intf = wil_cfg80211_add_iface,
.del_virtual_intf = wil_cfg80211_del_iface,
@ -1784,6 +1846,8 @@ static const struct cfg80211_ops wil_cfg80211_ops = {
.set_power_mgmt = wil_cfg80211_set_power_mgmt,
.suspend = wil_cfg80211_suspend,
.resume = wil_cfg80211_resume,
.sched_scan_start = wil_cfg80211_sched_scan_start,
.sched_scan_stop = wil_cfg80211_sched_scan_stop,
};
static void wil_wiphy_init(struct wiphy *wiphy)

View File

@ -869,7 +869,6 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
params.buf = frame;
params.len = len;
params.chan = wdev->preset_chandef.chan;
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);

View File

@ -565,7 +565,7 @@ static irqreturn_t wil6210_hardirq(int irq, void *cookie)
if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
return IRQ_NONE;
/* FIXME: IRQ mask debug */
/* IRQ mask debug */
if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
return IRQ_NONE;

View File

@ -771,11 +771,11 @@ static void wil_collect_fw_info(struct wil6210_priv *wil)
void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
{
struct wiphy *wiphy = wil_to_wiphy(wil);
int features;
wil->keep_radio_on_during_sleep =
wil->platform_ops.keep_radio_on_during_sleep &&
wil->platform_ops.keep_radio_on_during_sleep(
wil->platform_handle) &&
test_bit(WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND,
wil->platform_capa) &&
test_bit(WMI_FW_CAPABILITY_D3_SUSPEND, wil->fw_capabilities);
wil_info(wil, "keep_radio_on_during_sleep (%d)\n",
@ -785,6 +785,24 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
else
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
if (test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities)) {
wiphy->max_sched_scan_reqs = 1;
wiphy->max_sched_scan_ssids = WMI_MAX_PNO_SSID_NUM;
wiphy->max_match_sets = WMI_MAX_PNO_SSID_NUM;
wiphy->max_sched_scan_ie_len = WMI_MAX_IE_LEN;
wiphy->max_sched_scan_plans = WMI_MAX_PLANS_NUM;
}
if (wil->platform_ops.set_features) {
features = (test_bit(WMI_FW_CAPABILITY_REF_CLOCK_CONTROL,
wil->fw_capabilities) &&
test_bit(WIL_PLATFORM_CAPA_EXT_CLK,
wil->platform_capa)) ?
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL) : 0;
wil->platform_ops.set_features(wil->platform_handle, features);
}
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@ -980,6 +998,7 @@ static void wil_pre_fw_config(struct wil6210_priv *wil)
int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
unsigned long status_flags = BIT(wil_status_resetting);
wil_dbg_misc(wil, "reset\n");
@ -1000,6 +1019,16 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
if (test_bit(WIL_PLATFORM_CAPA_T_PWR_ON_0, wil->platform_capa)) {
wil_dbg_misc(wil, "Notify FW to set T_POWER_ON=0\n");
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_SUPPORT_T_POWER_ON_0);
}
if (test_bit(WIL_PLATFORM_CAPA_EXT_CLK, wil->platform_capa)) {
wil_dbg_misc(wil, "Notify FW on ext clock configuration\n");
wil_s(wil, RGF_USER_USAGE_8, BIT_USER_EXT_CLK);
}
if (wil->platform_ops.notify) {
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
@ -1009,6 +1038,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
set_bit(wil_status_resetting, wil->status);
if (test_bit(wil_status_collecting_dumps, wil->status)) {
/* Device is collecting a crash dump; cancel the reset.
* The reset will take place once crash dump collection completes.
*/
wil_dbg_misc(wil, "reject reset while collecting crash dump\n");
rc = -EBUSY;
goto out;
}
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
@ -1023,7 +1060,11 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
if (test_bit(wil_status_suspending, wil->status))
status_flags |= BIT(wil_status_suspending);
bitmap_and(wil->status, wil->status, &status_flags,
wil_status_last);
wil_dbg_misc(wil, "wil->status (0x%lx)\n", *wil->status);
mutex_unlock(&wil->wmi_mutex);
wil_mask_irq(wil);
@ -1041,14 +1082,14 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil_rx_fini(wil);
if (rc) {
wil_bl_crash_info(wil, true);
return rc;
goto out;
}
rc = wil_get_bl_info(wil);
if (rc == -EAGAIN && !load_fw) /* ignore RF error if not going up */
rc = 0;
if (rc)
return rc;
goto out;
wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
@ -1060,10 +1101,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Loading f/w from the file */
rc = wil_request_firmware(wil, wil->wil_fw_name, true);
if (rc)
return rc;
goto out;
rc = wil_request_firmware(wil, WIL_BOARD_FILE_NAME, true);
if (rc)
return rc;
goto out;
wil_pre_fw_config(wil);
wil_release_cpu(wil);
@ -1075,6 +1116,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
reinit_completion(&wil->wmi_call);
reinit_completion(&wil->halp.comp);
clear_bit(wil_status_resetting, wil->status);
if (load_fw) {
wil_configure_interrupt_moderation(wil);
wil_unmask_irq(wil);
@ -1108,6 +1151,10 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
}
return rc;
out:
clear_bit(wil_status_resetting, wil->status);
return rc;
}
void wil_fw_error_recovery(struct wil6210_priv *wil)
@ -1213,9 +1260,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan(wil, false);
mutex_unlock(&wil->p2p_wdev_mutex);
wil_reset(wil, false);
return 0;
return wil_reset(wil, false);
}
int wil_down(struct wil6210_priv *wil)

View File

@ -150,7 +150,7 @@ void *wil_if_alloc(struct device *dev)
wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
/* default monitor channel */
ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
cfg80211_chandef_create(&wil->monitor_chandef, ch, NL80211_CHAN_NO_HT);
ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
if (!ndev) {

View File

@ -31,10 +31,8 @@ static bool ftm_mode;
module_param(ftm_mode, bool, 0444);
MODULE_PARM_DESC(ftm_mode, " Set factory test mode, default - false");
#ifdef CONFIG_PM
static int wil6210_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused);
#endif /* CONFIG_PM */
static
void wil_set_capabilities(struct wil6210_priv *wil)
@ -43,9 +41,11 @@ void wil_set_capabilities(struct wil6210_priv *wil)
u32 jtag_id = wil_r(wil, RGF_USER_JTAG_DEV_ID);
u8 chip_revision = (wil_r(wil, RGF_USER_REVISION_ID) &
RGF_USER_REVISION_ID_MASK);
int platform_capa;
bitmap_zero(wil->hw_capabilities, hw_capability_last);
bitmap_zero(wil->fw_capabilities, WMI_FW_CAPABILITY_MAX);
bitmap_zero(wil->platform_capa, WIL_PLATFORM_CAPA_MAX);
wil->wil_fw_name = ftm_mode ? WIL_FW_NAME_FTM_DEFAULT :
WIL_FW_NAME_DEFAULT;
wil->chip_revision = chip_revision;
@ -81,6 +81,14 @@ void wil_set_capabilities(struct wil6210_priv *wil)
wil_info(wil, "Board hardware is %s\n", wil->hw_name);
/* Get platform capabilities */
if (wil->platform_ops.get_capa) {
platform_capa =
wil->platform_ops.get_capa(wil->platform_handle);
memcpy(wil->platform_capa, &platform_capa,
min(sizeof(wil->platform_capa), sizeof(platform_capa)));
}
/* extract FW capabilities from file without loading the FW */
wil_request_firmware(wil, wil->wil_fw_name, false);
wil_refresh_fw_capabilities(wil);
@ -206,6 +214,8 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
.fw_recovery = wil_platform_rop_fw_recovery,
};
u32 bar_size = pci_resource_len(pdev, 0);
int dma_addr_size[] = {48, 40, 32}; /* keep descending order */
int i;
/* check HW */
dev_info(&pdev->dev, WIL_NAME
@ -241,21 +251,23 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to err_plat */
/* device supports 48bit addresses */
rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (rc) {
dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
/* device supports >32bit addresses */
for (i = 0; i < ARRAY_SIZE(dma_addr_size); i++) {
rc = dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(dma_addr_size[i]));
if (rc) {
dev_err(dev,
"dma_set_mask_and_coherent(32) failed: %d\n",
rc);
goto err_plat;
dev_err(dev, "dma_set_mask_and_coherent(%d) failed: %d\n",
dma_addr_size[i], rc);
continue;
}
} else {
wil->use_extended_dma_addr = 1;
dev_info(dev, "using dma mask %d", dma_addr_size[i]);
wil->dma_addr_size = dma_addr_size[i];
break;
}
if (wil->dma_addr_size == 0)
goto err_plat;
rc = pci_enable_device(pdev);
if (rc && pdev->msi_enabled == 0) {
wil_err(wil,
@ -307,15 +319,15 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto bus_disable;
}
#ifdef CONFIG_PM
wil->pm_notify.notifier_call = wil6210_pm_notify;
if (IS_ENABLED(CONFIG_PM))
wil->pm_notify.notifier_call = wil6210_pm_notify;
rc = register_pm_notifier(&wil->pm_notify);
if (rc)
/* Do not fail the driver initialization, as suspend can
* be prevented in a later phase if needed
*/
wil_err(wil, "register_pm_notifier failed: %d\n", rc);
#endif /* CONFIG_PM */
wil6210_debugfs_init(wil);
@ -346,9 +358,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
wil_dbg_misc(wil, "pcie_remove\n");
#ifdef CONFIG_PM
unregister_pm_notifier(&wil->pm_notify);
#endif /* CONFIG_PM */
wil_pm_runtime_forbid(wil);
@ -372,8 +382,6 @@ static const struct pci_device_id wil6210_pcie_ids[] = {
};
MODULE_DEVICE_TABLE(pci, wil6210_pcie_ids);
#ifdef CONFIG_PM
static int wil6210_suspend(struct device *dev, bool is_runtime)
{
int rc = 0;
@ -481,17 +489,17 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
return rc;
}
static int wil6210_pm_suspend(struct device *dev)
static int __maybe_unused wil6210_pm_suspend(struct device *dev)
{
return wil6210_suspend(dev, false);
}
static int wil6210_pm_resume(struct device *dev)
static int __maybe_unused wil6210_pm_resume(struct device *dev)
{
return wil6210_resume(dev, false);
}
static int wil6210_pm_runtime_idle(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_idle(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
@ -501,12 +509,12 @@ static int wil6210_pm_runtime_idle(struct device *dev)
return wil_can_suspend(wil, true);
}
static int wil6210_pm_runtime_resume(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_resume(struct device *dev)
{
return wil6210_resume(dev, true);
}
static int wil6210_pm_runtime_suspend(struct device *dev)
static int __maybe_unused wil6210_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
@ -518,15 +526,12 @@ static int wil6210_pm_runtime_suspend(struct device *dev)
return wil6210_suspend(dev, true);
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops wil6210_pm_ops = {
#ifdef CONFIG_PM
SET_SYSTEM_SLEEP_PM_OPS(wil6210_pm_suspend, wil6210_pm_resume)
SET_RUNTIME_PM_OPS(wil6210_pm_runtime_suspend,
wil6210_pm_runtime_resume,
wil6210_pm_runtime_idle)
#endif /* CONFIG_PM */
};
static struct pci_driver wil6210_driver = {

View File

@ -145,6 +145,13 @@ static int wil_suspend_keep_radio_on(struct wil6210_priv *wil)
/* Prevent handling of new tx and wmi commands */
set_bit(wil_status_suspending, wil->status);
if (test_bit(wil_status_collecting_dumps, wil->status)) {
/* Device is collecting a crash dump; cancel the suspend */
wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
clear_bit(wil_status_suspending, wil->status);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
wil_update_net_queues_bh(wil, NULL, true);
if (!wil_is_tx_idle(wil)) {
@ -255,6 +262,15 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
wil_dbg_pm(wil, "suspend radio off\n");
set_bit(wil_status_suspending, wil->status);
if (test_bit(wil_status_collecting_dumps, wil->status)) {
/* Device is collecting a crash dump; cancel the suspend */
wil_dbg_pm(wil, "reject suspend while collecting crash dump\n");
clear_bit(wil_status_suspending, wil->status);
wil->suspend_stats.rejected_by_host++;
return -EBUSY;
}
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
rc = wil_down(wil);
@ -281,6 +297,7 @@ static int wil_suspend_radio_off(struct wil6210_priv *wil)
set_bit(wil_status_suspended, wil->status);
out:
clear_bit(wil_status_suspending, wil->status);
wil_dbg_pm(wil, "suspend radio off: %d\n", rc);
return rc;

View File

@ -111,14 +111,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
*
* HW has a limitation that all vring addresses must share the same
* upper 16 bits of the 48-bit address. To work around that,
* if we are using 48 bit addresses switch to 32 bit allocation
* before allocating vring memory.
* if we are using more than 32 bit addresses switch to 32 bit
* allocation before allocating vring memory.
*
* There's no check for the return value of dma_set_mask_and_coherent,
* since we assume that if we were able to set the mask during
* initialization on this system, it will not fail if we set it again
*/
if (wil->use_extended_dma_addr)
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
pmc->pring_va = dma_alloc_coherent(dev,
@ -126,8 +126,9 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
&pmc->pring_pa,
GFP_KERNEL);
if (wil->use_extended_dma_addr)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(wil->dma_addr_size));
wil_dbg_misc(wil,
"pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",

View File

@ -178,14 +178,14 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
*
* HW has a limitation that all vring addresses must share the same
* upper 16 bits of the 48-bit address. To work around that,
* if we are using 48 bit addresses switch to 32 bit allocation
* before allocating vring memory.
* if we are using more than 32 bit addresses switch to 32 bit
* allocation before allocating vring memory.
*
* There's no check for the return value of dma_set_mask_and_coherent,
* since we assume that if we were able to set the mask during
* initialization on this system, it will not fail if we set it again
*/
if (wil->use_extended_dma_addr)
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
@ -195,8 +195,9 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
return -ENOMEM;
}
if (wil->use_extended_dma_addr)
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (wil->dma_addr_size > 32)
dma_set_mask_and_coherent(dev,
DMA_BIT_MASK(wil->dma_addr_size));
/* initially, all descriptors are SW owned
* For Tx and Rx, ownership bit is at the same location, thus
@ -347,7 +348,6 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
struct sk_buff *skb)
{
struct wireless_dev *wdev = wil->wdev;
struct wil6210_rtap {
struct ieee80211_radiotap_header rthdr;
/* fields should be in the order of bits in rthdr.it_present */
@ -374,7 +374,7 @@ static void wil_rx_add_radiotap_header(struct wil6210_priv *wil,
int rtap_len = sizeof(struct wil6210_rtap);
int phy_length = 0; /* phy info header size, bytes */
static char phy_data[128];
struct ieee80211_channel *ch = wdev->preset_chandef.chan;
struct ieee80211_channel *ch = wil->monitor_chandef.chan;
if (rtap_include_phy_info) {
rtap_len = sizeof(*rtap_vendor) + sizeof(*d);

View File

@ -161,6 +161,10 @@ struct RGF_ICR {
#define RGF_USER_USAGE_6 (0x880018)
#define BIT_USER_OOB_MODE BIT(31)
#define BIT_USER_OOB_R2_MODE BIT(30)
#define RGF_USER_USAGE_8 (0x880020)
#define BIT_USER_PREVENT_DEEP_SLEEP BIT(0)
#define BIT_USER_SUPPORT_T_POWER_ON_0 BIT(1)
#define BIT_USER_EXT_CLK BIT(2)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
@ -435,12 +439,13 @@ enum { /* for wil6210_priv.status */
wil_status_fwconnected,
wil_status_dontscan,
wil_status_mbox_ready, /* MBOX structures ready */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_irqen, /* interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
wil_status_suspending, /* suspend in progress */
wil_status_suspended, /* suspend completed, device is suspended */
wil_status_resuming, /* resume in progress */
wil_status_collecting_dumps, /* crashdump collection in progress */
wil_status_last /* keep last */
};
@ -643,12 +648,14 @@ struct wil6210_priv {
const char *wil_fw_name;
DECLARE_BITMAP(hw_capabilities, hw_capability_last);
DECLARE_BITMAP(fw_capabilities, WMI_FW_CAPABILITY_MAX);
DECLARE_BITMAP(platform_capa, WIL_PLATFORM_CAPA_MAX);
u8 n_mids; /* number of additional MIDs as reported by FW */
u32 recovery_count; /* num of FW recovery attempts in a short time */
u32 recovery_state; /* FW recovery state machine */
unsigned long last_fw_recovery; /* jiffies of last fw recovery */
wait_queue_head_t wq; /* for all wait_event() use */
/* profile */
struct cfg80211_chan_def monitor_chandef;
u32 monitor_flags;
u32 privacy; /* secure connection? */
u8 hidden_ssid; /* relevant in AP mode */
@ -704,7 +711,7 @@ struct wil6210_priv {
struct wil_sta_info sta[WIL6210_MAX_CID];
int bcast_vring;
u32 vring_idle_trsh; /* HW fetches up to 16 descriptors at once */
bool use_extended_dma_addr; /* indicates whether we are using 48 bits */
u32 dma_addr_size; /* indicates dma addr size */
/* scan */
struct cfg80211_scan_request *scan_request;
@ -742,9 +749,7 @@ struct wil6210_priv {
int fw_calib_result;
#ifdef CONFIG_PM
struct notifier_block pm_notify;
#endif /* CONFIG_PM */
bool suspend_resp_rcvd;
bool suspend_resp_comp;
@ -1032,4 +1037,8 @@ void wil_halp_unvote(struct wil6210_priv *wil);
void wil6210_set_halp(struct wil6210_priv *wil);
void wil6210_clear_halp(struct wil6210_priv *wil);
int wmi_start_sched_scan(struct wil6210_priv *wil,
struct cfg80211_sched_scan_request *request);
int wmi_stop_sched_scan(struct wil6210_priv *wil);
#endif /* __WIL6210_H__ */

View File

@ -72,6 +72,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
set_bit(wil_status_collecting_dumps, wil->status);
if (test_bit(wil_status_suspending, wil->status) ||
test_bit(wil_status_suspended, wil->status) ||
test_bit(wil_status_resetting, wil->status)) {
wil_err(wil, "cannot collect fw dump during suspend/reset\n");
clear_bit(wil_status_collecting_dumps, wil->status);
return -EINVAL;
}
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
map = &fw_mapping[i];
@ -91,6 +100,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
(const void __iomem * __force)data, len);
}
clear_bit(wil_status_collecting_dumps, wil->status);
return 0;
}
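The hunk above publishes wil_status_collecting_dumps before testing the suspend/reset bits, and drops it again on conflict; presumably the suspend path performs the mirrored check, so whichever side sets its bit second is guaranteed to observe the other. A minimal sketch of the pattern, assuming the same status bitmap (the start_dump() name is illustrative, not from this patch):

#include <linux/bitops.h>
#include <linux/errno.h>

static int start_dump(struct wil6210_priv *wil)
{
	set_bit(wil_status_collecting_dumps, wil->status);
	/* test only after publishing our own bit, so a racing suspend
	 * cannot slip between the two checks unobserved */
	if (test_bit(wil_status_suspending, wil->status) ||
	    test_bit(wil_status_suspended, wil->status) ||
	    test_bit(wil_status_resetting, wil->status)) {
		clear_bit(wil_status_collecting_dumps, wil->status);
		return -EINVAL;
	}
	return 0; /* caller clears the bit once the dump completes */
}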


@ -27,6 +27,18 @@ enum wil_platform_event {
WIL_PLATFORM_EVT_POST_SUSPEND = 4,
};
enum wil_platform_features {
WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL = 0,
WIL_PLATFORM_FEATURE_MAX,
};
enum wil_platform_capa {
WIL_PLATFORM_CAPA_RADIO_ON_IN_SUSPEND = 0,
WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
WIL_PLATFORM_CAPA_EXT_CLK = 2,
WIL_PLATFORM_CAPA_MAX,
};
/**
* struct wil_platform_ops - wil platform module calls from this
* driver to platform driver
@ -37,7 +49,8 @@ struct wil_platform_ops {
int (*resume)(void *handle, bool device_powered_on);
void (*uninit)(void *handle);
int (*notify)(void *handle, enum wil_platform_event evt);
bool (*keep_radio_on_during_sleep)(void *handle);
int (*get_capa)(void *handle);
void (*set_features)(void *handle, int features);
};
/**


@ -38,6 +38,7 @@ MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
#define WIL_WAIT_FOR_SUSPEND_RESUME_COMP 200
#define WIL_WMI_CALL_GENERAL_TO_MS 100
/**
* WMI event receiving - theory of operations
@ -314,6 +315,10 @@ static const char *cmdid2name(u16 cmdid)
return "WMI_LINK_MAINTAIN_CFG_WRITE_CMD";
case WMI_LO_POWER_CALIB_FROM_OTP_CMDID:
return "WMI_LO_POWER_CALIB_FROM_OTP_CMD";
case WMI_START_SCHED_SCAN_CMDID:
return "WMI_START_SCHED_SCAN_CMD";
case WMI_STOP_SCHED_SCAN_CMDID:
return "WMI_STOP_SCHED_SCAN_CMD";
default:
return "Untracked CMD";
}
@ -428,6 +433,12 @@ static const char *eventid2name(u16 eventid)
return "WMI_LINK_MAINTAIN_CFG_WRITE_DONE_EVENT";
case WMI_LO_POWER_CALIB_FROM_OTP_EVENTID:
return "WMI_LO_POWER_CALIB_FROM_OTP_EVENT";
case WMI_START_SCHED_SCAN_EVENTID:
return "WMI_START_SCHED_SCAN_EVENT";
case WMI_STOP_SCHED_SCAN_EVENTID:
return "WMI_STOP_SCHED_SCAN_EVENT";
case WMI_SCHED_SCAN_RESULT_EVENTID:
return "WMI_SCHED_SCAN_RESULT_EVENT";
default:
return "Untracked EVENT";
}
@ -802,8 +813,6 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
}
/* FIXME FW can transmit only ucast frames to peer */
/* FIXME real ring_id instead of hard coded 0 */
ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid);
wil->sta[evt->cid].status = wil_sta_conn_pending;
@ -1066,6 +1075,75 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
static void
wmi_evt_sched_scan_result(struct wil6210_priv *wil, int id, void *d, int len)
{
struct wmi_sched_scan_result_event *data = d;
struct wiphy *wiphy = wil_to_wiphy(wil);
struct ieee80211_mgmt *rx_mgmt_frame =
(struct ieee80211_mgmt *)data->payload;
int flen = len - offsetof(struct wmi_sched_scan_result_event, payload);
int ch_no;
u32 freq;
struct ieee80211_channel *channel;
s32 signal;
__le16 fc;
u32 d_len;
struct cfg80211_bss *bss;
if (flen < 0) {
wil_err(wil, "sched scan result event too short, len %d\n",
len);
return;
}
d_len = le32_to_cpu(data->info.len);
if (d_len != flen) {
wil_err(wil,
"sched scan result length mismatch, d_len %d should be %d\n",
d_len, flen);
return;
}
fc = rx_mgmt_frame->frame_control;
if (!ieee80211_is_probe_resp(fc)) {
wil_err(wil, "sched scan result invalid frame, fc 0x%04x\n",
fc);
return;
}
ch_no = data->info.channel + 1;
freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (test_bit(WMI_FW_CAPABILITY_RSSI_REPORTING, wil->fw_capabilities))
signal = 100 * data->info.rssi;
else
signal = data->info.sqi;
wil_dbg_wmi(wil, "sched scan result: channel %d MCS %d RSSI %d\n",
data->info.channel, data->info.mcs, data->info.rssi);
wil_dbg_wmi(wil, "len %d qid %d mid %d cid %d\n",
d_len, data->info.qid, data->info.mid, data->info.cid);
wil_hex_dump_wmi("PROBE ", DUMP_PREFIX_OFFSET, 16, 1, rx_mgmt_frame,
d_len, true);
if (!channel) {
wil_err(wil, "Frame on unsupported channel\n");
return;
}
bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
d_len, signal, GFP_KERNEL);
if (bss) {
wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
cfg80211_put_bss(wiphy, bss);
} else {
wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
}
cfg80211_sched_scan_results(wiphy, 0);
}
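The two signal branches above follow cfg80211's unit convention: cfg80211_inform_bss_frame() treats signal as mBm (dBm * 100) when the wiphy registered CFG80211_SIGNAL_TYPE_MBM, and as a 0..100 quality value for CFG80211_SIGNAL_TYPE_UNSPEC; presumably the driver selects the signal type from the same RSSI_REPORTING capability. A worked example of the mBm conversion:

	s8 rssi = -65;           /* firmware-reported dBm */
	s32 signal = 100 * rssi; /* -6500 mBm, the unit cfg80211 expects */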
/**
* Some events are ignored on purpose and need not be interpreted as
* "unhandled events"
@ -1093,6 +1171,7 @@ static const struct {
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
{WMI_SCHED_SCAN_RESULT_EVENTID, wmi_evt_sched_scan_result},
};
/*
@ -1703,7 +1782,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
int rc;
if (wdev->iftype == NL80211_IFTYPE_MONITOR) {
struct ieee80211_channel *ch = wdev->preset_chandef.chan;
struct ieee80211_channel *ch = wil->monitor_chandef.chan;
cmd.sniffer_cfg.mode = cpu_to_le32(WMI_SNIFFER_ON);
if (ch)
@ -2284,3 +2363,159 @@ out:
spin_unlock_irqrestore(&wil->wmi_ev_lock, flags);
return rc;
}
static void
wmi_sched_scan_set_ssids(struct wil6210_priv *wil,
struct wmi_start_sched_scan_cmd *cmd,
struct cfg80211_ssid *ssids, int n_ssids,
struct cfg80211_match_set *match_sets,
int n_match_sets)
{
int i;
if (n_match_sets > WMI_MAX_PNO_SSID_NUM) {
wil_dbg_wmi(wil, "too many match sets (%d), use first %d\n",
n_match_sets, WMI_MAX_PNO_SSID_NUM);
n_match_sets = WMI_MAX_PNO_SSID_NUM;
}
cmd->num_of_ssids = n_match_sets;
for (i = 0; i < n_match_sets; i++) {
struct wmi_sched_scan_ssid_match *wmi_match =
&cmd->ssid_for_match[i];
struct cfg80211_match_set *cfg_match = &match_sets[i];
int j;
wmi_match->ssid_len = cfg_match->ssid.ssid_len;
memcpy(wmi_match->ssid, cfg_match->ssid.ssid,
min_t(u8, wmi_match->ssid_len, WMI_MAX_SSID_LEN));
wmi_match->rssi_threshold = S8_MIN;
if (cfg_match->rssi_thold >= S8_MIN &&
cfg_match->rssi_thold <= S8_MAX)
wmi_match->rssi_threshold = cfg_match->rssi_thold;
for (j = 0; j < n_ssids; j++)
if (wmi_match->ssid_len == ssids[j].ssid_len &&
memcmp(wmi_match->ssid, ssids[j].ssid,
wmi_match->ssid_len) == 0)
wmi_match->add_ssid_to_probe = true;
}
}
static void
wmi_sched_scan_set_channels(struct wil6210_priv *wil,
struct wmi_start_sched_scan_cmd *cmd,
u32 n_channels,
struct ieee80211_channel **channels)
{
int i;
if (n_channels > WMI_MAX_CHANNEL_NUM) {
wil_dbg_wmi(wil, "too many channels (%d), use first %d\n",
n_channels, WMI_MAX_CHANNEL_NUM);
n_channels = WMI_MAX_CHANNEL_NUM;
}
cmd->num_of_channels = n_channels;
for (i = 0; i < n_channels; i++) {
struct ieee80211_channel *cfg_chan = channels[i];
cmd->channel_list[i] = cfg_chan->hw_value - 1;
}
}
static void
wmi_sched_scan_set_plans(struct wil6210_priv *wil,
struct wmi_start_sched_scan_cmd *cmd,
struct cfg80211_sched_scan_plan *scan_plans,
int n_scan_plans)
{
int i;
if (n_scan_plans > WMI_MAX_PLANS_NUM) {
wil_dbg_wmi(wil, "too many plans (%d), use first %d\n",
n_scan_plans, WMI_MAX_PLANS_NUM);
n_scan_plans = WMI_MAX_PLANS_NUM;
}
for (i = 0; i < n_scan_plans; i++) {
struct cfg80211_sched_scan_plan *cfg_plan = &scan_plans[i];
cmd->scan_plans[i].interval_sec =
cpu_to_le16(cfg_plan->interval);
cmd->scan_plans[i].num_of_iterations =
cpu_to_le16(cfg_plan->iterations);
}
}
int wmi_start_sched_scan(struct wil6210_priv *wil,
struct cfg80211_sched_scan_request *request)
{
int rc;
struct wmi_start_sched_scan_cmd cmd = {
.min_rssi_threshold = S8_MIN,
.initial_delay_sec = cpu_to_le16(request->delay),
};
struct {
struct wmi_cmd_hdr wmi;
struct wmi_start_sched_scan_event evt;
} __packed reply;
if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
return -ENOTSUPP;
if (request->min_rssi_thold >= S8_MIN &&
request->min_rssi_thold <= S8_MAX)
cmd.min_rssi_threshold = request->min_rssi_thold;
wmi_sched_scan_set_ssids(wil, &cmd, request->ssids, request->n_ssids,
request->match_sets, request->n_match_sets);
wmi_sched_scan_set_channels(wil, &cmd,
request->n_channels, request->channels);
wmi_sched_scan_set_plans(wil, &cmd,
request->scan_plans, request->n_scan_plans);
reply.evt.result = WMI_PNO_REJECT;
rc = wmi_call(wil, WMI_START_SCHED_SCAN_CMDID, &cmd, sizeof(cmd),
WMI_START_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.result != WMI_PNO_SUCCESS) {
wil_err(wil, "start sched scan failed, result %d\n",
reply.evt.result);
return -EINVAL;
}
return 0;
}
int wmi_stop_sched_scan(struct wil6210_priv *wil)
{
int rc;
struct {
struct wmi_cmd_hdr wmi;
struct wmi_stop_sched_scan_event evt;
} __packed reply;
if (!test_bit(WMI_FW_CAPABILITY_PNO, wil->fw_capabilities))
return -ENOTSUPP;
reply.evt.result = WMI_PNO_REJECT;
rc = wmi_call(wil, WMI_STOP_SCHED_SCAN_CMDID, NULL, 0,
WMI_STOP_SCHED_SCAN_EVENTID, &reply, sizeof(reply),
WIL_WMI_CALL_GENERAL_TO_MS);
if (rc)
return rc;
if (reply.evt.result != WMI_PNO_SUCCESS) {
wil_err(wil, "stop sched scan failed, result %d\n",
reply.evt.result);
return -EINVAL;
}
return 0;
}
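A sketch of how these two helpers would typically be exposed through the driver's cfg80211 ops; the wil_cfg80211_* names and the wiphy_to_wil() accessor are illustrative assumptions, not taken from this patch:

static int wil_cfg80211_sched_scan_start(struct wiphy *wiphy,
					 struct net_device *dev,
					 struct cfg80211_sched_scan_request *request)
{
	struct wil6210_priv *wil = wiphy_to_wil(wiphy);

	return wmi_start_sched_scan(wil, request);
}

static int wil_cfg80211_sched_scan_stop(struct wiphy *wiphy,
					struct net_device *dev, u64 reqid)
{
	struct wil6210_priv *wil = wiphy_to_wil(wiphy);

	return wmi_stop_sched_scan(wil);
}

static const struct cfg80211_ops wil_cfg80211_ops = {
	.sched_scan_start = wil_cfg80211_sched_scan_start,
	.sched_scan_stop = wil_cfg80211_sched_scan_stop,
	/* remaining ops elided */
};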


@ -71,6 +71,8 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RSSI_REPORTING = 12,
WMI_FW_CAPABILITY_SET_SILENT_RSSI_TABLE = 13,
WMI_FW_CAPABILITY_LO_POWER_CALIB_FROM_OTP = 14,
WMI_FW_CAPABILITY_PNO = 15,
WMI_FW_CAPABILITY_REF_CLOCK_CONTROL = 18,
WMI_FW_CAPABILITY_MAX,
};
@ -87,6 +89,8 @@ enum wmi_command_id {
WMI_CONNECT_CMDID = 0x01,
WMI_DISCONNECT_CMDID = 0x03,
WMI_DISCONNECT_STA_CMDID = 0x04,
WMI_START_SCHED_SCAN_CMDID = 0x05,
WMI_STOP_SCHED_SCAN_CMDID = 0x06,
WMI_START_SCAN_CMDID = 0x07,
WMI_SET_BSS_FILTER_CMDID = 0x09,
WMI_SET_PROBED_SSID_CMDID = 0x0A,
@ -385,6 +389,38 @@ struct wmi_start_scan_cmd {
} channel_list[0];
} __packed;
#define WMI_MAX_PNO_SSID_NUM (16)
#define WMI_MAX_CHANNEL_NUM (6)
#define WMI_MAX_PLANS_NUM (2)
/* WMI_START_SCHED_SCAN_CMDID */
struct wmi_sched_scan_ssid_match {
u8 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
s8 rssi_threshold;
/* boolean */
u8 add_ssid_to_probe;
u8 reserved;
} __packed;
/* WMI_START_SCHED_SCAN_CMDID */
struct wmi_sched_scan_plan {
__le16 interval_sec;
__le16 num_of_iterations;
} __packed;
/* WMI_START_SCHED_SCAN_CMDID */
struct wmi_start_sched_scan_cmd {
struct wmi_sched_scan_ssid_match ssid_for_match[WMI_MAX_PNO_SSID_NUM];
u8 num_of_ssids;
s8 min_rssi_threshold;
u8 channel_list[WMI_MAX_CHANNEL_NUM];
u8 num_of_channels;
u8 reserved;
__le16 initial_delay_sec;
struct wmi_sched_scan_plan scan_plans[WMI_MAX_PLANS_NUM];
} __packed;
/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
@ -1238,6 +1274,9 @@ enum wmi_event_id {
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
WMI_START_SCHED_SCAN_EVENTID = 0x1005,
WMI_STOP_SCHED_SCAN_EVENTID = 0x1006,
WMI_SCHED_SCAN_RESULT_EVENTID = 0x1007,
WMI_SCAN_COMPLETE_EVENTID = 0x100A,
WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
@ -1600,6 +1639,49 @@ struct wmi_scan_complete_event {
__le32 status;
} __packed;
/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
s8 rssi;
u8 range;
u8 sqi;
__le16 stype;
__le16 status;
__le32 len;
/* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 qid;
/* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 mid;
u8 cid;
/* From Radio MNGR */
u8 channel;
} __packed;
/* WMI_START_SCHED_SCAN_EVENTID */
enum wmi_pno_result {
WMI_PNO_SUCCESS = 0x00,
WMI_PNO_REJECT = 0x01,
WMI_PNO_INVALID_PARAMETERS = 0x02,
WMI_PNO_NOT_ENABLED = 0x03,
};
struct wmi_start_sched_scan_event {
/* pno_result */
u8 result;
u8 reserved[3];
} __packed;
struct wmi_stop_sched_scan_event {
/* pno_result */
u8 result;
u8 reserved[3];
} __packed;
struct wmi_sched_scan_result_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];
} __packed;
/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENT */
enum wmi_acs_info_bitmask {
WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
@ -1814,24 +1896,6 @@ struct wmi_get_ssid_event {
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
s8 rssi;
u8 range;
u8 sqi;
__le16 stype;
__le16 status;
__le32 len;
/* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 qid;
/* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 mid;
u8 cid;
/* From Radio MNGR */
u8 channel;
} __packed;
/* EVENT: WMI_RF_XPM_READ_RESULT_EVENTID */
struct wmi_rf_xpm_read_result_event {
/* enum wmi_fw_status_e - success=0 or fail=1 */


@ -1031,7 +1031,7 @@ static void b43_radio_2057_init_post(struct b43_wldev *dev)
b43_radio_set(dev, R2057_RFPLL_MISC_CAL_RESETN, 0x78);
b43_radio_set(dev, R2057_XTAL_CONFIG2, 0x80);
mdelay(2);
usleep_range(2000, 3000);
b43_radio_mask(dev, R2057_RFPLL_MISC_CAL_RESETN, ~0x78);
b43_radio_mask(dev, R2057_XTAL_CONFIG2, ~0x80);


@ -118,7 +118,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler,
pdata->oob_irq_flags, "brcmf_oob_intr",
&sdiodev->func[1]->dev);
&sdiodev->func1->dev);
if (ret != 0) {
brcmf_err("request_irq failed %d\n", ret);
return ret;
@ -132,7 +132,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
}
sdiodev->irq_wake = true;
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
if (sdiodev->bus_if->chip == BRCM_CC_43362_CHIP_ID) {
/* assign GPIO to SDIO core */
@ -149,7 +149,8 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
/* must configure SDIO_CCCR_IENx to enable irq */
data = brcmf_sdiod_func0_rb(sdiodev, SDIO_CCCR_IENx, &ret);
data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
data |= SDIO_CCCR_IEN_FUNC1 | SDIO_CCCR_IEN_FUNC2 |
SDIO_CCCR_IEN_FUNC0;
brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, data, &ret);
/* redirect, configure and enable io for interrupt signal */
@ -158,13 +159,13 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev)
data |= SDIO_CCCR_BRCM_SEPINT_ACT_HI;
brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT,
data, &ret);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
} else {
brcmf_dbg(SDIO, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
sdio_claim_irq(sdiodev->func[1], brcmf_sdiod_ib_irqhandler);
sdio_claim_irq(sdiodev->func[2], brcmf_sdiod_dummy_irqhandler);
sdio_release_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
sdio_claim_irq(sdiodev->func1, brcmf_sdiod_ib_irqhandler);
sdio_claim_irq(sdiodev->func2, brcmf_sdiod_dummy_irqhandler);
sdio_release_host(sdiodev->func1);
sdiodev->sd_irq_requested = true;
}
@ -182,26 +183,26 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev)
struct brcmfmac_sdio_pd *pdata;
pdata = &sdiodev->settings->bus.sdio;
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL);
brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_IENx, 0, NULL);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
sdiodev->oob_irq_requested = false;
if (sdiodev->irq_wake) {
disable_irq_wake(pdata->oob_irq_nr);
sdiodev->irq_wake = false;
}
free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev);
free_irq(pdata->oob_irq_nr, &sdiodev->func1->dev);
sdiodev->irq_en = false;
sdiodev->oob_irq_requested = false;
}
if (sdiodev->sd_irq_requested) {
sdio_claim_host(sdiodev->func[1]);
sdio_release_irq(sdiodev->func[2]);
sdio_release_irq(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
sdio_release_irq(sdiodev->func2);
sdio_release_irq(sdiodev->func1);
sdio_release_host(sdiodev->func1);
sdiodev->sd_irq_requested = false;
}
}
@ -263,7 +264,7 @@ u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret)
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
data = sdio_readl(sdiodev->func[1], addr, &retval);
data = sdio_readl(sdiodev->func1, addr, &retval);
out:
if (ret)
@ -284,30 +285,37 @@ void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr,
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
sdio_writel(sdiodev->func[1], data, addr, &retval);
sdio_writel(sdiodev->func1, data, addr, &retval);
out:
if (ret)
*ret = retval;
}
static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
u32 addr, struct sk_buff *pkt)
static int brcmf_sdiod_skbuff_read(struct brcmf_sdio_dev *sdiodev,
struct sdio_func *func, u32 addr,
struct sk_buff *skb)
{
unsigned int req_sz;
int err;
/* Single skb use the standard mmc interface */
req_sz = pkt->len + 3;
req_sz = skb->len + 3;
req_sz &= (uint)~3;
if (fn == 1)
err = sdio_memcpy_fromio(sdiodev->func[fn],
((u8 *)(pkt->data)), addr, req_sz);
else
/* function 2 read is FIFO operation */
err = sdio_readsb(sdiodev->func[fn],
((u8 *)(pkt->data)), addr, req_sz);
switch (func->num) {
case 1:
err = sdio_memcpy_fromio(func, ((u8 *)(skb->data)), addr,
req_sz);
break;
case 2:
err = sdio_readsb(func, ((u8 *)(skb->data)), addr, req_sz);
break;
default:
/* bail out as things are really fishy here */
WARN(1, "invalid sdio function number: %d\n", func->num);
err = -ENOMEDIUM;
}
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
@ -315,18 +323,18 @@ static int brcmf_sdiod_buff_read(struct brcmf_sdio_dev *sdiodev, uint fn,
return err;
}
static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
u32 addr, struct sk_buff *pkt)
static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
struct sdio_func *func, u32 addr,
struct sk_buff *skb)
{
unsigned int req_sz;
int err;
/* Single skb use the standard mmc interface */
req_sz = pkt->len + 3;
req_sz = skb->len + 3;
req_sz &= (uint)~3;
err = sdio_memcpy_toio(sdiodev->func[fn], addr,
((u8 *)(pkt->data)), req_sz);
err = sdio_memcpy_toio(func, addr, ((u8 *)(skb->data)), req_sz);
if (err == -ENOMEDIUM)
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
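In both skbuff helpers, req_sz is skb->len rounded up to the next multiple of 4 ((len + 3) & ~3, equivalent to ALIGN(len, 4)), presumably to keep transfers word-aligned for the 32-bit backplane. For example:

	unsigned int len = 62;
	unsigned int req_sz = (len + 3) & ~3u; /* 62 -> 64; 61 -> 64; 64 -> 64 */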
@ -337,7 +345,7 @@ static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
/**
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
* @fn: SDIO function number
* @func: SDIO function
* @write: direction flag
* @addr: dongle memory address as source/destination
* @pkt: skb pointer
@ -346,7 +354,8 @@ static int brcmf_sdiod_buff_write(struct brcmf_sdio_dev *sdiodev, uint fn,
* stack for block data access. It assumes that the skb passed down by the
* caller has already been padded and aligned.
*/
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
struct sdio_func *func,
bool write, u32 addr,
struct sk_buff_head *pktlist)
{
@ -372,7 +381,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
req_sz = 0;
skb_queue_walk(pktlist, pkt_next)
req_sz += pkt_next->len;
req_sz = ALIGN(req_sz, sdiodev->func[fn]->cur_blksize);
req_sz = ALIGN(req_sz, func->cur_blksize);
while (req_sz > PAGE_SIZE) {
pkt_next = brcmu_pkt_buf_get_skb(PAGE_SIZE);
if (pkt_next == NULL) {
@ -391,7 +400,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
target_list = &local_list;
}
func_blk_sz = sdiodev->func[fn]->cur_blksize;
func_blk_sz = func->cur_blksize;
max_req_sz = sdiodev->max_request_size;
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
target_list->qlen);
@ -408,10 +417,10 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
mmc_cmd.opcode = SD_IO_RW_EXTENDED;
mmc_cmd.arg = write ? 1<<31 : 0; /* write flag */
mmc_cmd.arg |= (fn & 0x7) << 28; /* SDIO func num */
mmc_cmd.arg |= 1<<27; /* block mode */
mmc_cmd.arg |= (func->num & 0x7) << 28; /* SDIO func num */
mmc_cmd.arg |= 1 << 27; /* block mode */
/* for function 1 the addr will be incremented */
mmc_cmd.arg |= (fn == 1) ? 1<<26 : 0;
mmc_cmd.arg |= (func->num == 1) ? 1 << 26 : 0;
mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
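The mmc_cmd.arg bits assembled above (with the address and count fields filled in further down) follow the CMD53 / IO_RW_EXTENDED argument layout from the SDIO specification: bit 31 R/W flag, bits 30:28 function number, bit 27 block mode, bit 26 OP code (incrementing address), bits 25:9 register address, bits 8:0 block/byte count. A compact restatement with an illustrative helper:

static u32 cmd53_arg(bool write, u32 fn, bool block_mode, bool incr_addr,
		     u32 addr, u32 count)
{
	return (write ? 1u << 31 : 0) |
	       ((fn & 0x7) << 28) |
	       (block_mode ? 1u << 27 : 0) |
	       (incr_addr ? 1u << 26 : 0) |
	       ((addr & 0x1FFFF) << 9) |
	       (count & 0x1FF);
}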
@ -457,11 +466,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn,
mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
/* incrementing addr for function 1 */
if (fn == 1)
if (func->num == 1)
addr += req_sz;
mmc_set_data_timeout(&mmc_dat, sdiodev->func[fn]->card);
mmc_wait_for_req(sdiodev->func[fn]->card->host, &mmc_req);
mmc_set_data_timeout(&mmc_dat, func->card);
mmc_wait_for_req(func->card->host, &mmc_req);
ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
if (ret == -ENOMEDIUM) {
@ -529,7 +538,7 @@ int brcmf_sdiod_recv_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
{
u32 addr = sdiodev->sbwad;
u32 addr = sdiodev->cc_core->base;
int err = 0;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pkt->len);
@ -541,7 +550,7 @@ int brcmf_sdiod_recv_pkt(struct brcmf_sdio_dev *sdiodev, struct sk_buff *pkt)
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr, pkt);
err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr, pkt);
done:
return err;
@ -552,7 +561,7 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
{
struct sk_buff *glom_skb = NULL;
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
u32 addr = sdiodev->cc_core->base;
int err = 0;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n",
@ -566,14 +575,14 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
if (pktq->qlen == 1)
err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
pktq->next);
err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
pktq->next);
else if (!sdiodev->sg_support) {
glom_skb = brcmu_pkt_buf_get_skb(totlen);
if (!glom_skb)
return -ENOMEM;
err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_2, addr,
glom_skb);
err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func2, addr,
glom_skb);
if (err)
goto done;
@ -582,8 +591,8 @@ int brcmf_sdiod_recv_chain(struct brcmf_sdio_dev *sdiodev,
skb_pull(glom_skb, skb->len);
}
} else
err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, false, addr,
pktq);
err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, false,
addr, pktq);
done:
brcmu_pkt_buf_free_skb(glom_skb);
@ -593,7 +602,7 @@ done:
int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
u32 addr = sdiodev->sbwad;
u32 addr = sdiodev->cc_core->base;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@ -614,7 +623,8 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
if (!err)
err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2, addr, mypkt);
err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
mypkt);
brcmu_pkt_buf_free_skb(mypkt);
@ -625,7 +635,7 @@ int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktq)
{
struct sk_buff *skb;
u32 addr = sdiodev->sbwad;
u32 addr = sdiodev->cc_core->base;
int err;
brcmf_dbg(SDIO, "addr = 0x%x, size = %d\n", addr, pktq->qlen);
@ -639,14 +649,14 @@ int brcmf_sdiod_send_pkt(struct brcmf_sdio_dev *sdiodev,
if (pktq->qlen == 1 || !sdiodev->sg_support) {
skb_queue_walk(pktq, skb) {
err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_2,
addr, skb);
err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2,
addr, skb);
if (err)
break;
}
} else {
err = brcmf_sdiod_sglist_rw(sdiodev, SDIO_FUNC_2, true, addr,
pktq);
err = brcmf_sdiod_sglist_rw(sdiodev, sdiodev->func2, true,
addr, pktq);
}
return err;
@ -676,7 +686,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
else
dsize = size;
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
/* Do the transfer(s) */
while (size) {
@ -696,11 +706,11 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
if (write) {
memcpy(pkt->data, data, dsize);
err = brcmf_sdiod_buff_write(sdiodev, SDIO_FUNC_1,
sdaddr, pkt);
err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func1,
sdaddr, pkt);
} else {
err = brcmf_sdiod_buff_read(sdiodev, SDIO_FUNC_1,
sdaddr, pkt);
err = brcmf_sdiod_skbuff_read(sdiodev, sdiodev->func1,
sdaddr, pkt);
}
if (err) {
@ -723,17 +733,17 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
dev_kfree_skb(pkt);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
return err;
}
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn)
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func)
{
brcmf_dbg(SDIO, "Enter\n");
/* Issue abort cmd52 command through F0 */
brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, fn, NULL);
brcmf_sdiod_func0_wb(sdiodev, SDIO_CCCR_ABORT, func->num, NULL);
brcmf_dbg(SDIO, "Exit\n");
return 0;
@ -747,7 +757,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
uint nents;
int err;
func = sdiodev->func[2];
func = sdiodev->func2;
host = func->card->host;
sdiodev->sg_support = host->max_segs > 1;
max_blocks = min_t(uint, host->max_blk_count, 511u);
@ -808,17 +818,17 @@ static int brcmf_sdiod_freezer_on(struct brcmf_sdio_dev *sdiodev)
brcmf_sdio_trigger_dpc(sdiodev->bus);
wait_event(sdiodev->freezer->thread_freeze,
atomic_read(expect) == sdiodev->freezer->frozen_count);
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
res = brcmf_sdio_sleep(sdiodev->bus, true);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
return res;
}
static void brcmf_sdiod_freezer_off(struct brcmf_sdio_dev *sdiodev)
{
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
brcmf_sdio_sleep(sdiodev->bus, false);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
atomic_set(&sdiodev->freezer->freezing, 0);
complete_all(&sdiodev->freezer->resumed);
}
@ -868,19 +878,19 @@ static int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev)
brcmf_sdiod_freezer_detach(sdiodev);
/* Disable Function 2 */
sdio_claim_host(sdiodev->func[2]);
sdio_disable_func(sdiodev->func[2]);
sdio_release_host(sdiodev->func[2]);
sdio_claim_host(sdiodev->func2);
sdio_disable_func(sdiodev->func2);
sdio_release_host(sdiodev->func2);
/* Disable Function 1 */
sdio_claim_host(sdiodev->func[1]);
sdio_disable_func(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
sdio_disable_func(sdiodev->func1);
sdio_release_host(sdiodev->func1);
sg_free_table(&sdiodev->sgtable);
sdiodev->sbwad = 0;
pm_runtime_allow(sdiodev->func[1]->card->host->parent);
pm_runtime_allow(sdiodev->func1->card->host->parent);
return 0;
}
@ -896,29 +906,27 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
{
int ret = 0;
sdiodev->num_funcs = 2;
sdio_claim_host(sdiodev->func1);
sdio_claim_host(sdiodev->func[1]);
ret = sdio_set_block_size(sdiodev->func[1], SDIO_FUNC1_BLOCKSIZE);
ret = sdio_set_block_size(sdiodev->func1, SDIO_FUNC1_BLOCKSIZE);
if (ret) {
brcmf_err("Failed to set F1 blocksize\n");
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
goto out;
}
ret = sdio_set_block_size(sdiodev->func[2], SDIO_FUNC2_BLOCKSIZE);
ret = sdio_set_block_size(sdiodev->func2, SDIO_FUNC2_BLOCKSIZE);
if (ret) {
brcmf_err("Failed to set F2 blocksize\n");
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
goto out;
}
/* increase F2 timeout */
sdiodev->func[2]->enable_timeout = SDIO_WAIT_F2RDY;
sdiodev->func2->enable_timeout = SDIO_WAIT_F2RDY;
/* Enable Function 1 */
ret = sdio_enable_func(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
ret = sdio_enable_func(sdiodev->func1);
sdio_release_host(sdiodev->func1);
if (ret) {
brcmf_err("Failed to enable F1: err=%d\n", ret);
goto out;
@ -934,7 +942,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
ret = -ENODEV;
goto out;
}
brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
brcmf_sdiod_host_fixup(sdiodev->func2->card->host);
out:
if (ret)
brcmf_sdiod_remove(sdiodev);
@ -995,6 +1003,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(SDIO, "Function#: %d\n", func->num);
dev = &func->dev;
/* Set MMC_QUIRK_LENIENT_FN0 for this card */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
/* prohibit ACPI power management for this device */
brcmf_sdiod_acpi_set_power_manageable(dev, 0);
@ -1018,17 +1030,15 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
/* store refs to functions used. mmc_card does
* not hold the F0 function pointer.
*/
sdiodev->func[0] = kmemdup(func, sizeof(*func), GFP_KERNEL);
sdiodev->func[0]->num = 0;
sdiodev->func[1] = func->card->sdio_func[0];
sdiodev->func[2] = func;
sdiodev->func1 = func->card->sdio_func[0];
sdiodev->func2 = func;
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
bus_if->proto_type = BRCMF_PROTO_BCDC;
dev_set_drvdata(&func->dev, bus_if);
dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
sdiodev->dev = &sdiodev->func[1]->dev;
dev_set_drvdata(&sdiodev->func1->dev, bus_if);
sdiodev->dev = &sdiodev->func1->dev;
brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN);
@ -1044,8 +1054,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
fail:
dev_set_drvdata(&func->dev, NULL);
dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
kfree(sdiodev->func[0]);
dev_set_drvdata(&sdiodev->func1->dev, NULL);
kfree(sdiodev);
kfree(bus_if);
return err;
@ -1074,11 +1083,10 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
/* only proceed with rest of cleanup if func 1 */
brcmf_sdiod_remove(sdiodev);
dev_set_drvdata(&sdiodev->func[1]->dev, NULL);
dev_set_drvdata(&sdiodev->func[2]->dev, NULL);
dev_set_drvdata(&sdiodev->func1->dev, NULL);
dev_set_drvdata(&sdiodev->func2->dev, NULL);
kfree(bus_if);
kfree(sdiodev->func[0]);
kfree(sdiodev);
}
@ -1104,7 +1112,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != SDIO_FUNC_1)
if (func->num != 1)
return 0;
@ -1121,7 +1129,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
else
sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
}
if (sdio_set_host_pm_flags(sdiodev->func[1], sdio_flags))
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
return 0;
}
@ -1133,7 +1141,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != SDIO_FUNC_2)
if (func->num != 2)
return 0;
brcmf_sdiod_freezer_off(sdiodev);


@ -1338,6 +1338,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
switch (pub->chip) {
case BRCM_CC_4354_CHIP_ID:
case BRCM_CC_4356_CHIP_ID:
case BRCM_CC_4345_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */


@ -1251,14 +1251,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
u64 address;
u32 addr;
devinfo->shared.scratch = dma_alloc_coherent(&devinfo->pdev->dev,
BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
&devinfo->shared.scratch_dmahandle, GFP_KERNEL);
devinfo->shared.scratch =
dma_zalloc_coherent(&devinfo->pdev->dev,
BRCMF_DMA_D2H_SCRATCH_BUF_LEN,
&devinfo->shared.scratch_dmahandle,
GFP_KERNEL);
if (!devinfo->shared.scratch)
goto fail;
memset(devinfo->shared.scratch, 0, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_SCRATCH_ADDR_OFFSET;
address = (u64)devinfo->shared.scratch_dmahandle;
@ -1268,14 +1268,14 @@ static int brcmf_pcie_init_scratchbuffers(struct brcmf_pciedev_info *devinfo)
BRCMF_SHARED_DMA_SCRATCH_LEN_OFFSET;
brcmf_pcie_write_tcm32(devinfo, addr, BRCMF_DMA_D2H_SCRATCH_BUF_LEN);
devinfo->shared.ringupd = dma_alloc_coherent(&devinfo->pdev->dev,
BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
&devinfo->shared.ringupd_dmahandle, GFP_KERNEL);
devinfo->shared.ringupd =
dma_zalloc_coherent(&devinfo->pdev->dev,
BRCMF_DMA_D2H_RINGUPD_BUF_LEN,
&devinfo->shared.ringupd_dmahandle,
GFP_KERNEL);
if (!devinfo->shared.ringupd)
goto fail;
memset(devinfo->shared.ringupd, 0, BRCMF_DMA_D2H_RINGUPD_BUF_LEN);
addr = devinfo->shared.tcm_base_address +
BRCMF_SHARED_DMA_RINGUPD_ADDR_OFFSET;
address = (u64)devinfo->shared.ringupd_dmahandle;


@ -660,30 +660,6 @@ static bool data_ok(struct brcmf_sdio *bus)
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
}
/*
* Reads a register in the SDIO hardware block. This block occupies a series of
* addresses on the 32-bit backplane bus.
*/
static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
struct brcmf_core *core = bus->sdio_core;
int ret;
*regvar = brcmf_sdiod_readl(bus->sdiodev, core->base + offset, &ret);
return ret;
}
static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
struct brcmf_core *core = bus->sdio_core;
int ret;
brcmf_sdiod_writel(bus->sdiodev, core->base + reg_offset, regval, &ret);
return ret;
}
static int
brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on)
{
@ -1003,7 +979,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
struct sdpcm_shared_le sh_le;
__le32 addr_le;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_bus_sleep(bus, false, false);
/*
@ -1037,7 +1013,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
if (rv < 0)
goto fail;
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
/* Endianness */
sh->flags = le32_to_cpu(sh_le.flags);
@ -1059,7 +1035,7 @@ static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
fail:
brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n",
rv, addr);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
return rv;
}
@ -1078,6 +1054,8 @@ static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
struct brcmf_sdio_dev *sdiod = bus->sdiodev;
struct brcmf_core *core = bus->sdio_core;
u32 intstatus = 0;
u32 hmb_data;
u8 fcbits;
@ -1086,10 +1064,14 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
brcmf_dbg(SDIO, "Enter\n");
/* Read mailbox data and ack that we did so */
ret = r_sdreg32(bus, &hmb_data, SD_REG(tohostmailboxdata));
hmb_data = brcmf_sdiod_readl(sdiod,
core->base + SD_REG(tohostmailboxdata),
&ret);
if (!ret)
brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
SMB_INT_ACK, &ret);
if (ret == 0)
w_sdreg32(bus, SMB_INT_ACK, SD_REG(tosbmailbox));
bus->sdcnt.f1regdata += 2;
/* dongle indicates the firmware has halted/crashed */
@ -1163,6 +1145,8 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
struct brcmf_sdio_dev *sdiod = bus->sdiodev;
struct brcmf_core *core = bus->sdio_core;
uint retries = 0;
u16 lastrbc;
u8 hi, lo;
@ -1173,7 +1157,7 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
rtx ? ", send NAK" : "");
if (abort)
brcmf_sdiod_abort(bus->sdiodev, SDIO_FUNC_2);
brcmf_sdiod_abort(bus->sdiodev, bus->sdiodev->func2);
brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM,
&err);
@ -1204,7 +1188,8 @@ static void brcmf_sdio_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
if (rtx) {
bus->sdcnt.rxrtx++;
err = w_sdreg32(bus, SMB_NAK, SD_REG(tosbmailbox));
brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailbox),
SMB_NAK, &err);
bus->sdcnt.f1regdata++;
if (err == 0)
@ -1224,7 +1209,7 @@ static void brcmf_sdio_txfail(struct brcmf_sdio *bus)
brcmf_err("sdio error, abort command and terminate frame\n");
bus->sdcnt.tx_sderrs++;
brcmf_sdiod_abort(sdiodev, SDIO_FUNC_2);
brcmf_sdiod_abort(sdiodev, sdiodev->func2);
brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL);
bus->sdcnt.f1regdata++;
@ -1580,10 +1565,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdiod_recv_chain(bus->sdiodev,
&bus->glom, dlen);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
bus->sdcnt.f2rxdata++;
/* On failure, kill the superframe */
@ -1591,11 +1576,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_err("glom read of %d bytes failed: %d\n",
dlen, errcode);
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
brcmf_sdio_free_glom(bus);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
return 0;
}
@ -1605,10 +1590,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.seq_num = rxseq;
rd_new.len = dlen;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
BRCMF_SDIO_FT_SUPER);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
bus->cur_read.len = rd_new.len_nxtfrm << 4;
/* Remove superframe header, remember offset */
@ -1624,10 +1609,10 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.len = pnext->len;
rd_new.seq_num = rxseq++;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
BRCMF_SDIO_FT_SUB);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
pnext->data, 32, "subframe:\n");
@ -1636,11 +1621,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
if (errcode) {
/* Terminate frame on error */
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true, false);
bus->sdcnt.rxglomfail++;
brcmf_sdio_free_glom(bus);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
bus->cur_read.len = 0;
return 0;
}
@ -1848,7 +1833,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_left = rd->len;
/* read header first for unknown frame length */
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
if (!rd->len) {
ret = brcmf_sdiod_recv_buf(bus->sdiodev,
bus->rxhdr, BRCMF_FIRSTREAD);
@ -1858,7 +1843,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
ret);
bus->sdcnt.rx_hdrfail++;
brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
continue;
}
@ -1868,7 +1853,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
BRCMF_SDIO_FT_NORMAL)) {
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
if (!bus->rxpending)
break;
else
@ -1884,7 +1869,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd->len_nxtfrm = 0;
/* treat all packet as event if we don't know */
rd->channel = SDPCM_EVENT_CHANNEL;
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
continue;
}
rd->len_left = rd->len > BRCMF_FIRSTREAD ?
@ -1901,7 +1886,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_err("brcmu_pkt_buf_get_skb failed\n");
brcmf_sdio_rxfail(bus, false,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
continue;
}
skb_pull(pkt, head_read);
@ -1909,16 +1894,16 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
ret = brcmf_sdiod_recv_pkt(bus->sdiodev, pkt);
bus->sdcnt.f2rxdata++;
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
if (ret < 0) {
brcmf_err("read %d bytes from channel %d failed: %d\n",
rd->len, rd->channel, ret);
brcmu_pkt_buf_free_skb(pkt);
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, true,
RETRYCHAN(rd->channel));
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
continue;
}
@ -1929,7 +1914,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
@ -1942,11 +1927,11 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
roundup(rd_new.len, 16) >> 4);
rd->len = 0;
brcmf_sdio_rxfail(bus, true, true);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
rd->len_nxtfrm = rd_new.len_nxtfrm;
rd->channel = rd_new.channel;
rd->dat_offset = rd_new.dat_offset;
@ -1962,9 +1947,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
rd_new.seq_num);
/* Force retry w/normal header read */
rd->len = 0;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, false, true);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
brcmu_pkt_buf_free_skb(pkt);
continue;
}
@ -1987,9 +1972,9 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
} else {
brcmf_err("%s: glom superframe w/o "
"descriptor!\n", __func__);
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_rxfail(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
}
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
@ -2087,7 +2072,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
int ntail, ret;
sdiodev = bus->sdiodev;
blksize = sdiodev->func[SDIO_FUNC_2]->cur_blksize;
blksize = sdiodev->func2->cur_blksize;
/* sg entry alignment should be a divisor of block size */
WARN_ON(blksize % bus->sgentry_align);
@ -2266,14 +2251,14 @@ static int brcmf_sdio_txpkt(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
if (ret)
goto done;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
ret = brcmf_sdiod_send_pkt(bus->sdiodev, pktq);
bus->sdcnt.f2txdata++;
if (ret < 0)
brcmf_sdio_txfail(bus);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
done:
brcmf_sdio_txpkt_postp(bus, pktq);
@ -2291,6 +2276,7 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
struct sk_buff_head pktq;
u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
u32 intstatus = 0;
int ret = 0, prec_out, i;
uint cnt = 0;
@ -2328,9 +2314,11 @@ static uint brcmf_sdio_sendfromq(struct brcmf_sdio *bus, uint maxframes)
/* In poll mode, need to check for other events */
if (!bus->intr) {
/* Check device status, signal pending interrupt */
sdio_claim_host(bus->sdiodev->func[1]);
ret = r_sdreg32(bus, &intstatus, SD_REG(intstatus));
sdio_release_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
intstatus = brcmf_sdiod_readl(bus->sdiodev,
intstat_addr, &ret);
sdio_release_host(bus->sdiodev->func1);
bus->sdcnt.f2txdata++;
if (ret != 0)
break;
@ -2413,12 +2401,13 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len)
static void brcmf_sdio_bus_stop(struct device *dev)
{
u32 local_hostintmask;
u8 saveclk;
int err;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
struct brcmf_core *core = bus->sdio_core;
u32 local_hostintmask;
u8 saveclk;
int err;
brcmf_dbg(TRACE, "Enter\n");
@ -2429,13 +2418,15 @@ static void brcmf_sdio_bus_stop(struct device *dev)
}
if (sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
/* Enable clock for device interrupts */
brcmf_sdio_bus_sleep(bus, false, false);
/* Disable and clear interrupts at the chip level also */
w_sdreg32(bus, 0, SD_REG(hostintmask));
brcmf_sdiod_writel(sdiodev, core->base + SD_REG(hostintmask),
0, NULL);
local_hostintmask = bus->hostintmask;
bus->hostintmask = 0;
@ -2451,12 +2442,13 @@ static void brcmf_sdio_bus_stop(struct device *dev)
/* Turn off the bus (F2), free any pending packets */
brcmf_dbg(INTR, "disable SDIO interrupts\n");
sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
sdio_disable_func(sdiodev->func2);
/* Clear any pending interrupts now that F2 is disabled */
w_sdreg32(bus, local_hostintmask, SD_REG(intstatus));
brcmf_sdiod_writel(sdiodev, core->base + SD_REG(intstatus),
local_hostintmask, NULL);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
}
/* Clear the data packet queues */
brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
@ -2494,12 +2486,12 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
struct brcmf_core *buscore = bus->sdio_core;
struct brcmf_core *core = bus->sdio_core;
u32 addr;
unsigned long val;
int ret;
addr = buscore->base + SD_REG(intstatus);
addr = core->base + SD_REG(intstatus);
val = brcmf_sdiod_readl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
@ -2521,7 +2513,9 @@ static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
{
struct brcmf_sdio_dev *sdiod = bus->sdiodev;
u32 newstatus = 0;
u32 intstat_addr = bus->sdio_core->base + SD_REG(intstatus);
unsigned long intstatus;
uint txlimit = bus->txbound; /* Tx frames to send before resched */
uint framecnt; /* Temporary counter of tx/rx frames */
@ -2529,7 +2523,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
/* If waiting for HTAVAIL, check status */
if (!bus->sr_enabled && bus->clkstate == CLK_PENDING) {
@ -2576,9 +2570,10 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
*/
if (intstatus & I_HMB_FC_CHANGE) {
intstatus &= ~I_HMB_FC_CHANGE;
err = w_sdreg32(bus, I_HMB_FC_CHANGE, SD_REG(intstatus));
brcmf_sdiod_writel(sdiod, intstat_addr, I_HMB_FC_CHANGE, &err);
newstatus = brcmf_sdiod_readl(sdiod, intstat_addr, &err);
err = r_sdreg32(bus, &newstatus, SD_REG(intstatus));
bus->sdcnt.f1regdata += 2;
atomic_set(&bus->fcstate,
!!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)));
@ -2591,7 +2586,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
intstatus |= brcmf_sdio_hostmail(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
/* Generally don't ask for these, can get CRC errors... */
if (intstatus & I_WR_OOSYNC) {
@ -2634,7 +2629,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) &&
data_ok(bus)) {
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf,
bus->ctrl_frame_len);
@ -2642,7 +2637,7 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
wmb();
bus->ctrl_frame_stat = false;
}
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
brcmf_sdio_wait_event_wakeup(bus);
}
/* Send queued frames (limit 1 if rx may still be pending) */
@ -2658,14 +2653,14 @@ static void brcmf_sdio_dpc(struct brcmf_sdio *bus)
brcmf_err("failed backplane access over SDIO, halting operation\n");
atomic_set(&bus->intstatus, 0);
if (bus->ctrl_frame_stat) {
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
bus->ctrl_frame_err = -ENODEV;
wmb();
bus->ctrl_frame_stat = false;
brcmf_sdio_wait_event_wakeup(bus);
}
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
}
} else if (atomic_read(&bus->intstatus) ||
atomic_read(&bus->ipend) > 0 ||
@ -2880,13 +2875,13 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
CTL_DONE_TIMEOUT);
ret = 0;
if (bus->ctrl_frame_stat) {
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
if (bus->ctrl_frame_stat) {
brcmf_dbg(SDIO, "ctrl_frame timeout\n");
bus->ctrl_frame_stat = false;
ret = -ETIMEDOUT;
}
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
}
if (!ret) {
brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n",
@ -3010,7 +3005,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
return 0;
}
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
if (sh->assert_file_addr != 0) {
error = brcmf_sdiod_ramrw(bus->sdiodev, false,
sh->assert_file_addr, (u8 *)file, 80);
@ -3023,7 +3018,7 @@ static int brcmf_sdio_assert_info(struct seq_file *seq, struct brcmf_sdio *bus,
if (error < 0)
return error;
}
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
seq_printf(seq, "dongle assert: %s:%d: assert(%s)\n",
file, sh->assert_line, expr);
@ -3297,7 +3292,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
int bcmerror;
u32 rstvec;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
rstvec = get_unaligned_le32(fw->data);
@ -3326,7 +3321,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
err:
brcmf_sdio_clkctl(bus, CLK_SDONLY, false);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
return bcmerror;
}
@ -3441,7 +3436,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
if (sdiodev->sg_support) {
bus->txglom = false;
value = 1;
pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
pad_size = bus->sdiodev->func2->cur_blksize << 1;
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
@ -3483,7 +3478,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
address = bus->ci->rambase;
offset = err = 0;
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
while (offset < mem_size) {
len = ((offset + MEMBLOCK) < mem_size) ? MEMBLOCK :
mem_size - offset;
@ -3499,7 +3494,7 @@ static int brcmf_sdio_bus_get_memdump(struct device *dev, void *data,
}
done:
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
return err;
}
@ -3556,11 +3551,10 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
if (!bus->dpc_triggered) {
u8 devpend;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
devpend = brcmf_sdiod_func0_rb(bus->sdiodev,
SDIO_CCCR_INTx,
NULL);
sdio_release_host(bus->sdiodev->func[1]);
SDIO_CCCR_INTx, NULL);
sdio_release_host(bus->sdiodev->func1);
intstatus = devpend & (INTR_STATUS_FUNC1 |
INTR_STATUS_FUNC2);
}
@ -3586,13 +3580,13 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->console.count += jiffies_to_msecs(BRCMF_WD_POLL);
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
/* Make sure backplane clock is on */
brcmf_sdio_bus_sleep(bus, false, false);
if (brcmf_sdio_readconsole(bus) < 0)
/* stop on error */
bus->console_interval = 0;
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
}
}
#endif /* DEBUG */
@ -3605,11 +3599,11 @@ static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus)
bus->idlecount++;
if (bus->idlecount > bus->idletime) {
brcmf_dbg(SDIO, "idle\n");
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_wd_timer(bus, false);
bus->idlecount = 0;
brcmf_sdio_bus_sleep(bus, true, false);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
}
} else {
bus->idlecount = 0;
@ -3777,15 +3771,24 @@ static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
u32 val, rev;
val = brcmf_sdiod_readl(sdiodev, addr, NULL);
if ((sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 ||
sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4339) &&
addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
/*
* this is a bit of special handling if reading the chipcommon chipid
* register. The 4339 is a next-gen of the 4335. It uses the same
* SDIO device id as 4335 and the chipid register returns 4335 as well.
* It can be identified as 4339 by looking at the chip revision. It
* is corrected here so the chip.c module has the right info.
*/
if (addr == CORE_CC_REG(SI_ENUM_BASE, chipid) &&
(sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4339 ||
sdiodev->func1->device == SDIO_DEVICE_ID_BROADCOM_4335_4339)) {
rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
if (rev >= 2) {
val &= ~CID_ID_MASK;
val |= BRCM_CC_4339_CHIP_ID;
}
}
return val;
}
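Assuming the usual chipid register layout (id in bits 15:0, revision in bits 19:16, i.e. CID_ID_MASK 0x0000ffff, CID_REV_MASK 0x000f0000, CID_REV_SHIFT 16 -- stated here as assumptions, not quoted from this patch), a readback of 0x00024335 decodes as id 0x4335, rev 2, and the fixup above rewrites it to 0x00024339 so chip.c sees a 4339:

	u32 val = 0x00024335;                        /* id 0x4335, rev 2 */
	u32 rev = (val & 0x000f0000) >> 16;          /* rev = 2 */
	if (rev >= 2)
		val = (val & ~0x0000ffffu) | 0x4339; /* val = 0x00024339 */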
@ -3814,7 +3817,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
u32 drivestrength;
sdiodev = bus->sdiodev;
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
pr_debug("F1 signature read @0x18000000=0x%4x\n",
brcmf_sdiod_readl(sdiodev, SI_ENUM_BASE, NULL));
@ -3848,6 +3851,11 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
if (!bus->sdio_core)
goto fail;
/* Pick up the CHIPCOMMON core info struct, for bulk IO in bcmsdh.c */
sdiodev->cc_core = brcmf_chip_get_core(bus->ci, BCMA_CORE_CHIPCOMMON);
if (!sdiodev->cc_core)
goto fail;
sdiodev->settings = brcmf_get_module_param(sdiodev->dev,
BRCMF_BUSTYPE_SDIO,
bus->ci->chip,
@ -3876,8 +3884,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
/* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ
* is true or when platform data OOB irq is true).
*/
if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
if ((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func1) & MMC_PM_WAKE_SDIO_IRQ) ||
(sdiodev->settings->bus.sdio.oob_irq_supported)))
sdiodev->bus_if->wowl_supported = true;
#endif
@ -3916,7 +3924,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
if (err)
goto fail;
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
@ -3937,7 +3945,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
return true;
fail:
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
return false;
}
@ -4017,22 +4025,21 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
const struct firmware *code,
void *nvram, u32 nvram_len)
{
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
struct brcmf_sdio *bus;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
struct brcmf_sdio_dev *sdiod = bus->sdiodev;
struct brcmf_core *core = bus->sdio_core;
u8 saveclk;
brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err);
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
if (err)
goto fail;
if (!bus_if->drvr)
return;
bus = sdiodev->bus;
/* try to download image and nvram to the dongle */
bus->alp_only = true;
err = brcmf_sdio_download_firmware(bus, code, nvram, nvram_len);
@ -4044,7 +4051,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
bus->sdcnt.tickcnt = 0;
brcmf_sdio_wd_timer(bus, true);
sdio_claim_host(sdiodev->func[1]);
sdio_claim_host(sdiodev->func1);
/* Make sure backplane clock is on, needed to generate F2 interrupt */
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
@ -4063,10 +4070,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
}
/* Enable function 2 (frame transfers) */
w_sdreg32(bus, SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT,
SD_REG(tosbmailboxdata));
err = sdio_enable_func(sdiodev->func[SDIO_FUNC_2]);
brcmf_sdiod_writel(sdiod, core->base + SD_REG(tosbmailboxdata),
SDPCM_PROT_VERSION << SMB_DATA_VERSION_SHIFT, NULL);
err = sdio_enable_func(sdiodev->func2);
brcmf_dbg(INFO, "enable F2: err=%d\n", err);
@ -4074,12 +4081,14 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
if (!err) {
/* Set up the interrupt mask and enable interrupts */
bus->hostintmask = HOSTINTMASK;
w_sdreg32(bus, bus->hostintmask, SD_REG(hostintmask));
brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask),
bus->hostintmask, NULL);
brcmf_sdiod_writeb(sdiodev, SBSDIO_WATERMARK, 8, &err);
} else {
/* Disable F2 again */
sdio_disable_func(sdiodev->func[SDIO_FUNC_2]);
sdio_disable_func(sdiodev->func2);
goto release;
}
@ -4104,7 +4113,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
if (err != 0)
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
err = brcmf_bus_started(dev);
if (err != 0) {
@ -4114,10 +4123,10 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
return;
release:
sdio_release_host(sdiodev->func[1]);
sdio_release_host(sdiodev->func1);
fail:
brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
device_release_driver(&sdiodev->func[2]->dev);
device_release_driver(&sdiodev->func2->dev);
device_release_driver(dev);
}
@ -4144,7 +4153,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
/* single-threaded workqueue */
wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM,
dev_name(&sdiodev->func[1]->dev));
dev_name(&sdiodev->func1->dev));
if (!wq) {
brcmf_err("insufficient memory to create txworkqueue\n");
goto fail;
@ -4170,7 +4179,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
bus, "brcmf_wdog/%s",
dev_name(&sdiodev->func[1]->dev));
dev_name(&sdiodev->func1->dev));
if (IS_ERR(bus->watchdog_tsk)) {
pr_warn("brcmf_watchdog thread failed to start\n");
bus->watchdog_tsk = NULL;
@ -4196,7 +4205,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
/* Query the F2 block size, set roundup accordingly */
bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
bus->blocksize = bus->sdiodev->func2->cur_blksize;
bus->roundup = min(max_roundup, bus->blocksize);
/* Allocate buffers */
@ -4212,17 +4221,17 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
}
}
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
/* Disable F2 to clear any intermediate frame state on the dongle */
sdio_disable_func(bus->sdiodev->func[SDIO_FUNC_2]);
sdio_disable_func(bus->sdiodev->func2);
bus->rxflow = false;
/* Done with backplane-dependent accesses, can drop clock... */
brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
/* ...and initialize clock/power states */
bus->clkstate = CLK_SDONLY;
@ -4274,7 +4283,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
if (bus->ci) {
if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) {
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
brcmf_sdio_wd_timer(bus, false);
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Leave the device in state where it is
@ -4284,7 +4293,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
msleep(20);
brcmf_chip_set_passive(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
}
brcmf_chip_detach(bus->ci);
}
@ -4331,9 +4340,9 @@ int brcmf_sdio_sleep(struct brcmf_sdio *bus, bool sleep)
{
int ret;
sdio_claim_host(bus->sdiodev->func[1]);
sdio_claim_host(bus->sdiodev->func1);
ret = brcmf_sdio_bus_sleep(bus, sleep, false);
sdio_release_host(bus->sdiodev->func[1]);
sdio_release_host(bus->sdiodev->func1);
return ret;
}
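
The func[1]/func[2] array accesses above are converted mechanically to the new func1/func2 pointers while keeping the usual SDIO locking discipline. As a minimal sketch of the pattern (a hypothetical helper, using only the renamed fields and standard MMC core calls):

static u8 brcmf_example_read_func1(struct brcmf_sdio_dev *sdiodev, u32 addr)
{
	int err = 0;
	u8 val;

	/* serialize against the MMC core before touching the card */
	sdio_claim_host(sdiodev->func1);
	val = sdio_readb(sdiodev->func1, addr, &err);
	sdio_release_host(sdiodev->func1);

	return err ? 0 : val;
}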


@ -21,10 +21,6 @@
#include <linux/firmware.h>
#include "firmware.h"
#define SDIO_FUNC_0 0
#define SDIO_FUNC_1 1
#define SDIO_FUNC_2 2
#define SDIOD_FBR_SIZE 0x100
/* io_en */
@ -39,16 +35,10 @@
#define INTR_STATUS_FUNC1 0x2
#define INTR_STATUS_FUNC2 0x4
/* Maximum number of I/O funcs */
#define SDIOD_MAX_IOFUNCS 7
/* mask of register map */
#define REG_F0_REG_MASK 0x7FF
#define REG_F1_MISC_MASK 0x1FFFF
/* as of sdiod rev 0, supports 3 functions */
#define SBSDIO_NUM_FUNCTION 3
/* function 0 vendor specific CCCR registers */
#define SDIO_CCCR_BRCM_CARDCAP 0xf0
@ -56,6 +46,11 @@
#define SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT BIT(2)
#define SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC BIT(3)
/* Interrupt enable bits for each function */
#define SDIO_CCCR_IEN_FUNC0 BIT(0)
#define SDIO_CCCR_IEN_FUNC1 BIT(1)
#define SDIO_CCCR_IEN_FUNC2 BIT(2)
#define SDIO_CCCR_BRCM_CARDCTRL 0xf1
#define SDIO_CCCR_BRCM_CARDCTRL_WLANRESET BIT(1)
@ -175,9 +170,10 @@ struct brcmf_sdio;
struct brcmf_sdiod_freezer;
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
struct sdio_func *func1;
struct sdio_func *func2;
u32 sbwad; /* Save backplane window address */
struct brcmf_core *cc_core; /* chipcommon core info struct */
struct brcmf_sdio *bus;
struct device *dev;
struct brcmf_bus *bus_if;
@ -296,17 +292,17 @@ void brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
/* SDIO device register access interface */
/* Accessors for SDIO Function 0 */
#define brcmf_sdiod_func0_rb(sdiodev, addr, r) \
sdio_readb((sdiodev)->func[0], (addr), (r))
sdio_f0_readb((sdiodev)->func1, (addr), (r))
#define brcmf_sdiod_func0_wb(sdiodev, addr, v, ret) \
sdio_writeb((sdiodev)->func[0], (v), (addr), (ret))
sdio_f0_writeb((sdiodev)->func1, (v), (addr), (ret))
/* Accessors for SDIO Function 1 */
#define brcmf_sdiod_readb(sdiodev, addr, r) \
sdio_readb((sdiodev)->func[1], (addr), (r))
sdio_readb((sdiodev)->func1, (addr), (r))
#define brcmf_sdiod_writeb(sdiodev, addr, v, ret) \
sdio_writeb((sdiodev)->func[1], (v), (addr), (ret))
sdio_writeb((sdiodev)->func1, (v), (addr), (ret))
u32 brcmf_sdiod_readl(struct brcmf_sdio_dev *sdiodev, u32 addr, int *ret);
void brcmf_sdiod_writel(struct brcmf_sdio_dev *sdiodev, u32 addr, u32 data,
@ -350,7 +346,8 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
u8 *data, uint size);
/* Issue an abort to the specified function */
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, u8 fn);
int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, struct sdio_func *func);
void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
enum brcmf_sdiod_state state);
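
With the fixed func[] array gone there is no sdio_func object for function 0 any more, so the F0 accessors above are rerouted through func1 using the MMC core's dedicated helpers. Roughly, and only as an illustration of the rewritten macros:

/* brcmf_sdiod_func0_rb(sdiodev, addr, &err) now expands to: */
u8 val = sdio_f0_readb(sdiodev->func1, addr, &err);

/* sdio_f0_readb()/sdio_f0_writeb() always address the function-0
 * (CCCR/FBR) register space, no matter which sdio_func is passed in.
 */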


@ -15,6 +15,7 @@ iwlwifi-objs += fw/notif-wait.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o fw/nvm.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
iwlwifi-objs += $(iwlwifi-m)


@ -250,10 +250,12 @@ struct iwl_mfu_assert_dump_notif {
* The ids for different types of markers to insert into the usniffer logs
*
* @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
* @MARKER_ID_SYNC_CLOCK: sync FW time and systime
*/
enum iwl_mvm_marker_id {
MARKER_ID_TX_FRAME_LATENCY = 1,
}; /* MARKER_ID_API_E_VER_1 */
MARKER_ID_SYNC_CLOCK = 2,
}; /* MARKER_ID_API_E_VER_2 */
/**
* struct iwl_mvm_marker - mark info into the usniffer logs


@ -67,6 +67,10 @@
* enum iwl_mac_conf_subcmd_ids - mac configuration command IDs
*/
enum iwl_mac_conf_subcmd_ids {
/**
* @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
*/
LOW_LATENCY_CMD = 0x3,
/**
* @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
*/
@ -82,4 +86,19 @@ struct iwl_channel_switch_noa_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
/**
* struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode'
*
* @mac_id: MAC ID to which the low-latency configuration applies
* @low_latency_rx: 1/0 to set/clear Rx low latency direction
* @low_latency_tx: 1/0 to set/clear Tx low latency direction
* @reserved: reserved for alignment purposes
*/
struct iwl_mac_low_latency_cmd {
__le32 mac_id;
u8 low_latency_rx;
u8 low_latency_tx;
__le16 reserved;
} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
#endif /* __iwl_fw_api_mac_cfg_h__ */
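
As a sketch of how this command is meant to be used (mirroring the mvm code later in this diff; mac_id here is a placeholder):

struct iwl_mac_low_latency_cmd cmd = {
	.mac_id = cpu_to_le32(mac_id),	/* placeholder MAC ID */
	.low_latency_rx = 1,		/* set Rx low-latency direction */
	.low_latency_tx = 1,		/* set Tx low-latency direction */
};

/* sent as LOW_LATENCY_CMD in MAC_CONF_GROUP:
 * iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD, MAC_CONF_GROUP, 0),
 *		       0, sizeof(cmd), &cmd);
 */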


@ -71,7 +71,7 @@
* @IWL_TLC_MNG_CFG_FLAGS_BF_MSK: enable BFER
* @IWL_TLC_MNG_CFG_FLAGS_DCM_MSK: enable DCM
*/
enum iwl_tlc_mng_cfg_flags_enum {
enum iwl_tlc_mng_cfg_flags {
IWL_TLC_MNG_CFG_FLAGS_CCK_MSK = BIT(0),
IWL_TLC_MNG_CFG_FLAGS_DD_MSK = BIT(1),
IWL_TLC_MNG_CFG_FLAGS_STBC_MSK = BIT(2),
@ -81,14 +81,14 @@ enum iwl_tlc_mng_cfg_flags_enum {
};
/**
* enum iwl_tlc_mng_cfg_cw_enum - channel width options
* enum iwl_tlc_mng_cfg_cw - channel width options
* @IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ: 20MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ: 40MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ: 80MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_160MHZ: 160MHZ channel
* @IWL_TLC_MNG_MAX_CH_WIDTH_LAST: maximum value
*/
enum iwl_tlc_mng_cfg_cw_enum {
enum iwl_tlc_mng_cfg_cw {
IWL_TLC_MNG_MAX_CH_WIDTH_20MHZ,
IWL_TLC_MNG_MAX_CH_WIDTH_40MHZ,
IWL_TLC_MNG_MAX_CH_WIDTH_80MHZ,
@ -97,25 +97,25 @@ enum iwl_tlc_mng_cfg_cw_enum {
};
/**
* enum iwl_tlc_mng_cfg_chains_enum - possible chains
* enum iwl_tlc_mng_cfg_chains - possible chains
* @IWL_TLC_MNG_CHAIN_A_MSK: chain A
* @IWL_TLC_MNG_CHAIN_B_MSK: chain B
* @IWL_TLC_MNG_CHAIN_C_MSK: chain C
*/
enum iwl_tlc_mng_cfg_chains_enum {
enum iwl_tlc_mng_cfg_chains {
IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
IWL_TLC_MNG_CHAIN_C_MSK = BIT(2),
};
/**
* enum iwl_tlc_mng_cfg_gi_enum - guard interval options
* enum iwl_tlc_mng_cfg_gi - guard interval options
* @IWL_TLC_MNG_SGI_20MHZ_MSK: enable short GI for 20MHZ
* @IWL_TLC_MNG_SGI_40MHZ_MSK: enable short GI for 40MHZ
* @IWL_TLC_MNG_SGI_80MHZ_MSK: enable short GI for 80MHZ
* @IWL_TLC_MNG_SGI_160MHZ_MSK: enable short GI for 160MHZ
*/
enum iwl_tlc_mng_cfg_gi_enum {
enum iwl_tlc_mng_cfg_gi {
IWL_TLC_MNG_SGI_20MHZ_MSK = BIT(0),
IWL_TLC_MNG_SGI_40MHZ_MSK = BIT(1),
IWL_TLC_MNG_SGI_80MHZ_MSK = BIT(2),
@ -123,7 +123,7 @@ enum iwl_tlc_mng_cfg_gi_enum {
};
/**
* enum iwl_tlc_mng_cfg_mode_enum - supported modes
* enum iwl_tlc_mng_cfg_mode - supported modes
* @IWL_TLC_MNG_MODE_CCK: enable CCK
* @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
* @IWL_TLC_MNG_MODE_NON_HT: enable non HT
@ -133,7 +133,7 @@ enum iwl_tlc_mng_cfg_gi_enum {
* @IWL_TLC_MNG_MODE_INVALID: invalid value
* @IWL_TLC_MNG_MODE_NUM: a count of possible modes
*/
enum iwl_tlc_mng_cfg_mode_enum {
enum iwl_tlc_mng_cfg_mode {
IWL_TLC_MNG_MODE_CCK = 0,
IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
@ -145,14 +145,14 @@ enum iwl_tlc_mng_cfg_mode_enum {
};
/**
* enum iwl_tlc_mng_vht_he_types_enum - VHT HE types
* enum iwl_tlc_mng_vht_he_types - VHT HE types
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU: VHT HT single user
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT: VHT HT single user extended
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU: VHT HT multiple users
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_TRIG_BASED: trigger based
* @IWL_TLC_MNG_VALID_VHT_HE_TYPES_NUM: a count of possible types
*/
enum iwl_tlc_mng_vht_he_types_enum {
enum iwl_tlc_mng_vht_he_types {
IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU = 0,
IWL_TLC_MNG_VALID_VHT_HE_TYPES_SU_EXT,
IWL_TLC_MNG_VALID_VHT_HE_TYPES_MU,
@ -163,7 +163,7 @@ enum iwl_tlc_mng_vht_he_types_enum {
};
/**
* enum iwl_tlc_mng_ht_rates_enum - HT/VHT rates
* enum iwl_tlc_mng_ht_rates - HT/VHT rates
* @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
* @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
* @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
@ -176,7 +176,7 @@ enum iwl_tlc_mng_vht_he_types_enum {
* @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
* @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
*/
enum iwl_tlc_mng_ht_rates_enum {
enum iwl_tlc_mng_ht_rates {
IWL_TLC_MNG_HT_RATE_MCS0 = 0,
IWL_TLC_MNG_HT_RATE_MCS1,
IWL_TLC_MNG_HT_RATE_MCS2,
@ -198,13 +198,13 @@ enum iwl_tlc_mng_ht_rates_enum {
* @sta_id: station id
* @reserved1: reserved
* @max_supp_ch_width: channel width
* @flags: bitmask of %IWL_TLC_MNG_CONFIG_FLAGS_ENABLE_\*
* @chains: bitmask of %IWL_TLC_MNG_CHAIN_\*
* @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
* @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
* @max_supp_ss: valid values are 0-3; 0 means spatial streams are not supported
* @valid_vht_he_types: bitmap of %IWL_TLC_MNG_VALID_VHT_HE_TYPES_\*
* @valid_vht_he_types: bitmap of &enum iwl_tlc_mng_vht_he_types
* @non_ht_supp_rates: bitmap of supported legacy rates
* @ht_supp_rates: bitmap of supported HT/VHT rates, valid bits are 0-9
* @mode: modulation type %IWL_TLC_MNG_MODE_\*
* @mode: &enum iwl_tlc_mng_cfg_mode
* @reserved2: reserved
* @he_supp_rates: bitmap of supported HE rates
* @sgi_ch_width_supp: bitmap of SGI support per channel width


@ -0,0 +1,195 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "api/commands.h"
#include "debugfs.h"
#define FWRT_DEBUGFS_READ_FILE_OPS(name) \
static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt, \
char *buf, size_t count, \
loff_t *ppos); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt, \
char *buf, size_t count, \
loff_t *ppos); \
static ssize_t _iwl_dbgfs_##name##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos) \
{ \
struct iwl_fw_runtime *fwrt = file->private_data; \
char buf[buflen] = {}; \
size_t buf_size = min(count, sizeof(buf) - 1); \
\
if (copy_from_user(buf, user_buf, buf_size)) \
return -EFAULT; \
\
return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos); \
}
#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen) \
FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = _iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen) \
FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen) \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = _iwl_dbgfs_##name##_write, \
.open = simple_open, \
.llseek = generic_file_llseek, \
}
#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
if (!debugfs_create_file(alias, mode, parent, fwrt, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
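/* For orientation (a sketch, not part of the patch): the
 * FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10) invocation further
 * down expands to roughly
 *
 *	static ssize_t _iwl_dbgfs_timestamp_marker_write(struct file *file,
 *							 const char __user *user_buf,
 *							 size_t count, loff_t *ppos)
 *	{
 *		struct iwl_fw_runtime *fwrt = file->private_data;
 *		char buf[10] = {};
 *		size_t buf_size = min(count, sizeof(buf) - 1);
 *
 *		if (copy_from_user(buf, user_buf, buf_size))
 *			return -EFAULT;
 *		return iwl_dbgfs_timestamp_marker_write(fwrt, buf, buf_size, ppos);
 *	}
 *
 * plus a file_operations instance named iwl_dbgfs_timestamp_marker_ops
 * whose .write points at this wrapper.
 */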
static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
{
struct iwl_mvm_marker marker = {
.dw_len = sizeof(struct iwl_mvm_marker) / 4,
.marker_id = MARKER_ID_SYNC_CLOCK,
/* the real timestamp is taken from the ftrace clock;
 * this is for finding the match between fw and kernel logs
 */
.timestamp = cpu_to_le64(fwrt->timestamp.seq++),
};
struct iwl_host_cmd hcmd = {
.id = MARKER_CMD,
.flags = CMD_ASYNC,
.data[0] = &marker,
.len[0] = sizeof(marker),
};
return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}
static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
{
int ret;
struct iwl_fw_runtime *fwrt =
container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
unsigned long delay = fwrt->timestamp.delay;
ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
if (!ret && delay)
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(delay));
else
IWL_INFO(fwrt,
"stopping timestamp_marker, ret: %d, delay: %u\n",
ret, jiffies_to_msecs(delay) / 1000);
}
static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
char *buf, size_t count,
loff_t *ppos)
{
int ret;
u32 delay;
ret = kstrtou32(buf, 10, &delay);
if (ret < 0)
return ret;
IWL_INFO(fwrt,
"starting timestamp_marker trigger with delay: %us\n",
delay);
iwl_fw_cancel_timestamp(fwrt);
fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
schedule_delayed_work(&fwrt->timestamp.wk,
round_jiffies_relative(fwrt->timestamp.delay));
return count;
}
FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, S_IWUSR);
return 0;
err:
IWL_ERR(fwrt, "Can't create the fwrt debugfs directory\n");
return -ENOMEM;
}
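
In use, writing a number of seconds to the resulting timestamp_marker debugfs file arms the delayed work: the write handler parses the value with kstrtou32(), converts it via msecs_to_jiffies(delay * 1000) and schedules iwl_fw_timestamp_marker_wk(), which keeps re-queuing itself until the delay is cleared again through iwl_fw_cancel_timestamp().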


@ -0,0 +1,87 @@
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "runtime.h"
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir);
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
{
fwrt->timestamp.delay = 0;
cancel_delayed_work_sync(&fwrt->timestamp.wk);
}
#else
static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
struct dentry *dbgfs_dir)
{
return 0;
}
static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
#endif /* CONFIG_IWLWIFI_DEBUGFS */


@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@ -34,7 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -248,6 +248,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
* @IWL_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
* @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
* indicating low latency direction.
* @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is
* deprecated.
*
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
@ -266,6 +268,7 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY = (__force iwl_ucode_tlv_api_t)38,
IWL_UCODE_TLV_API_DEPRECATE_TTAK = (__force iwl_ucode_tlv_api_t)41,
NUM_IWL_UCODE_TLV_API
#ifdef __CHECKER__
@ -311,6 +314,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
* @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
* @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
* @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota-related handling
* @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
* @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
* @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@ -366,6 +370,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_CDB_SUPPORT = (__force iwl_ucode_tlv_capa_t)40,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST = (__force iwl_ucode_tlv_capa_t)41,
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD = (__force iwl_ucode_tlv_capa_t)43,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA = (__force iwl_ucode_tlv_capa_t)44,
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65,
IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67,
@ -540,7 +545,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
} __packed;
/**
* struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
* struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
* @monitor_mode: &enum iwl_fw_dbg_monitor_mode
@ -555,7 +560,7 @@ struct iwl_fw_dbg_mem_seg_tlv {
*
* This parses IWL_UCODE_TLV_FW_DBG_DEST
*/
struct iwl_fw_dbg_dest_tlv {
struct iwl_fw_dbg_dest_tlv_v1 {
u8 version;
u8 monitor_mode;
u8 size_power;
@ -569,6 +574,26 @@ struct iwl_fw_dbg_dest_tlv {
struct iwl_fw_dbg_reg_op reg_ops[0];
} __packed;
/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
#define IWL_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000
/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
#define IWL_LDBG_M2S_BUF_BA_MSK 0x00000fff
/* The smem buffer chunks are in units of 256 bits */
#define IWL_M2S_UNIT_SIZE 0x100
struct iwl_fw_dbg_dest_tlv {
u8 version;
u8 monitor_mode;
u8 size_power;
u8 reserved;
__le32 cfg_reg;
__le32 write_ptr_reg;
__le32 wrap_count;
u8 base_shift;
u8 size_shift;
struct iwl_fw_dbg_reg_op reg_ops[0];
} __packed;
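/* For version-1 TLVs the buffer base and size are both derived from the
 * value behind cfg_reg; a sketch of the decoding, matching the PCIe dump
 * code later in this diff:
 *
 *	cfg = iwl_read_prph(trans, le32_to_cpu(dest->cfg_reg));
 *	base = ((cfg & IWL_LDBG_M2S_BUF_BA_MSK) << dest->base_shift)
 *	       * IWL_M2S_UNIT_SIZE + trans->cfg->smem_offset;
 *	len  = ((cfg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> dest->size_shift)
 *	       * IWL_M2S_UNIT_SIZE;
 */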
struct iwl_fw_dbg_conf_hcmd {
u8 id;
u8 reserved;


@ -284,7 +284,7 @@ struct iwl_fw {
struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];


@ -58,10 +58,12 @@
#include "iwl-drv.h"
#include "runtime.h"
#include "dbg.h"
#include "debugfs.h"
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx)
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
struct dentry *dbgfs_dir)
{
memset(fwrt, 0, sizeof(*fwrt));
fwrt->trans = trans;
@ -71,5 +73,12 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops = ops;
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt)
{
iwl_fw_cancel_timestamp(fwrt);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit);


@ -134,11 +134,21 @@ struct iwl_fw_runtime {
/* ts of the beginning of a non-collect fw dbg data period */
unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
} dump;
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct {
struct delayed_work wk;
u32 delay;
u64 seq;
} timestamp;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx);
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
struct dentry *dbgfs_dir);
void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
enum iwl_ucode_type cur_fw_img)


@ -95,7 +95,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
TP_STRUCT__entry(
DEV_ENTRY
__field(void *, skbaddr)
__field(size_t, framelen)
__dynamic_array(u8, tfd, tfdlen)
@ -110,6 +110,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
),
TP_fast_assign(
DEV_ASSIGN;
__entry->skbaddr = skb;
__entry->framelen = buf0_len;
if (hdr_len > 0)
__entry->framelen += skb->len - hdr_len;
@ -120,9 +121,9 @@ TRACE_EVENT(iwlwifi_dev_tx,
__get_dynamic_array(buf1),
skb->len - hdr_len);
),
TP_printk("[%s] TX %.2x (%zu bytes)",
TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p",
__get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
__entry->framelen)
__entry->framelen, __entry->skbaddr)
);
TRACE_EVENT(iwlwifi_dev_ucode_error,


@ -296,7 +296,12 @@ struct iwl_firmware_pieces {
u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
/* FW debug data parsed for driver usage */
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
bool dbg_dest_tlv_init;
u8 *dbg_dest_ver;
union {
struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
};
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
@ -930,21 +935,49 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
struct iwl_fw_dbg_dest_tlv *dest = (void *)tlv_data;
struct iwl_fw_dbg_dest_tlv *dest = NULL;
struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
u8 mon_mode;
if (pieces->dbg_dest_tlv) {
pieces->dbg_dest_ver = (u8 *)tlv_data;
if (*pieces->dbg_dest_ver == 1) {
dest = (void *)tlv_data;
} else if (*pieces->dbg_dest_ver == 0) {
dest_v1 = (void *)tlv_data;
} else {
IWL_ERR(drv,
"The version is %d, and it is invalid\n",
*pieces->dbg_dest_ver);
break;
}
if (pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
"dbg destination ignored, already exists\n");
break;
}
pieces->dbg_dest_tlv = dest;
IWL_INFO(drv, "Found debug destination: %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
pieces->dbg_dest_tlv_init = true;
if (dest_v1) {
pieces->dbg_dest_tlv_v1 = dest_v1;
mon_mode = dest_v1->monitor_mode;
} else {
pieces->dbg_dest_tlv = dest;
mon_mode = dest->monitor_mode;
}
IWL_INFO(drv, "Found debug destination: %s\n",
get_fw_dbg_mode_string(mon_mode));
drv->fw.dbg_dest_reg_num = (dest_v1) ?
tlv_len -
offsetof(struct iwl_fw_dbg_dest_tlv_v1,
reg_ops) :
tlv_len -
offsetof(struct iwl_fw_dbg_dest_tlv,
reg_ops);
drv->fw.dbg_dest_reg_num =
tlv_len - offsetof(struct iwl_fw_dbg_dest_tlv,
reg_ops);
drv->fw.dbg_dest_reg_num /=
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
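/* Worked example with made-up sizes: tlv_len = 86,
 * offsetof(struct iwl_fw_dbg_dest_tlv_v1, reg_ops) = 22 and
 * sizeof(reg_ops[0]) = 8 would leave 64 bytes of register ops,
 * i.e. dbg_dest_reg_num = (86 - 22) / 8 = 8.
 */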
@ -953,7 +986,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_FW_DBG_CONF: {
struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
if (!pieces->dbg_dest_tlv) {
if (!pieces->dbg_dest_tlv_init) {
IWL_ERR(drv,
"Ignore dbg config %d - no destination configured\n",
conf->id);
@ -1340,15 +1373,51 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
if (iwl_alloc_ucode(drv, pieces, i))
goto out_free_fw;
if (pieces->dbg_dest_tlv) {
drv->fw.dbg_dest_tlv =
kmemdup(pieces->dbg_dest_tlv,
sizeof(*pieces->dbg_dest_tlv) +
sizeof(pieces->dbg_dest_tlv->reg_ops[0]) *
drv->fw.dbg_dest_reg_num, GFP_KERNEL);
if (pieces->dbg_dest_tlv_init) {
size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
drv->fw.dbg_dest_reg_num;
drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
if (!drv->fw.dbg_dest_tlv)
goto out_free_fw;
if (*pieces->dbg_dest_ver == 0) {
memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
dbg_dest_size);
} else {
struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
drv->fw.dbg_dest_tlv;
dest_tlv->version = pieces->dbg_dest_tlv->version;
dest_tlv->monitor_mode =
pieces->dbg_dest_tlv->monitor_mode;
dest_tlv->size_power =
pieces->dbg_dest_tlv->size_power;
dest_tlv->wrap_count =
pieces->dbg_dest_tlv->wrap_count;
dest_tlv->write_ptr_reg =
pieces->dbg_dest_tlv->write_ptr_reg;
dest_tlv->base_shift =
pieces->dbg_dest_tlv->base_shift;
memcpy(dest_tlv->reg_ops,
pieces->dbg_dest_tlv->reg_ops,
sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
drv->fw.dbg_dest_reg_num);
/* In version 1 of the destination tlv, which is
 * relevant for the internal buffer exclusively,
 * the base address is given together with the
 * length of the buffer, and a size shift is
 * given instead of an end shift. We store these
 * values in base_reg and end_shift, and when
 * dumping the data we extract both the length
 * and the base address from them. */
dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
dest_tlv->end_shift =
pieces->dbg_dest_tlv->size_shift;
}
}
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {


@ -579,6 +579,7 @@ struct iwl_trans_ops {
void (*configure)(struct iwl_trans *trans,
const struct iwl_trans_config *trans_cfg);
void (*set_pmi)(struct iwl_trans *trans, bool state);
void (*sw_reset)(struct iwl_trans *trans);
bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
void (*release_nic_access)(struct iwl_trans *trans,
unsigned long *flags);
@ -744,7 +745,7 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
u8 dbg_dest_reg_num;
@ -1124,6 +1125,12 @@ static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
trans->ops->set_pmi(trans, state);
}
static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
{
if (trans->ops->sw_reset)
trans->ops->sw_reset(trans);
}
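/* Op-mode code can now reset the NIC without touching bus-specific CSRs
 * directly; the lmac error-dump path later in this diff, for instance,
 * simply calls
 *
 *	iwl_trans_sw_reset(trans);
 *
 * which degrades to a no-op on transports that do not provide ->sw_reset.
 */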
static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{


@ -1221,7 +1221,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
loff_t *ppos)
{
struct iwl_trans *trans = mvm->trans;
const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
struct iwl_continuous_record_cmd cont_rec = {};
int ret, rec_mode;
@ -1914,7 +1914,7 @@ void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
if (iwl_mvm_has_tlc_offload(mvm))
MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, S_IRUSR);
return;


@ -421,7 +421,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
if (iwl_mvm_has_tlc_offload(mvm)) {
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
}
@ -460,7 +460,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* this is the case for CCK frames, it's better (only 8) for OFDM */
hw->radiotap_timestamp.accuracy = 22;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
if (!iwl_mvm_has_tlc_offload(mvm))
hw->rate_control_algorithm = RS_NAME;
hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
@ -3801,7 +3801,7 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
mvm->noa_duration = noa_duration;
mvm->noa_vif = vif;
return iwl_mvm_update_quotas(mvm, false, NULL);
return iwl_mvm_update_quotas(mvm, true, NULL);
case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
/* must be associated client vif - ignore authorized */
if (!vif || vif->type != NL80211_IFTYPE_STATION ||


@ -1278,6 +1278,12 @@ static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
}
static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
}
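/* This helper replaces the open-coded capability checks throughout mvm,
 * e.g. in the rate-scaling init later in this diff:
 *
 *	if (iwl_mvm_has_tlc_offload(mvm))
 *		rs_fw_rate_init(mvm, sta, band);
 *	else
 *		rs_drv_rate_init(mvm, sta, band, init);
 */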
static inline struct agg_tx_status *
iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
{


@ -602,7 +602,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm);
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
dbgfs_dir);
mvm->init_status = 0;
@ -801,6 +802,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_leds_exit(mvm);
iwl_mvm_thermal_exit(mvm);
out_free:
iwl_fw_runtime_exit(&mvm->fwrt);
iwl_fw_flush_dump(&mvm->fwrt);
if (iwlmvm_mod_params.init_dbg)
@ -841,7 +843,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
kfree(mvm->d3_resume_sram);
#endif
iwl_fw_runtime_exit(&mvm->fwrt);
iwl_trans_op_mode_leave(mvm->trans);
iwl_phy_db_free(mvm->phy_db);


@ -202,6 +202,10 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
return 0;
/* update all upon completion */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return 0;


@ -4052,7 +4052,7 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool init)
{
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
if (iwl_mvm_has_tlc_offload(mvm))
rs_fw_rate_init(mvm, sta, band);
else
rs_drv_rate_init(mvm, sta, band, init);
@ -4096,7 +4096,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool enable)
{
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
if (iwl_mvm_has_tlc_offload(mvm))
return rs_fw_tx_protection(mvm, mvmsta, enable);
else
return rs_drv_tx_protection(mvm, mvmsta, enable);


@ -222,7 +222,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
/* Don't drop the frame and decrypt it in SW */
if (!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
!(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;
/* fall through if TTAK OK */


@ -261,7 +261,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return 0;
case IWL_RX_MPDU_STATUS_SEC_TKIP:
/* Don't drop the frame and decrypt it in SW */
if (!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
!(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
return 0;
*crypt_len = IEEE80211_TKIP_IV_LEN;


@ -1443,7 +1443,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
* if rs is registered with mac80211, then "add station" will be handled
* via the corresponding ops, otherwise need to notify rate scaling here
*/
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD))
if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta);
update_fw:
@ -2586,8 +2586,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* When FW supports TLC_OFFLOAD, it also implements Tx aggregation
* manager, so this function should never be called in this case.
*/
if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
return -EINVAL;
BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)


@ -888,10 +888,9 @@ static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
/*
* The first deferred frame should've stopped the MAC queues, so we
* should never get a second deferred frame for the RA/TID.
* In case of GSO the first packet may have been split, so don't warn.
*/
if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
"RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
skb_queue_len(deferred_tx_frames))) {
if (skb_queue_len(deferred_tx_frames) == 1) {
iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
schedule_work(&mvm->add_stream_wk);
}
@ -1719,8 +1718,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
ba_info->band = chanctx_conf->def.chan->band;
iwl_mvm_hwrate_to_tx_status(rate, ba_info);
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
if (!iwl_mvm_has_tlc_offload(mvm)) {
IWL_DEBUG_TX_REPLY(mvm,
"No reclaim. Update rs directly\n");
iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);


@ -516,8 +516,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
IWL_ERR(trans, "HW error, resetting before reading\n");
/* reset the device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
iwl_trans_sw_reset(trans);
/* set INIT_DONE flag */
iwl_set_bit(trans, CSR_GP_CNTRL,
@ -913,8 +912,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
};
if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
iwl_mvm_has_tlc_offload(mvm)))
return -EINVAL;
return iwl_mvm_send_cmd(mvm, &cmd);
@ -1033,12 +1031,34 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int res;
bool low_latency;
lockdep_assert_held(&mvm->mutex);
if (iwl_mvm_vif_low_latency(mvmvif) == prev)
low_latency = iwl_mvm_vif_low_latency(mvmvif);
if (low_latency == prev)
return 0;
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
struct iwl_mac_low_latency_cmd cmd = {
.mac_id = cpu_to_le32(mvmvif->id)
};
if (low_latency) {
/* currently we don't care about the direction */
cmd.low_latency_rx = 1;
cmd.low_latency_tx = 1;
}
res = iwl_mvm_send_cmd_pdu(mvm,
iwl_cmd_id(LOW_LATENCY_CMD,
MAC_CONF_GROUP, 0),
0, sizeof(cmd), &cmd);
if (res)
IWL_ERR(mvm, "Failed to send low latency command\n");
}
res = iwl_mvm_update_quotas(mvm, false, NULL);
if (res)
return res;


@ -658,13 +658,6 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
static inline void iwl_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);


@ -137,7 +137,7 @@ static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
iwl_pcie_sw_reset(trans);
iwl_trans_sw_reset(trans);
/*
* Clear "initialization complete" bit to move adapter from
@ -192,7 +192,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
iwl_pcie_sw_reset(trans);
iwl_trans_sw_reset(trans);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't


@ -176,6 +176,13 @@ out:
kfree(buf);
}
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@ -446,7 +453,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
iwl_pcie_sw_reset(trans);
iwl_trans_pcie_sw_reset(trans);
/*
* Set "initialization complete" bit to move adapter from
@ -487,7 +494,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
iwl_pcie_sw_reset(trans);
iwl_trans_pcie_sw_reset(trans);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@ -580,7 +587,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
return;
}
iwl_pcie_sw_reset(trans);
iwl_trans_pcie_sw_reset(trans);
/*
* Clear "initialization complete" bit to move adapter from
@ -915,14 +922,9 @@ static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
int i;
if (dest->version)
IWL_ERR(trans,
"DBG DEST version is %d - expect issues\n",
dest->version);
IWL_INFO(trans, "Applying debug destination %s\n",
get_fw_dbg_mode_string(dest->monitor_mode));
@ -1270,7 +1272,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
iwl_pcie_sw_reset(trans);
iwl_trans_pcie_sw_reset(trans);
/*
* Upon stop, the IVAR table gets erased, so msi-x won't
@ -1744,7 +1746,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
return err;
}
iwl_pcie_sw_reset(trans);
iwl_trans_pcie_sw_reset(trans);
err = iwl_pcie_apm_init(trans);
if (err)
@ -2816,8 +2818,17 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
* Update pointers to reflect actual values after
* shifting
*/
base = iwl_read_prph(trans, base) <<
trans->dbg_dest_tlv->base_shift;
if (trans->dbg_dest_tlv->version) {
base = (iwl_read_prph(trans, base) &
IWL_LDBG_M2S_BUF_BA_MSK) <<
trans->dbg_dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
base += trans->cfg->smem_offset;
} else {
base = iwl_read_prph(trans, base) <<
trans->dbg_dest_tlv->base_shift;
}
iwl_trans_read_mem(trans, base, fw_mon_data->data,
monitor_len / sizeof(u32));
} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
@ -2865,21 +2876,36 @@ static struct iwl_trans_dump_data
trans_pcie->fw_mon_size;
monitor_len = trans_pcie->fw_mon_size;
} else if (trans->dbg_dest_tlv) {
u32 base, end;
u32 base, end, cfg_reg;
base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
if (trans->dbg_dest_tlv->version == 1) {
cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
cfg_reg = iwl_read_prph(trans, cfg_reg);
base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
trans->dbg_dest_tlv->base_shift;
base *= IWL_M2S_UNIT_SIZE;
base += trans->cfg->smem_offset;
base = iwl_read_prph(trans, base) <<
trans->dbg_dest_tlv->base_shift;
end = iwl_read_prph(trans, end) <<
trans->dbg_dest_tlv->end_shift;
monitor_len =
(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
trans->dbg_dest_tlv->end_shift;
monitor_len *= IWL_M2S_UNIT_SIZE;
} else {
base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
/* Make "end" point to the actual end */
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000 ||
trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
base = iwl_read_prph(trans, base) <<
trans->dbg_dest_tlv->base_shift;
end = iwl_read_prph(trans, end) <<
trans->dbg_dest_tlv->end_shift;
/* Make "end" point to the actual end */
if (trans->cfg->device_family >=
IWL_DEVICE_FAMILY_8000 ||
trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
end += (1 << trans->dbg_dest_tlv->end_shift);
monitor_len = end - base;
}
len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
monitor_len;
} else {
@ -3025,6 +3051,7 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.write_mem = iwl_trans_pcie_write_mem, \
.configure = iwl_trans_pcie_configure, \
.set_pmi = iwl_trans_pcie_set_pmi, \
.sw_reset = iwl_trans_pcie_sw_reset, \
.grab_nic_access = iwl_trans_pcie_grab_nic_access, \
.release_nic_access = iwl_trans_pcie_release_nic_access, \
.set_bits_mask = iwl_trans_pcie_set_bits_mask, \


@ -290,13 +290,16 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
adapter->dbg.last_cmd_act[adapter->dbg.last_cmd_index] =
get_unaligned_le16((u8 *)host_cmd + S_DS_GEN);
/* Set up the timer after transmitting the command, except for
 * commands that have no command response.
 */
if (cmd_code != HostCmd_CMD_FW_DUMP_EVENT)
mod_timer(&adapter->cmd_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
/* Clear BSS_NO_BITS from HostCmd */
cmd_code &= HostCmd_CMD_ID_MASK;
/* Setup the timer after transmit command */
mod_timer(&adapter->cmd_timer,
jiffies + msecs_to_jiffies(MWIFIEX_TIMER_10S));
return 0;
}


@ -168,10 +168,15 @@ mwifiex_device_dump_read(struct file *file, char __user *ubuf,
{
struct mwifiex_private *priv = file->private_data;
if (!priv->adapter->if_ops.device_dump)
return -EIO;
priv->adapter->if_ops.device_dump(priv->adapter);
/* For command timeouts, USB firmware will automatically emit
* firmware dump events, so we don't implement device_dump().
* For user-initiated dumps, we trigger it ourselves.
*/
if (priv->adapter->iface_type == MWIFIEX_USB)
mwifiex_send_cmd(priv, HostCmd_CMD_FW_DUMP_EVENT,
HostCmd_ACT_GEN_SET, 0, NULL, true);
else
priv->adapter->if_ops.device_dump(priv->adapter);
return 0;
}


@ -56,6 +56,15 @@ struct mwifiex_fw_data {
u8 data[1];
} __packed;
struct mwifiex_fw_dump_header {
__le16 seq_num;
__le16 reserved;
__le16 type;
__le16 len;
} __packed;
#define FW_DUMP_INFO_ENDED 0x0002
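/* A sketch of how a dump-event payload could be walked with this header
 * (hypothetical variable names; the real event handling is not part of
 * the hunks shown here):
 *
 *	struct mwifiex_fw_dump_header *hdr = (void *)event_buf;
 *	u16 type = le16_to_cpu(hdr->type);
 *	u16 len  = le16_to_cpu(hdr->len);
 *
 *	if (type == FW_DUMP_INFO_ENDED)
 *		upload the accumulated buffer via devcoredump;
 *	else
 *		append the 'len' payload bytes following the header.
 */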
#define MWIFIEX_FW_DNLD_CMD_1 0x1
#define MWIFIEX_FW_DNLD_CMD_5 0x5
#define MWIFIEX_FW_DNLD_CMD_6 0x6
@ -400,6 +409,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_TDLS_CONFIG 0x0100
#define HostCmd_CMD_MC_POLICY 0x0121
#define HostCmd_CMD_TDLS_OPER 0x0122
#define HostCmd_CMD_FW_DUMP_EVENT 0x0125
#define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223
#define HostCmd_CMD_CHAN_REGION_CFG 0x0242
#define HostCmd_CMD_PACKET_AGGR_CTRL 0x0251
@ -570,6 +580,7 @@ enum mwifiex_channel_flags {
#define EVENT_BG_SCAN_STOPPED 0x00000065
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_MULTI_CHAN_INFO 0x0000006a
#define EVENT_FW_DUMP_INFO 0x00000073
#define EVENT_TX_STATUS_REPORT 0x00000074
#define EVENT_BT_COEX_WLAN_PARA_CHANGE 0X00000076


@ -64,6 +64,13 @@ static void wakeup_timer_fn(struct timer_list *t)
adapter->if_ops.card_reset(adapter);
}
static void fw_dump_timer_fn(struct timer_list *t)
{
struct mwifiex_adapter *adapter = from_timer(adapter, t, devdump_timer);
mwifiex_upload_device_dump(adapter);
}
/*
* This function initializes the private structure and sets default
* values to the members.
@ -314,6 +321,8 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->iface_limit.p2p_intf = MWIFIEX_MAX_P2P_NUM;
adapter->active_scan_triggered = false;
timer_setup(&adapter->wakeup_timer, wakeup_timer_fn, 0);
adapter->devdump_len = 0;
timer_setup(&adapter->devdump_timer, fw_dump_timer_fn, 0);
}
/*
@ -396,6 +405,7 @@ static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
del_timer(&adapter->wakeup_timer);
del_timer_sync(&adapter->devdump_timer);
mwifiex_cancel_all_pending_cmd(adapter);
wake_up_interruptible(&adapter->cmd_wait_q.wait);
wake_up_interruptible(&adapter->hs_activate_wait_q);


@ -1051,9 +1051,30 @@ void mwifiex_multi_chan_resync(struct mwifiex_adapter *adapter)
}
EXPORT_SYMBOL_GPL(mwifiex_multi_chan_resync);
int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter)
{
void *p;
/* Dump all the memory data into a single file; a userspace script
 * will be used to split it into multiple files.
 */
mwifiex_dbg(adapter, MSG,
"== mwifiex dump information to /sys/class/devcoredump start\n");
dev_coredumpv(adapter->dev, adapter->devdump_data, adapter->devdump_len,
GFP_KERNEL);
mwifiex_dbg(adapter, MSG,
"== mwifiex dump information to /sys/class/devcoredump end\n");
/* Device dump data will be freed by the device coredump release
 * function after 5 min. Reset adapter->devdump_data and ->devdump_len
 * here to avoid them being accidentally reused.
 */
adapter->devdump_data = NULL;
adapter->devdump_len = 0;
}
EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
{
char *p;
char drv_version[64];
struct usb_card_rec *cardp;
struct sdio_mmc_card *sdio_card;
@ -1061,17 +1082,12 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
int i, idx;
struct netdev_queue *txq;
struct mwifiex_debug_info *debug_info;
void *drv_info_dump;
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump start===\n");
/* memory allocate here should be free in mwifiex_upload_device_dump*/
drv_info_dump = vzalloc(MWIFIEX_DRV_INFO_SIZE_MAX);
if (!drv_info_dump)
return 0;
p = (char *)(drv_info_dump);
p = adapter->devdump_data;
strcpy(p, "========Start dump driverinfo========\n");
p += strlen("========Start dump driverinfo========\n");
p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
mwifiex_drv_get_driver_version(adapter, drv_version,
@ -1155,21 +1171,18 @@ int mwifiex_drv_info_dump(struct mwifiex_adapter *adapter, void **drv_info)
kfree(debug_info);
}
strcpy(p, "\n========End dump========\n");
p += strlen("\n========End dump========\n");
mwifiex_dbg(adapter, MSG, "===mwifiex driverinfo dump end===\n");
*drv_info = drv_info_dump;
return p - drv_info_dump;
adapter->devdump_len = p - (char *)adapter->devdump_data;
}
EXPORT_SYMBOL_GPL(mwifiex_drv_info_dump);
void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
int drv_info_size)
void mwifiex_prepare_fw_dump_info(struct mwifiex_adapter *adapter)
{
u8 idx, *dump_data, *fw_dump_ptr;
u32 dump_len;
dump_len = (strlen("========Start dump driverinfo========\n") +
drv_info_size +
strlen("\n========End dump========\n"));
u8 idx;
char *fw_dump_ptr;
u32 dump_len = 0;
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
@ -1184,24 +1197,24 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
}
}
dump_data = vzalloc(dump_len + 1);
if (!dump_data)
goto done;
if (dump_len + 1 + adapter->devdump_len > MWIFIEX_FW_DUMP_SIZE) {
/* Realloc in case of buffer overflow */
fw_dump_ptr = vzalloc(dump_len + 1 + adapter->devdump_len);
mwifiex_dbg(adapter, MSG, "Realloc device dump data.\n");
if (!fw_dump_ptr) {
vfree(adapter->devdump_data);
mwifiex_dbg(adapter, ERROR,
"vzalloc devdump data failure!\n");
return;
}
fw_dump_ptr = dump_data;
memmove(fw_dump_ptr, adapter->devdump_data,
adapter->devdump_len);
vfree(adapter->devdump_data);
adapter->devdump_data = fw_dump_ptr;
}
/* Dump all the memory data into single file, a userspace script will
* be used to split all the memory data to multiple files
*/
mwifiex_dbg(adapter, MSG,
"== mwifiex dump information to /sys/class/devcoredump start");
strcpy(fw_dump_ptr, "========Start dump driverinfo========\n");
fw_dump_ptr += strlen("========Start dump driverinfo========\n");
memcpy(fw_dump_ptr, drv_info, drv_info_size);
fw_dump_ptr += drv_info_size;
strcpy(fw_dump_ptr, "\n========End dump========\n");
fw_dump_ptr += strlen("\n========End dump========\n");
fw_dump_ptr = (char *)adapter->devdump_data + adapter->devdump_len;
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
@ -1225,14 +1238,8 @@ void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter, void *drv_info,
}
}
/* device dump data will be free in device coredump release function
* after 5 min
*/
dev_coredumpv(adapter->dev, dump_data, dump_len, GFP_KERNEL);
mwifiex_dbg(adapter, MSG,
"== mwifiex dump information to /sys/class/devcoredump end");
adapter->devdump_len = fw_dump_ptr - (char *)adapter->devdump_data;
done:
for (idx = 0; idx < adapter->num_mem_types; idx++) {
struct memory_type_mapping *entry =
&adapter->mem_type_mapping_tbl[idx];
@ -1241,10 +1248,8 @@ done:
entry->mem_ptr = NULL;
entry->mem_size = 0;
}
vfree(drv_info);
}
EXPORT_SYMBOL_GPL(mwifiex_upload_device_dump);
EXPORT_SYMBOL_GPL(mwifiex_prepare_fw_dump_info);
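/* The refactored flow, pieced together from this diff (allocation of
 * adapter->devdump_data happens at the dump trigger site, outside the
 * hunks shown here):
 *
 *	mwifiex_drv_info_dump(adapter);        // driver info, sets devdump_len
 *	mwifiex_prepare_fw_dump_info(adapter); // appends firmware memory segments
 *	mwifiex_upload_device_dump(adapter);   // hands the buffer to devcoredump
 */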
/*
* CFG802.11 network device handler for statistics retrieval.

Some files were not shown because too many files have changed in this diff.