Merge ath-next from git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
ath.git patches for v5.10. Major changes:

ath11k

* improvements to QCA6390 PCI support, adding essential missing
  features: ELF board files, packet log handling to avoid data stalls
  and crash fixes
commit 70442ee62d
@@ -187,6 +187,26 @@ const struct ce_attr ath11k_host_ce_config_qca6390[] = {

};

static bool ath11k_ce_need_shadow_fix(int ce_id)
{
/* only ce4 needs shadow workaround */
if (ce_id == 4)
return true;
return false;
}

static void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
{
int i;

if (!ab->hw_params.supports_shadow_regs)
return;

for (i = 0; i < ab->hw_params.ce_count; i++)
if (ath11k_ce_need_shadow_fix(i))
ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
}

static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -505,6 +525,12 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,

ce_ring->hal_ring_id = ret;

if (ab->hw_params.supports_shadow_regs &&
ath11k_ce_need_shadow_fix(ce_id))
ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
ce_ring->hal_ring_id);

return 0;
}

@@ -677,6 +703,9 @@ int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,

ath11k_hal_srng_access_end(ab, srng);

if (ath11k_ce_need_shadow_fix(pipe_id))
ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);

spin_unlock_bh(&srng->lock);

spin_unlock_bh(&ab->ce.ce_lock);
@@ -713,11 +742,56 @@ static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
}
}

static void ath11k_ce_shadow_config(struct ath11k_base *ab)
{
int i;

for (i = 0; i < ab->hw_params.ce_count; i++) {
if (ab->hw_params.host_ce_config[i].src_nentries)
ath11k_hal_srng_update_shadow_config(ab,
HAL_CE_SRC, i);

if (ab->hw_params.host_ce_config[i].dest_nentries) {
ath11k_hal_srng_update_shadow_config(ab,
HAL_CE_DST, i);

ath11k_hal_srng_update_shadow_config(ab,
HAL_CE_DST_STATUS, i);
}
}
}

void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
u32 **shadow_cfg, u32 *shadow_cfg_len)
{
if (!ab->hw_params.supports_shadow_regs)
return;

ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

/* shadow is already configured */
if (*shadow_cfg_len)
return;

/* shadow isn't configured yet, configure now.
* non-CE srngs are configured firstly, then
* all CE srngs.
*/
ath11k_hal_srng_shadow_config(ab);
ath11k_ce_shadow_config(ab);

/* get the shadow configuration */
ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}
EXPORT_SYMBOL(ath11k_ce_get_shadow_config);

void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int pipe_num;

ath11k_ce_stop_shadow_timers(ab);

for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
pipe = &ab->ce.ce_pipe[pipe_num];
ath11k_ce_rx_pipe_cleanup(pipe);
@@ -767,6 +841,9 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
int i;
int ret;

ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
&ab->qmi.ce_cfg.shadow_reg_v2_len);

for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];

@@ -828,6 +905,9 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];

if (ath11k_ce_need_shadow_fix(i))
ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);

if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
dma_free_coherent(ab->dev,
@@ -168,6 +168,7 @@ struct ath11k_ce {
struct ath11k_ce_pipe ce_pipe[CE_COUNT_MAX];
/* Protects rings of all ce pipes */
spinlock_t ce_lock;
struct ath11k_hp_update_timer hp_timer[CE_COUNT_MAX];
};

extern const struct ce_attr ath11k_host_ce_config_ipq8074[];
@@ -187,4 +188,6 @@ void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
int ath11k_ce_attr_attach(struct ath11k_base *ab);
void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
u32 **shadow_cfg, u32 *shadow_cfg_len);
#endif
@@ -58,6 +58,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.htt_peer_map_v2 = true,
.tcl_0_only = false,
.spectral_fft_sz = 2,

.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -88,6 +95,13 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.htt_peer_map_v2 = true,
.tcl_0_only = false,
.spectral_fft_sz = 4,

.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
.supports_monitor = true,
.supports_shadow_regs = false,
.idle_ps = false,
},
{
.name = "qca6390 hw2.0",
@@ -118,6 +132,12 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.htt_peer_map_v2 = false,
.tcl_0_only = true,
.spectral_fft_sz = 0,

.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
},
};

@@ -398,6 +418,7 @@ static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_qmi_firmware_stop(ab);

ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
@@ -37,6 +37,8 @@

extern unsigned int ath11k_frame_mode;

#define ATH11K_MON_TIMER_INTERVAL 10

enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
ATH11K_BW_40 = 1,
@@ -727,6 +729,7 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;

struct timer_list mon_reap_timer;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
@@ -837,12 +837,8 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
return 0;

ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);

if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
if (IS_ERR(ab->debugfs_soc))
return PTR_ERR(ab->debugfs_soc);
return -ENOMEM;
}
if (IS_ERR(ab->debugfs_soc))
return PTR_ERR(ab->debugfs_soc);

debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
@@ -855,27 +851,21 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)

void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
{
debugfs_remove_recursive(ab->debugfs_ath11k);
ab->debugfs_ath11k = NULL;
debugfs_remove_recursive(ab->debugfs_soc);
ab->debugfs_soc = NULL;
}

int ath11k_debugfs_soc_create(struct ath11k_base *ab)
{
ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);

if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
if (IS_ERR(ab->debugfs_ath11k))
return PTR_ERR(ab->debugfs_ath11k);
return -ENOMEM;
}

return 0;
return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
}

void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
{
debugfs_remove_recursive(ab->debugfs_soc);
ab->debugfs_soc = NULL;
debugfs_remove_recursive(ab->debugfs_ath11k);
ab->debugfs_ath11k = NULL;
}

void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
@@ -1069,13 +1059,8 @@ int ath11k_debugfs_register(struct ath11k *ar)
snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);

ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);

if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
if (IS_ERR(ar->debug.debugfs_pdev))
return PTR_ERR(ar->debug.debugfs_pdev);

return -ENOMEM;
}
if (IS_ERR(ar->debug.debugfs_pdev))
return PTR_ERR(ar->debug.debugfs_pdev);

/* Create a symlink under ieee80211/phy* */
snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev);
@ -304,11 +304,25 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!ab->hw_params.supports_shadow_regs)
|
||||
return;
|
||||
|
||||
for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
|
||||
ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
|
||||
|
||||
ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
|
||||
}
|
||||
|
||||
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
|
||||
{
|
||||
struct ath11k_dp *dp = &ab->dp;
|
||||
int i;
|
||||
|
||||
ath11k_dp_stop_shadow_timers(ab);
|
||||
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
|
||||
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
|
||||
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
|
||||
@ -374,6 +388,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
|
||||
|
||||
srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
|
||||
ath11k_hal_tx_init_data_ring(ab, srng);
|
||||
|
||||
ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
|
||||
ATH11K_SHADOW_DP_TIMER_INTERVAL,
|
||||
dp->tx_ring[i].tcl_data_ring.ring_id);
|
||||
}
|
||||
|
||||
ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
|
||||
@ -409,6 +427,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
|
||||
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
|
||||
ath11k_hal_reo_init_cmd_ring(ab, srng);
|
||||
|
||||
ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
|
||||
ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
|
||||
dp->reo_cmd_ring.ring_id);
|
||||
|
||||
ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
|
||||
0, 0, DP_REO_STATUS_RING_SIZE);
|
||||
if (ret) {
|
||||
@ -812,8 +834,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
|
||||
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
|
||||
|
||||
ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
|
||||
HAL_RX_BUF_RBM_SW3_BM,
|
||||
GFP_ATOMIC);
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -829,6 +850,8 @@ void ath11k_dp_pdev_free(struct ath11k_base *ab)
|
||||
struct ath11k *ar;
|
||||
int i;
|
||||
|
||||
del_timer_sync(&ab->mon_reap_timer);
|
||||
|
||||
for (i = 0; i < ab->num_radios; i++) {
|
||||
ar = ab->pdevs[i].ar;
|
||||
ath11k_dp_rx_pdev_free(ab, i);
|
||||
@ -1065,3 +1088,78 @@ fail_link_desc_cleanup:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
|
||||
{
|
||||
struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
|
||||
t, timer);
|
||||
struct ath11k_base *ab = update_timer->ab;
|
||||
struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
|
||||
|
||||
spin_lock_bh(&srng->lock);
|
||||
|
||||
/* when the timer is fired, the handler checks whether any new
|
||||
* TX has happened. The handler updates the HP only when there were
|
||||
* no TX operations during the timeout interval, and then stops
|
||||
* the timer. The timer will be started again when TX happens again.
|
||||
*/
|
||||
if (update_timer->timer_tx_num != update_timer->tx_num) {
|
||||
update_timer->timer_tx_num = update_timer->tx_num;
|
||||
mod_timer(&update_timer->timer, jiffies +
|
||||
msecs_to_jiffies(update_timer->interval));
|
||||
} else {
|
||||
update_timer->started = false;
|
||||
ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
|
||||
}
|
||||
|
||||
spin_unlock_bh(&srng->lock);
|
||||
}
|
||||
|
||||
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
|
||||
struct hal_srng *srng,
|
||||
struct ath11k_hp_update_timer *update_timer)
|
||||
{
|
||||
lockdep_assert_held(&srng->lock);
|
||||
|
||||
if (!ab->hw_params.supports_shadow_regs)
|
||||
return;
|
||||
|
||||
update_timer->tx_num++;
|
||||
|
||||
if (update_timer->started)
|
||||
return;
|
||||
|
||||
update_timer->started = true;
|
||||
update_timer->timer_tx_num = update_timer->tx_num;
|
||||
mod_timer(&update_timer->timer, jiffies +
|
||||
msecs_to_jiffies(update_timer->interval));
|
||||
}
|
||||
|
||||
void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
|
||||
struct ath11k_hp_update_timer *update_timer)
|
||||
{
|
||||
if (!ab->hw_params.supports_shadow_regs)
|
||||
return;
|
||||
|
||||
if (!update_timer->init)
|
||||
return;
|
||||
|
||||
del_timer_sync(&update_timer->timer);
|
||||
}
|
||||
|
||||
void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
|
||||
struct ath11k_hp_update_timer *update_timer,
|
||||
u32 interval, u32 ring_id)
|
||||
{
|
||||
if (!ab->hw_params.supports_shadow_regs)
|
||||
return;
|
||||
|
||||
update_timer->tx_num = 0;
|
||||
update_timer->timer_tx_num = 0;
|
||||
update_timer->ab = ab;
|
||||
update_timer->ring_id = ring_id;
|
||||
update_timer->interval = interval;
|
||||
update_timer->init = true;
|
||||
timer_setup(&update_timer->timer,
|
||||
ath11k_dp_shadow_timer_handler, 0);
|
||||
}
|
||||
|
@ -40,6 +40,7 @@ struct dp_rx_tid {
|
||||
|
||||
#define DP_REO_DESC_FREE_THRESHOLD 64
|
||||
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
|
||||
#define DP_MON_SERVICE_BUDGET 128
|
||||
|
||||
struct dp_reo_cache_flush_elem {
|
||||
struct list_head list;
|
||||
@ -205,6 +206,20 @@ struct ath11k_pdev_dp {
|
||||
#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
|
||||
#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
|
||||
|
||||
#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20
|
||||
#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10
|
||||
|
||||
struct ath11k_hp_update_timer {
|
||||
struct timer_list timer;
|
||||
bool started;
|
||||
bool init;
|
||||
u32 tx_num;
|
||||
u32 timer_tx_num;
|
||||
u32 ring_id;
|
||||
u32 interval;
|
||||
struct ath11k_base *ab;
|
||||
};
|
||||
|
||||
struct ath11k_dp {
|
||||
struct ath11k_base *ab;
|
||||
enum ath11k_htc_ep_id eid;
|
||||
@ -234,6 +249,8 @@ struct ath11k_dp {
|
||||
* - reo_cmd_cache_flush_count
|
||||
*/
|
||||
spinlock_t reo_cmd_lock;
|
||||
struct ath11k_hp_update_timer reo_cmd_timer;
|
||||
struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
|
||||
};
|
||||
|
||||
/* HTT definitions */
|
||||
@ -497,7 +514,7 @@ struct htt_ppdu_stats_cfg_cmd {
|
||||
} __packed;
|
||||
|
||||
#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
|
||||
#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9)
|
||||
#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
|
||||
#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
|
||||
|
||||
enum htt_ppdu_stats_tag_type {
|
||||
@ -1615,5 +1632,13 @@ int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
|
||||
struct dp_link_desc_bank *link_desc_banks,
|
||||
u32 ring_type, struct hal_srng *srng,
|
||||
u32 n_link_desc);
|
||||
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
|
||||
struct hal_srng *srng,
|
||||
struct ath11k_hp_update_timer *update_timer);
|
||||
void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
|
||||
struct ath11k_hp_update_timer *update_timer);
|
||||
void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
|
||||
struct ath11k_hp_update_timer *update_timer,
|
||||
u32 interval, u32 ring_id);
|
||||
|
||||
#endif
|
||||
|
@ -262,12 +262,23 @@ static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
|
||||
return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
|
||||
}
|
||||
|
||||
static void ath11k_dp_service_mon_ring(struct timer_list *t)
|
||||
{
|
||||
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
|
||||
ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
|
||||
|
||||
mod_timer(&ab->mon_reap_timer, jiffies +
|
||||
msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
|
||||
}
|
||||
|
||||
/* Returns number of Rx buffers replenished */
|
||||
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
|
||||
struct dp_rxdma_ring *rx_ring,
|
||||
int req_entries,
|
||||
enum hal_rx_buf_return_buf_manager mgr,
|
||||
gfp_t gfp)
|
||||
enum hal_rx_buf_return_buf_manager mgr)
|
||||
{
|
||||
struct hal_srng *srng;
|
||||
u32 *desc;
|
||||
@ -314,7 +325,7 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
|
||||
|
||||
spin_lock_bh(&rx_ring->idr_lock);
|
||||
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
|
||||
rx_ring->bufs_max * 3, gfp);
|
||||
rx_ring->bufs_max * 3, GFP_ATOMIC);
|
||||
spin_unlock_bh(&rx_ring->idr_lock);
|
||||
if (buf_id < 0)
|
||||
goto fail_dma_unmap;
|
||||
@ -434,7 +445,7 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
|
||||
|
||||
rx_ring->bufs_max = num_entries;
|
||||
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
|
||||
HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -570,9 +581,14 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
|
||||
/* if rxdma1_enable is false, then it doesn't need
|
||||
* to setup rxdma_mon_buf_ring, rxdma_mon_dst_ring
|
||||
* and rxdma_mon_desc_ring.
|
||||
* init reap timer for QCA6390.
|
||||
*/
|
||||
if (!ar->ab->hw_params.rxdma1_enable)
|
||||
if (!ar->ab->hw_params.rxdma1_enable) {
|
||||
/* init mon status buffer reap timer */
|
||||
timer_setup(&ar->ab->mon_reap_timer,
|
||||
ath11k_dp_service_mon_ring, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ret = ath11k_dp_srng_setup(ar->ab,
|
||||
&dp->rxdma_mon_buf_ring.refill_buf_ring,
|
||||
@ -1478,7 +1494,7 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
|
||||
}
|
||||
spin_unlock_bh(&ar->data_lock);
|
||||
|
||||
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
|
||||
ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
|
||||
if (!ppdu_info)
|
||||
return NULL;
|
||||
|
||||
@ -2598,7 +2614,7 @@ try_again:
|
||||
rx_ring = &ar->dp.rx_refill_buf_ring;
|
||||
|
||||
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
|
||||
HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
}
|
||||
|
||||
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
|
||||
@ -2680,7 +2696,7 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
|
||||
|
||||
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
|
||||
struct dp_rxdma_ring *rx_ring,
|
||||
int *buf_id, gfp_t gfp)
|
||||
int *buf_id)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t paddr;
|
||||
@ -2705,7 +2721,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
|
||||
|
||||
spin_lock_bh(&rx_ring->idr_lock);
|
||||
*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
|
||||
rx_ring->bufs_max, gfp);
|
||||
rx_ring->bufs_max, GFP_ATOMIC);
|
||||
spin_unlock_bh(&rx_ring->idr_lock);
|
||||
if (*buf_id < 0)
|
||||
goto fail_dma_unmap;
|
||||
@ -2725,8 +2741,7 @@ fail_alloc_skb:
|
||||
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
|
||||
struct dp_rxdma_ring *rx_ring,
|
||||
int req_entries,
|
||||
enum hal_rx_buf_return_buf_manager mgr,
|
||||
gfp_t gfp)
|
||||
enum hal_rx_buf_return_buf_manager mgr)
|
||||
{
|
||||
struct hal_srng *srng;
|
||||
u32 *desc;
|
||||
@ -2752,7 +2767,7 @@ int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
|
||||
|
||||
while (num_remain > 0) {
|
||||
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
|
||||
&buf_id, gfp);
|
||||
&buf_id);
|
||||
if (!skb)
|
||||
break;
|
||||
paddr = ATH11K_SKB_RXCB(skb)->paddr;
|
||||
@ -2863,7 +2878,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
|
||||
}
|
||||
move_next:
|
||||
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
|
||||
&buf_id, GFP_ATOMIC);
|
||||
&buf_id);
|
||||
|
||||
if (!skb) {
|
||||
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
|
||||
@ -3676,7 +3691,7 @@ exit:
|
||||
rx_ring = &ar->dp.rx_refill_buf_ring;
|
||||
|
||||
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
|
||||
HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
}
|
||||
|
||||
return tot_n_bufs_reaped;
|
||||
@ -3972,7 +3987,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
|
||||
rx_ring = &ar->dp.rx_refill_buf_ring;
|
||||
|
||||
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
|
||||
HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
}
|
||||
|
||||
rcu_read_lock();
|
||||
@ -4081,7 +4096,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
|
||||
|
||||
if (num_buf_freed)
|
||||
ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
|
||||
HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
|
||||
return budget - quota;
|
||||
}
|
||||
@ -4291,8 +4306,13 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
|
||||
void *src_srng_desc;
|
||||
int ret = 0;
|
||||
|
||||
dp_srng = &dp->rxdma_mon_desc_ring;
|
||||
hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
|
||||
if (ar->ab->hw_params.rxdma1_enable) {
|
||||
dp_srng = &dp->rxdma_mon_desc_ring;
|
||||
hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
|
||||
} else {
|
||||
dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
|
||||
hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
|
||||
}
|
||||
|
||||
ath11k_hal_srng_access_begin(ar->ab, hal_srng);
|
||||
|
||||
@ -4316,16 +4336,16 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
|
||||
static
|
||||
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
|
||||
dma_addr_t *paddr, u32 *sw_cookie,
|
||||
u8 *rbm,
|
||||
void **pp_buf_addr_info)
|
||||
{
|
||||
struct hal_rx_msdu_link *msdu_link =
|
||||
(struct hal_rx_msdu_link *)rx_msdu_link_desc;
|
||||
struct ath11k_buffer_addr *buf_addr_info;
|
||||
u8 rbm = 0;
|
||||
|
||||
buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
|
||||
|
||||
ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
|
||||
ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
|
||||
|
||||
*pp_buf_addr_info = (void *)buf_addr_info;
|
||||
}
|
||||
@ -4436,7 +4456,7 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
|
||||
}
|
||||
|
||||
static u32
|
||||
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
|
||||
ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
|
||||
void *ring_entry, struct sk_buff **head_msdu,
|
||||
struct sk_buff **tail_msdu, u32 *npackets,
|
||||
u32 *ppdu_id)
|
||||
@ -4461,9 +4481,15 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
|
||||
struct hal_reo_entrance_ring *ent_desc =
|
||||
(struct hal_reo_entrance_ring *)ring_entry;
|
||||
int buf_id;
|
||||
u32 rx_link_buf_info[2];
|
||||
u8 rbm;
|
||||
|
||||
if (!ar->ab->hw_params.rxdma1_enable)
|
||||
rx_ring = &dp->rx_refill_buf_ring;
|
||||
|
||||
ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
|
||||
&sw_cookie, &p_last_buf_addr_info,
|
||||
&sw_cookie,
|
||||
&p_last_buf_addr_info, &rbm,
|
||||
&msdu_cnt);
|
||||
|
||||
if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
|
||||
@ -4489,9 +4515,14 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
|
||||
return rx_bufs_used;
|
||||
}
|
||||
|
||||
rx_msdu_link_desc =
|
||||
(void *)pmon->link_desc_banks[sw_cookie].vaddr +
|
||||
(paddr - pmon->link_desc_banks[sw_cookie].paddr);
|
||||
if (ar->ab->hw_params.rxdma1_enable)
|
||||
rx_msdu_link_desc =
|
||||
(void *)pmon->link_desc_banks[sw_cookie].vaddr +
|
||||
(paddr - pmon->link_desc_banks[sw_cookie].paddr);
|
||||
else
|
||||
rx_msdu_link_desc =
|
||||
(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
|
||||
(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
|
||||
|
||||
ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
|
||||
&num_msdus);
|
||||
@ -4587,15 +4618,22 @@ next_msdu:
|
||||
spin_unlock_bh(&rx_ring->idr_lock);
|
||||
}
|
||||
|
||||
ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
|
||||
|
||||
ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
|
||||
&sw_cookie,
|
||||
&sw_cookie, &rbm,
|
||||
&p_buf_addr_info);
|
||||
|
||||
if (ath11k_dp_rx_monitor_link_desc_return(ar,
|
||||
p_last_buf_addr_info,
|
||||
dp->mac_id))
|
||||
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
|
||||
"dp_rx_monitor_link_desc_return failed");
|
||||
if (ar->ab->hw_params.rxdma1_enable) {
|
||||
if (ath11k_dp_rx_monitor_link_desc_return(ar,
|
||||
p_last_buf_addr_info,
|
||||
dp->mac_id))
|
||||
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
|
||||
"dp_rx_monitor_link_desc_return failed");
|
||||
} else {
|
||||
ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
|
||||
HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
|
||||
}
|
||||
|
||||
p_last_buf_addr_info = p_buf_addr_info;
|
||||
|
||||
@ -4779,8 +4817,8 @@ mon_deliver_fail:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
|
||||
struct napi_struct *napi)
|
||||
static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
|
||||
u32 quota, struct napi_struct *napi)
|
||||
{
|
||||
struct ath11k_pdev_dp *dp = &ar->dp;
|
||||
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
|
||||
@ -4788,10 +4826,16 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
|
||||
void *mon_dst_srng;
|
||||
u32 ppdu_id;
|
||||
u32 rx_bufs_used;
|
||||
u32 ring_id;
|
||||
struct ath11k_pdev_mon_stats *rx_mon_stats;
|
||||
u32 npackets = 0;
|
||||
|
||||
mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
|
||||
if (ar->ab->hw_params.rxdma1_enable)
|
||||
ring_id = dp->rxdma_mon_dst_ring.ring_id;
|
||||
else
|
||||
ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
|
||||
|
||||
mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
|
||||
|
||||
if (!mon_dst_srng) {
|
||||
ath11k_warn(ar->ab,
|
||||
@ -4814,7 +4858,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
|
||||
head_msdu = NULL;
|
||||
tail_msdu = NULL;
|
||||
|
||||
rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
|
||||
rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
|
||||
&head_msdu,
|
||||
&tail_msdu,
|
||||
&npackets, &ppdu_id);
|
||||
@ -4841,15 +4885,21 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
|
||||
|
||||
if (rx_bufs_used) {
|
||||
rx_mon_stats->dest_ppdu_done++;
|
||||
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
|
||||
&dp->rxdma_mon_buf_ring,
|
||||
rx_bufs_used,
|
||||
HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
|
||||
if (ar->ab->hw_params.rxdma1_enable)
|
||||
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
|
||||
&dp->rxdma_mon_buf_ring,
|
||||
rx_bufs_used,
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
else
|
||||
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
|
||||
&dp->rx_refill_buf_ring,
|
||||
rx_bufs_used,
|
||||
HAL_RX_BUF_RBM_SW3_BM);
|
||||
}
|
||||
}
|
||||
|
||||
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
|
||||
u32 quota,
|
||||
int mac_id, u32 quota,
|
||||
struct napi_struct *napi)
|
||||
{
|
||||
struct ath11k_pdev_dp *dp = &ar->dp;
|
||||
@ -4873,7 +4923,7 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
|
||||
if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
|
||||
rx_mon_stats->status_ppdu_done++;
|
||||
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
|
||||
ath11k_dp_rx_mon_dest_process(ar, quota, napi);
|
||||
ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
|
||||
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
|
||||
}
|
||||
dev_kfree_skb_any(status_skb);
|
||||
@ -4888,10 +4938,10 @@ static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
|
||||
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
|
||||
int num_buffs_reaped = 0;
|
||||
|
||||
num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
|
||||
num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
|
||||
&pmon->rx_status_q);
|
||||
if (num_buffs_reaped)
|
||||
ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
|
||||
ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
|
||||
|
||||
return num_buffs_reaped;
|
||||
}
|
||||
|
@ -74,8 +74,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
|
||||
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
|
||||
struct dp_rxdma_ring *rx_ring,
|
||||
int req_entries,
|
||||
enum hal_rx_buf_return_buf_manager mgr,
|
||||
gfp_t gfp);
|
||||
enum hal_rx_buf_return_buf_manager mgr);
|
||||
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
|
||||
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
|
||||
const void *ptr, void *data),
|
||||
@ -87,8 +86,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
|
||||
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
|
||||
struct dp_rxdma_ring *rx_ring,
|
||||
int req_entries,
|
||||
enum hal_rx_buf_return_buf_manager mgr,
|
||||
gfp_t gfp);
|
||||
enum hal_rx_buf_return_buf_manager mgr);
|
||||
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
|
||||
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
|
||||
int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
|
||||
|
@ -254,6 +254,8 @@ tcl_ring_sel:
|
||||
|
||||
ath11k_hal_srng_access_end(ab, tcl_ring);
|
||||
|
||||
ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
|
||||
|
||||
spin_unlock_bh(&tcl_ring->lock);
|
||||
|
||||
ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
|
||||
@ -536,6 +538,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
|
||||
u32 msdu_id;
|
||||
u8 mac_id;
|
||||
|
||||
spin_lock_bh(&status_ring->lock);
|
||||
|
||||
ath11k_hal_srng_access_begin(ab, status_ring);
|
||||
|
||||
while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
|
||||
@ -555,6 +559,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
|
||||
|
||||
ath11k_hal_srng_access_end(ab, status_ring);
|
||||
|
||||
spin_unlock_bh(&status_ring->lock);
|
||||
|
||||
while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
|
||||
struct hal_wbm_release_ring *tx_status;
|
||||
u32 desc_id;
|
||||
@ -786,9 +792,9 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
|
||||
cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
|
||||
HAL_ADDR_MSB_REG_SHIFT;
|
||||
|
||||
cmd->ring_msi_addr_lo = 0;
|
||||
cmd->ring_msi_addr_hi = 0;
|
||||
cmd->msi_data = 0;
|
||||
cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
|
||||
cmd->ring_msi_addr_hi = ((uint64_t)(params.msi_addr) >> 32) & 0xffffffff;
|
||||
cmd->msi_data = params.msi_data;
|
||||
|
||||
cmd->intr_info = FIELD_PREP(
|
||||
HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
|
||||
@ -804,6 +810,15 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
|
||||
params.low_threshold);
|
||||
}
|
||||
|
||||
ath11k_dbg(ab, ATH11K_DBG_HAL,
|
||||
"%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
|
||||
__func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
|
||||
cmd->msi_data);
|
||||
|
||||
ath11k_dbg(ab, ATH11K_DBG_HAL,
|
||||
"ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
|
||||
ring_id, ring_type, cmd->intr_info, cmd->info2);
|
||||
|
||||
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
@ -868,24 +883,27 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
|
||||
int len = sizeof(*cmd);
|
||||
u8 pdev_mask;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
skb = ath11k_htc_alloc_skb(ab, len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
|
||||
skb = ath11k_htc_alloc_skb(ab, len);
|
||||
if (!skb)
|
||||
return -ENOMEM;
|
||||
|
||||
skb_put(skb, len);
|
||||
cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
|
||||
cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
|
||||
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
|
||||
skb_put(skb, len);
|
||||
cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
|
||||
cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
|
||||
HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
|
||||
|
||||
pdev_mask = 1 << (ar->pdev_idx);
|
||||
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
|
||||
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
|
||||
pdev_mask = 1 << (i + 1);
|
||||
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
|
||||
cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
|
||||
|
||||
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
|
||||
if (ret) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return ret;
|
||||
ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
|
||||
if (ret) {
|
||||
dev_kfree_skb_any(skb);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1028,10 +1046,23 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
|
||||
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
|
||||
}
|
||||
|
||||
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
|
||||
HAL_RXDMA_MONITOR_BUF,
|
||||
DP_RXDMA_REFILL_RING_SIZE,
|
||||
&tlv_filter);
|
||||
if (ab->hw_params.rxdma1_enable) {
|
||||
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
|
||||
HAL_RXDMA_MONITOR_BUF,
|
||||
DP_RXDMA_REFILL_RING_SIZE,
|
||||
&tlv_filter);
|
||||
} else if (!reset) {
|
||||
/* set in monitor mode only */
|
||||
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
|
||||
ring_id = dp->rx_mac_buf_ring[i].ring_id;
|
||||
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
|
||||
dp->mac_id + i,
|
||||
HAL_RXDMA_BUF,
|
||||
1024,
|
||||
&tlv_filter);
|
||||
}
|
||||
}
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1050,5 +1081,9 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
|
||||
&tlv_filter);
|
||||
}
|
||||
|
||||
if (!ar->ab->hw_params.rxdma1_enable)
|
||||
mod_timer(&ar->ab->mon_reap_timer, jiffies +
|
||||
msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -330,7 +330,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
|
||||
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
|
||||
ath11k_hif_write32(ab, reg_base +
|
||||
HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
|
||||
(u32)srng->msi_addr);
|
||||
srng->msi_addr);
|
||||
|
||||
val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
|
||||
((u64)srng->msi_addr >>
|
||||
@ -344,7 +344,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
|
||||
srng->msi_data);
|
||||
}
|
||||
|
||||
ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
|
||||
ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
|
||||
|
||||
val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
|
||||
((u64)srng->ring_base_paddr >>
|
||||
@ -409,7 +409,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
|
||||
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
|
||||
ath11k_hif_write32(ab, reg_base +
|
||||
HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
|
||||
(u32)srng->msi_addr);
|
||||
srng->msi_addr);
|
||||
|
||||
val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
|
||||
((u64)srng->msi_addr >>
|
||||
@ -424,7 +424,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
|
||||
srng->msi_data);
|
||||
}
|
||||
|
||||
ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
|
||||
ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
|
||||
|
||||
val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
|
||||
((u64)srng->ring_base_paddr >>
|
||||
@ -560,6 +560,8 @@ void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
|
||||
params->intr_batch_cntr_thres_entries =
|
||||
srng->intr_batch_cntr_thres_entries;
|
||||
params->low_threshold = srng->u.src_ring.low_threshold;
|
||||
params->msi_addr = srng->msi_addr;
|
||||
params->msi_data = srng->msi_data;
|
||||
params->flags = srng->flags;
|
||||
}
|
||||
|
||||
@ -1018,8 +1020,16 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
|
||||
lmac_idx);
|
||||
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
|
||||
} else {
|
||||
srng->u.src_ring.hp_addr =
|
||||
if (!ab->hw_params.supports_shadow_regs)
|
||||
srng->u.src_ring.hp_addr =
|
||||
(u32 *)((unsigned long)ab->mem + reg_base);
|
||||
else
|
||||
ath11k_dbg(ab, ATH11K_DBG_HAL,
|
||||
"hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
|
||||
type, ring_num,
|
||||
reg_base,
|
||||
(unsigned long)srng->u.src_ring.hp_addr -
|
||||
(unsigned long)ab->mem);
|
||||
}
|
||||
} else {
|
||||
/* During initialization loop count in all the descriptors
|
||||
@ -1043,9 +1053,18 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
|
||||
lmac_idx);
|
||||
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
|
||||
} else {
|
||||
srng->u.dst_ring.tp_addr =
|
||||
if (!ab->hw_params.supports_shadow_regs)
|
||||
srng->u.dst_ring.tp_addr =
|
||||
(u32 *)((unsigned long)ab->mem + reg_base +
|
||||
(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
|
||||
else
|
||||
ath11k_dbg(ab, ATH11K_DBG_HAL,
|
||||
"type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
|
||||
type, ring_num,
|
||||
reg_base + (HAL_REO1_RING_TP(ab) -
|
||||
HAL_REO1_RING_HP(ab)),
|
||||
(unsigned long)srng->u.dst_ring.tp_addr -
|
||||
(unsigned long)ab->mem);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1062,6 +1081,112 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
|
||||
return ring_id;
|
||||
}
|
||||
|
||||
static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
|
||||
int shadow_cfg_idx,
|
||||
enum hal_ring_type ring_type,
|
||||
int ring_num)
|
||||
{
|
||||
struct hal_srng *srng;
|
||||
struct ath11k_hal *hal = &ab->hal;
|
||||
int ring_id;
|
||||
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
|
||||
|
||||
ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
|
||||
if (ring_id < 0)
|
||||
return;
|
||||
|
||||
srng = &hal->srng_list[ring_id];
|
||||
|
||||
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
|
||||
srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
|
||||
(unsigned long)ab->mem);
|
||||
else
|
||||
srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
|
||||
(unsigned long)ab->mem);
|
||||
}
|
||||
|
||||
int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
|
||||
enum hal_ring_type ring_type,
|
||||
int ring_num)
|
||||
{
|
||||
struct ath11k_hal *hal = &ab->hal;
|
||||
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
|
||||
int shadow_cfg_idx = hal->num_shadow_reg_configured;
|
||||
u32 target_reg;
|
||||
|
||||
if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
|
||||
return -EINVAL;
|
||||
|
||||
hal->num_shadow_reg_configured++;
|
||||
|
||||
target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
|
||||
target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
|
||||
ring_num;
|
||||
|
||||
/* For destination ring, shadow the TP */
|
||||
if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
|
||||
target_reg += HAL_OFFSET_FROM_HP_TO_TP;
|
||||
|
||||
hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
|
||||
|
||||
/* update hp/tp addr to hal structure */
|
||||
ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
|
||||
ring_num);
|
||||
|
||||
ath11k_dbg(ab, ATH11K_DBG_HAL,
|
||||
"target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
|
||||
target_reg,
|
||||
HAL_SHADOW_REG(shadow_cfg_idx),
|
||||
shadow_cfg_idx,
|
||||
ring_type, ring_num);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
|
||||
{
|
||||
struct ath11k_hal *hal = &ab->hal;
|
||||
int ring_type, ring_num;
|
||||
|
||||
/* update all the non-CE srngs. */
|
||||
for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
|
||||
struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
|
||||
|
||||
if (ring_type == HAL_CE_SRC ||
|
||||
ring_type == HAL_CE_DST ||
|
||||
ring_type == HAL_CE_DST_STATUS)
|
||||
continue;
|
||||
|
||||
if (srng_config->lmac_ring)
|
||||
continue;
|
||||
|
||||
for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
|
||||
ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
|
||||
}
|
||||
}
|
||||
|
||||
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
|
||||
u32 **cfg, u32 *len)
|
||||
{
|
||||
struct ath11k_hal *hal = &ab->hal;
|
||||
|
||||
*len = hal->num_shadow_reg_configured;
|
||||
*cfg = hal->shadow_reg_addr;
|
||||
}
|
||||
|
||||
void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
|
||||
struct hal_srng *srng)
|
||||
{
|
||||
lockdep_assert_held(&srng->lock);
|
||||
|
||||
/* check whether the ring is empty. Update the shadow
|
||||
* HP only when the ring isn't empty.
|
||||
*/
|
||||
if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
|
||||
*srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
|
||||
ath11k_hal_srng_access_end(ab, srng);
|
||||
}
|
||||
|
||||
static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
|
||||
{
|
||||
struct ath11k_hal *hal = &ab->hal;
|
||||
|
@ -31,8 +31,12 @@ struct ath11k_base;
|
||||
#define HAL_DSCP_TID_TBL_SIZE 24
|
||||
|
||||
/* calculate the register address from bar0 of shadow register x */
|
||||
#define SHADOW_BASE_ADDRESS 0x00003024
|
||||
#define SHADOW_NUM_REGISTERS 36
|
||||
#define HAL_SHADOW_BASE_ADDR 0x000008fc
|
||||
#define HAL_SHADOW_NUM_REGS 36
|
||||
#define HAL_HP_OFFSET_IN_REG_START 1
|
||||
#define HAL_OFFSET_FROM_HP_TO_TP 4
|
||||
|
||||
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
|
||||
|
||||
/* WCSS Relative address */
|
||||
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
|
||||
@ -882,7 +886,7 @@ struct ath11k_hal {
|
||||
u8 current_blk_index;
|
||||
|
||||
/* shadow register configuration */
|
||||
u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
|
||||
u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
|
||||
int num_shadow_reg_configured;
|
||||
};
|
||||
|
||||
@ -935,5 +939,12 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
|
||||
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
|
||||
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
|
||||
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
|
||||
|
||||
void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
|
||||
u32 **cfg, u32 *len);
|
||||
int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
|
||||
enum hal_ring_type ring_type,
|
||||
int ring_num);
|
||||
void ath11k_hal_srng_shadow_config(struct ath11k_base *ab);
|
||||
void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
|
||||
struct hal_srng *srng);
|
||||
#endif
|
||||
|
@ -256,6 +256,8 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
|
||||
break;
|
||||
}
|
||||
|
||||
ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);
|
||||
|
||||
out:
|
||||
ath11k_hal_srng_access_end(ab, srng);
|
||||
spin_unlock_bh(&srng->lock);
|
||||
@ -1195,7 +1197,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
|
||||
|
||||
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
|
||||
u32 *sw_cookie, void **pp_buf_addr,
|
||||
u32 *msdu_cnt)
|
||||
u8 *rbm, u32 *msdu_cnt)
|
||||
{
|
||||
struct hal_reo_entrance_ring *reo_ent_ring =
|
||||
(struct hal_reo_entrance_ring *)rx_desc;
|
||||
@ -1217,6 +1219,8 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
|
||||
|
||||
*sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
|
||||
buf_addr_info->info1);
|
||||
*rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
|
||||
buf_addr_info->info1);
|
||||
|
||||
*pp_buf_addr = (void *)buf_addr_info;
|
||||
}
|
||||
|
@ -321,7 +321,7 @@ void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
|
||||
dma_addr_t *paddr, u32 *desc_bank);
|
||||
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
|
||||
dma_addr_t *paddr, u32 *sw_cookie,
|
||||
void **pp_buf_addr_info,
|
||||
void **pp_buf_addr_info, u8 *rbm,
|
||||
u32 *msdu_cnt);
|
||||
enum hal_rx_mon_status
|
||||
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
|
||||
|
@ -515,6 +515,12 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc)
|
||||
return -ECOMM;
|
||||
}
|
||||
|
||||
/* For QCA6390, wmi endpoint uses 1 credit to avoid
|
||||
* back-to-back write.
|
||||
*/
|
||||
if (ab->hw_params.supports_shadow_regs)
|
||||
htc->total_transmit_credits = 1;
|
||||
|
||||
ath11k_htc_setup_target_buffer_assignments(htc);
|
||||
|
||||
return 0;
|
||||
|
@ -156,6 +156,11 @@ struct ath11k_hw_params {
|
||||
bool htt_peer_map_v2;
|
||||
bool tcl_0_only;
|
||||
u8 spectral_fft_sz;
|
||||
|
||||
u16 interface_modes;
|
||||
bool supports_monitor;
|
||||
bool supports_shadow_regs;
|
||||
bool idle_ps;
|
||||
};
|
||||
|
||||
struct ath11k_hw_ops {
|
||||
|
@ -758,21 +758,12 @@ static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
|
||||
|
||||
static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
|
||||
{
|
||||
struct ath11k *ar = hw->priv;
|
||||
int ret = 0;
|
||||
|
||||
/* mac80211 requires this op to be present and that's why
|
||||
* there's an empty function, this can be extended when
|
||||
* required.
|
||||
*/
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
/* TODO: Handle configuration changes as appropriate */
|
||||
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
|
||||
@ -2994,7 +2985,8 @@ static int ath11k_mac_station_add(struct ath11k *ar,
|
||||
goto free_tx_stats;
|
||||
}
|
||||
|
||||
if (ab->hw_params.vdev_start_delay) {
|
||||
if (ab->hw_params.vdev_start_delay &&
|
||||
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
|
||||
ret = ath11k_start_vdev_delay(ar->hw, vif);
|
||||
if (ret) {
|
||||
ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
|
||||
@ -4216,6 +4208,15 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
|
||||
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
|
||||
&ab->pdevs[ar->pdev_idx]);
|
||||
|
||||
/* allow device to enter IMPS */
|
||||
if (ab->hw_params.idle_ps) {
|
||||
ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
|
||||
1, pdev->pdev_id);
|
||||
if (ret) {
|
||||
ath11k_err(ab, "failed to enable idle ps: %d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
|
||||
err:
|
||||
@ -4351,7 +4352,7 @@ static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
|
||||
}
|
||||
|
||||
static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
|
||||
struct ieee80211_vif *vif)
|
||||
struct ieee80211_vif *vif)
|
||||
{
|
||||
struct ath11k *ar = hw->priv;
|
||||
struct ath11k_base *ab = ar->ab;
|
||||
@ -4702,6 +4703,10 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
|
||||
ath11k_warn(ar->ab,
|
||||
"fail to set monitor filter: %d\n", ret);
|
||||
}
|
||||
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
|
||||
"changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
|
||||
changed_flags, *total_flags, reset_flag);
|
||||
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
}
|
||||
|
||||
@ -5207,6 +5212,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
|
||||
struct ath11k_base *ab = ar->ab;
|
||||
struct ath11k_vif *arvif = (void *)vif->drv_priv;
|
||||
int ret;
|
||||
struct peer_create_params param;
|
||||
|
||||
mutex_lock(&ar->conf_mutex);
|
||||
|
||||
@ -5215,7 +5221,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
|
||||
ctx, arvif->vdev_id);
|
||||
|
||||
/* for QCA6390 bss peer must be created before vdev_start */
|
||||
if (ab->hw_params.vdev_start_delay) {
|
||||
if (ab->hw_params.vdev_start_delay &&
|
||||
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
|
||||
arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
|
||||
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
return 0;
|
||||
@ -5226,6 +5234,13 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
if (ab->hw_params.vdev_start_delay) {
|
||||
param.vdev_id = arvif->vdev_id;
|
||||
param.peer_type = WMI_PEER_TYPE_DEFAULT;
|
||||
param.peer_addr = ar->mac_addr;
|
||||
ret = ath11k_peer_create(ar, arvif, NULL, ¶m);
|
||||
}
|
||||
|
||||
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
|
||||
if (ret) {
|
||||
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
|
||||
@ -5271,6 +5286,11 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
|
||||
|
||||
WARN_ON(!arvif->is_started);
|
||||
|
||||
if (ab->hw_params.vdev_start_delay &&
|
||||
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
|
||||
ath11k_peer_find_by_addr(ab, ar->mac_addr))
|
||||
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
|
||||
|
||||
ret = ath11k_mac_vdev_stop(arvif);
|
||||
if (ret)
|
||||
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
|
||||
@ -5278,6 +5298,10 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
|
||||
|
||||
arvif->is_started = false;
|
||||
|
||||
if (ab->hw_params.vdev_start_delay &&
|
||||
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
|
||||
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
|
||||
|
||||
mutex_unlock(&ar->conf_mutex);
|
||||
}
|
||||
|
||||
@ -5878,35 +5902,6 @@ static const struct ieee80211_ops ath11k_ops = {
|
||||
#endif
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_limit ath11k_if_limits[] = {
|
||||
{
|
||||
.max = 1,
|
||||
.types = BIT(NL80211_IFTYPE_STATION),
|
||||
},
|
||||
{
|
||||
.max = 16,
|
||||
.types = BIT(NL80211_IFTYPE_AP)
|
||||
#ifdef CONFIG_MAC80211_MESH
|
||||
| BIT(NL80211_IFTYPE_MESH_POINT)
|
||||
#endif
|
||||
},
|
||||
};
|
||||
|
||||
static const struct ieee80211_iface_combination ath11k_if_comb[] = {
|
||||
{
|
||||
.limits = ath11k_if_limits,
|
||||
.n_limits = ARRAY_SIZE(ath11k_if_limits),
|
||||
.max_interfaces = 16,
|
||||
.num_different_channels = 1,
|
||||
.beacon_int_infra_match = true,
|
||||
.beacon_int_min_gcd = 100,
|
||||
.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
|
||||
BIT(NL80211_CHAN_WIDTH_20) |
|
||||
BIT(NL80211_CHAN_WIDTH_40) |
|
||||
BIT(NL80211_CHAN_WIDTH_80),
|
||||
},
|
||||
};
|
||||
|
||||
static void ath11k_mac_update_ch_list(struct ath11k *ar,
|
||||
struct ieee80211_supported_band *band,
|
||||
u32 freq_low, u32 freq_high)
|
||||
@ -6032,6 +6027,50 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
|
||||
{
|
||||
struct ath11k_base *ab = ar->ab;
|
||||
struct ieee80211_iface_combination *combinations;
|
||||
struct ieee80211_iface_limit *limits;
|
||||
int n_limits;
|
||||
|
||||
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
|
||||
if (!combinations)
|
||||
return -ENOMEM;
|
||||
|
||||
n_limits = 2;
|
||||
|
||||
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
|
||||
if (!limits)
|
||||
return -ENOMEM;
|
||||
|
||||
limits[0].max = 1;
|
||||
limits[0].types |= BIT(NL80211_IFTYPE_STATION);
|
||||
|
||||
limits[1].max = 16;
|
||||
limits[1].types |= BIT(NL80211_IFTYPE_AP);
|
||||
|
||||
if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
|
||||
ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
|
||||
limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
|
||||
|
||||
combinations[0].limits = limits;
|
||||
combinations[0].n_limits = n_limits;
|
||||
combinations[0].max_interfaces = 16;
|
||||
combinations[0].num_different_channels = 1;
|
||||
combinations[0].beacon_int_infra_match = true;
|
||||
combinations[0].beacon_int_min_gcd = 100;
|
||||
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
|
||||
BIT(NL80211_CHAN_WIDTH_20) |
|
||||
BIT(NL80211_CHAN_WIDTH_40) |
|
||||
BIT(NL80211_CHAN_WIDTH_80);
|
||||
|
||||
ar->hw->wiphy->iface_combinations = combinations;
|
||||
ar->hw->wiphy->n_iface_combinations = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const u8 ath11k_if_types_ext_capa[] = {
|
||||
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
|
||||
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
|
||||
@ -6082,6 +6121,9 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
|
||||
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
|
||||
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
|
||||
|
||||
kfree(ar->hw->wiphy->iface_combinations[0].limits);
|
||||
kfree(ar->hw->wiphy->iface_combinations);
|
||||
|
||||
SET_IEEE80211_DEV(ar->hw, NULL);
|
||||
}
|
||||
|
||||
@ -6133,12 +6175,16 @@ static int __ath11k_mac_register(struct ath11k *ar)
|
||||
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
|
||||
ath11k_mac_setup_he_cap(ar, cap);
|
||||
|
||||
ret = ath11k_mac_setup_iface_combinations(ar);
|
||||
if (ret) {
|
||||
ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
|
||||
goto err_free_channels;
|
||||
}
|
||||
|
||||
ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
|
||||
ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
|
||||
|
||||
ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
|
||||
BIT(NL80211_IFTYPE_AP) |
|
||||
BIT(NL80211_IFTYPE_MESH_POINT);
|
||||
ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
|
||||
|
||||
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
|
||||
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
|
||||
@ -6200,9 +6246,6 @@ static int __ath11k_mac_register(struct ath11k *ar)
|
||||
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
|
||||
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
|
||||
|
||||
ar->hw->wiphy->iface_combinations = ath11k_if_comb;
|
||||
ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb);
|
||||
|
||||
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
|
||||
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
|
||||
|
||||
@ -6224,25 +6267,37 @@ static int __ath11k_mac_register(struct ath11k *ar)
|
||||
ret = ieee80211_register_hw(ar->hw);
|
||||
if (ret) {
|
||||
ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
|
||||
goto err_free;
|
||||
goto err_free_if_combs;
|
||||
}
|
||||
|
||||
if (!ab->hw_params.supports_monitor)
|
||||
/* There's a race between calling ieee80211_register_hw()
|
||||
* and here where the monitor mode is enabled for a little
|
||||
* while. But that time is so short that in practice it makes
|
||||
* no difference in real life.
|
||||
*/
|
||||
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
|
||||
|
||||
/* Apply the regd received during initialization */
|
||||
ret = ath11k_regd_update(ar, true);
|
||||
if (ret) {
|
||||
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
|
||||
goto err_free;
|
||||
goto err_free_if_combs;
|
||||
}
|
||||
|
||||
ret = ath11k_debugfs_register(ar);
|
||||
if (ret) {
|
||||
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
|
||||
goto err_free;
|
||||
goto err_free_if_combs;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_free:
|
||||
err_free_if_combs:
|
||||
kfree(ar->hw->wiphy->iface_combinations[0].limits);
|
||||
kfree(ar->hw->wiphy->iface_combinations);
|
||||
|
||||
err_free_channels:
|
||||
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
|
||||
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
|
||||
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
|
||||
|
@ -24,6 +24,16 @@
#define WINDOW_START 0x80000
#define WINDOW_RANGE_MASK GENMASK(18, 0)

#define TCSR_SOC_HW_VERSION 0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)

/* BAR0 + 4k is always accessible, and no
 * need to force wakeup.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

#define QCA6390_DEVICE_ID 0x1101

static const struct pci_device_id ath11k_pci_id_table[] = {
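
The constants above are easiest to read with a worked example. The sketch below is illustrative only: the helper, the decode function and the sample register value are made up and not part of the driver; only ACCESS_ALWAYS_OFF, the TCSR masks and FIELD_GET() come from the code and headers above. It shows the 4 KiB - 32 = 0xFE0 wakeup boundary and how FIELD_GET() extracts the major/minor fields.

#include <linux/bitfield.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper, not in the driver: true when a register offset lies
 * beyond the always-accessible first 4 KiB - 32 bytes of BAR0 and would
 * therefore need an MHI wakeup before access.
 */
static inline bool ath11k_pci_offset_needs_wakeup(u32 offset)
{
	return offset >= ACCESS_ALWAYS_OFF;	/* 0x1000 - 0x20 = 0xFE0 */
}

/* Decoding a made-up TCSR_SOC_HW_VERSION value of 0x00000201:
 * bits 16:8 -> major = 2, bits 7:0 -> minor = 1.
 */
static void example_decode_hw_version(void)
{
	u32 val = 0x00000201;	/* hypothetical sample, not read from hardware */
	u32 major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, val);
	u32 minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, val);

	pr_info("soc hw version %u.%u\n", major, minor);
}
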
@ -124,6 +134,13 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	if (offset < WINDOW_START) {
		iowrite32(value, ab->mem + offset);
	} else {
@ -132,6 +149,10 @@ void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
		iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	}

	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
@ -139,6 +160,13 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	u32 val;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	if (offset < WINDOW_START) {
		val = ioread32(ab->mem + offset);
	} else {
@ -148,6 +176,10 @@ u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
		spin_unlock_bh(&ab_pci->window_lock);
	}

	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);

	return val;
}

@ -582,6 +614,9 @@ static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;

	ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
				    &cfg->shadow_reg_v2_len);
}

static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
@ -727,6 +762,8 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath11k_pci_sw_reset(ab_pci->ab);

	ret = ath11k_mhi_start(ab_pci);
@ -743,6 +780,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	ath11k_mhi_stop(ab_pci);
	clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath11k_pci_force_wake(ab_pci->ab);
	ath11k_pci_sw_reset(ab_pci->ab);
}
@ -771,6 +809,10 @@ static void ath11k_pci_stop(struct ath11k_base *ab)

static int ath11k_pci_start(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	ath11k_pci_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

@ -839,21 +881,11 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
{
	struct ath11k_base *ab;
	struct ath11k_pci *ab_pci;
	enum ath11k_hw_rev hw_rev;
	u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
	int ret;

	dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
		hw_rev = ATH11K_HW_QCA6390_HW20;
		break;
	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		return -ENOTSUPP;
	}

	ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
			       &ath11k_pci_bus_params);
	if (!ab) {
@ -862,7 +894,6 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
	}

	ab->dev = &pdev->dev;
	ab->hw_rev = hw_rev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath11k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
@ -878,6 +909,35 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
		goto err_free_core;
	}

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
		soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
		soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
						 soc_hw_version);
		soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
						 soc_hw_version);

		ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
			   soc_hw_version_major, soc_hw_version_minor);

		switch (soc_hw_version_major) {
		case 2:
			ab->hw_rev = ATH11K_HW_QCA6390_HW20;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;
	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath11k_pci_enable_msi(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to enable msi: %d\n", ret);
@ -949,10 +1009,17 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);

	ath11k_core_deinit(ab);

	ath11k_mhi_unregister(ab_pci);

	ath11k_pci_free_irq(ab);
	ath11k_pci_disable_msi(ab_pci);
	ath11k_pci_free_region(ab_pci);
	ath11k_pci_free_irq(ab);

	ath11k_hal_srng_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
}

@ -36,6 +36,10 @@ struct ath11k_msi_config {
	struct ath11k_msi_user *users;
};

enum ath11k_pci_flags {
	ATH11K_PCI_FLAG_INIT_DONE,
};

struct ath11k_pci {
	struct pci_dev *pdev;
	struct ath11k_base *ab;
@ -48,6 +52,9 @@ struct ath11k_pci {

	/* protects register_window above */
	spinlock_t window_lock;

	/* enum ath11k_pci_flags */
	unsigned long flags;
};

static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)

@ -3,6 +3,8 @@
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 */

#include <linux/elf.h>

#include "qmi.h"
#include "core.h"
#include "debug.h"
@ -1990,6 +1992,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
	struct qmi_txn txn = {};
	int ret;
	const u8 *temp;
	int bdf_type;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
@ -2006,6 +2009,13 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
	temp = bd.data;
	remaining = bd.len;

	if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
		bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
	else
		bdf_type = ATH11K_QMI_BDF_TYPE_BIN;

	ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type);

	while (remaining) {
		req->valid = 1;
		req->file_id_valid = 1;
@ -2015,7 +2025,7 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
		req->seg_id_valid = 1;
		req->data_valid = 1;
		req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
		req->bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
		req->bdf_type = bdf_type;
		req->bdf_type_valid = 1;
		req->end_valid = 1;
		req->end = 0;
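
The ELF-versus-binary board file check above relies only on the standard ELF magic from <linux/elf.h>, where ELFMAG is "\177ELF" and SELFMAG is 4. A minimal sketch of the same test as a standalone helper follows; the helper name is made up and not part of the driver.

#include <linux/elf.h>		/* ELFMAG, SELFMAG */
#include <linux/string.h>	/* memcmp() */
#include <linux/types.h>

/* Hypothetical helper: returns true when the board data blob starts with the
 * 4-byte ELF magic, i.e. it is an ELF board file rather than a flat binary.
 */
static bool board_data_is_elf(const u8 *data, size_t len)
{
	return len >= SELFMAG && memcmp(data, ELFMAG, SELFMAG) == 0;
}
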
@ -2265,7 +2275,18 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
		req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
	}
	req->shadow_reg_valid = 0;
	req->shadow_reg_v2_valid = 0;

	/* set shadow v2 configuration */
	if (ab->hw_params.supports_shadow_regs) {
		req->shadow_reg_v2_valid = 1;
		req->shadow_reg_v2_len = min_t(u32,
					       ab->qmi.ce_cfg.shadow_reg_v2_len,
					       QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
		memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2,
		       sizeof(u32) * req->shadow_reg_v2_len);
	} else {
		req->shadow_reg_v2_valid = 0;
	}

	ret = qmi_txn_init(&ab->qmi.handle, &txn,
			   qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
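
The min_t() clamp above keeps the copied shadow register list within what the QMI message can carry. A purely illustrative fragment with made-up sizes (neither value is taken from the driver or the QMI definitions):

#include <linux/kernel.h>	/* min_t() */
#include <linux/types.h>

static u32 example_clamp_shadow_len(void)
{
	u32 reported_len = 40;	/* made-up: entries reported by the CE layer */
	u32 msg_capacity = 36;	/* made-up: room in the QMI message */

	return min_t(u32, reported_len, msg_capacity);	/* -> 36 */
}
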
@ -77,7 +77,7 @@ struct ath11k_qmi_ce_cfg {
	int svc_to_ce_map_len;
	const u8 *shadow_reg;
	int shadow_reg_len;
	u8 *shadow_reg_v2;
	u32 *shadow_reg_v2;
	int shadow_reg_v2_len;
};

@ -206,7 +206,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
	ab = ar->ab;
	pdev_id = ar->pdev_idx;

	spin_lock(&ab->base_lock);
	spin_lock_bh(&ab->base_lock);

	if (init) {
		/* Apply the regd received during init through
@ -227,7 +227,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)

	if (!regd) {
		ret = -EINVAL;
		spin_unlock(&ab->base_lock);
		spin_unlock_bh(&ab->base_lock);
		goto err;
	}

@ -238,7 +238,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
	if (regd_copy)
		ath11k_copy_regd(regd, regd_copy);

	spin_unlock(&ab->base_lock);
	spin_unlock_bh(&ab->base_lock);

	if (!regd_copy) {
		ret = -ENOMEM;

@ -6530,7 +6530,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
		break;
	/* TODO: Add remaining events */
	default:
		ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
		ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
		break;
	}

@ -1328,27 +1328,6 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
	{0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
};

static const u32 ar9580_1p0_pcie_phy_clkreq_enable_L1[][2] = {
	/* Addr allmodes */
	{0x00004040, 0x0835365e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};

static const u32 ar9580_1p0_pcie_phy_clkreq_disable_L1[][2] = {
	/* Addr allmodes */
	{0x00004040, 0x0831365e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};

static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
	/* Addr allmodes */
	{0x00004040, 0x0831265e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};

static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
	/* Addr 5G 2G */
	{0x00009814, 0x3400c00f, 0x3400c00f},

@ -31,6 +31,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
	if (!ret) {
		wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
		vif_priv->pw_state = WCN36XX_BMPS;
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
	} else {
		/*
		 * One of the reasons why HW will not enter BMPS is because
@ -55,6 +56,7 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
	}
	wcn36xx_smd_exit_bmps(wcn, vif);
	vif_priv->pw_state = WCN36XX_FULL_POWER;
	vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
	return 0;
}